| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-12 18:05:02 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-06-12 18:05:02 -0400 |
| commit | 38da0d6888a6bccd3ae93227aa9f29537f8bd5f9 (patch) | |
| tree | 64c7c1d008460f05b579110a7e40b6c796fb1578 /drivers/pci/controller | |
| parent | 19785cf93b6c4252981894394f2dbd35c5e5d1ec (diff) | |
| parent | ce4c7b241227ed49c0b6f0f1867653e1ee0007ef (diff) | |
Merge tag 'pci-v4.18-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull more PCI updates from Bjorn Helgaas:
- squash AER directory into drivers/pci/pcie/aer.c (Bjorn Helgaas)
- collect all native hardware drivers under drivers/pci/controller/
(Shawn Lin)
* tag 'pci-v4.18-changes-2' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci:
PCI/AER: Use "PCI Express" consistently in Kconfig text
PCI/AER: Hoist aerdrv.c, aer_inject.c up to drivers/pci/pcie/
PCI/AER: Squash Kconfig.debug into Kconfig
PCI/AER: Move private AER things to aerdrv.c
PCI/AER: Move aer_irq() declaration to portdrv.h
PCI/AER: Move pcie_aer_get_firmware_first() to portdrv.h
PCI/AER: Remove duplicate pcie_port_bus_type declaration
PCI/AER: Squash ecrc.c into aerdrv.c
PCI/AER: Squash aerdrv_acpi.c into aerdrv.c
PCI/AER: Squash aerdrv_errprint.c into aerdrv.c
PCI/AER: Squash aerdrv_core.c into aerdrv.c
PCI/AER: Reorder code to group probe/remove stuff together
PCI/AER: Remove forward declarations
PCI: Collect all native drivers under drivers/pci/controller/
Diffstat (limited to 'drivers/pci/controller')
59 files changed, 35727 insertions, 0 deletions
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
new file mode 100644
index 000000000000..18fa09b3ac8f
--- /dev/null
+++ b/drivers/pci/controller/Kconfig
@@ -0,0 +1,275 @@
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | menu "PCI controller drivers" | ||
| 4 | depends on PCI | ||
| 5 | |||
| 6 | config PCI_MVEBU | ||
| 7 | bool "Marvell EBU PCIe controller" | ||
| 8 | depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST | ||
| 9 | depends on MVEBU_MBUS | ||
| 10 | depends on ARM | ||
| 11 | depends on OF | ||
| 12 | |||
| 13 | config PCI_AARDVARK | ||
| 14 | bool "Aardvark PCIe controller" | ||
| 15 | depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST | ||
| 16 | depends on OF | ||
| 17 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 18 | help | ||
| 19 | Add support for the Aardvark 64-bit PCIe host controller. This | ||
| 20 | controller is part of the South Bridge of the Marvell Armada | ||
| 21 | 3700 SoC. | ||
| 22 | |||
| 23 | menu "Cadence PCIe controllers support" | ||
| 24 | |||
| 25 | config PCIE_CADENCE | ||
| 26 | bool | ||
| 27 | |||
| 28 | config PCIE_CADENCE_HOST | ||
| 29 | bool "Cadence PCIe host controller" | ||
| 30 | depends on OF | ||
| 31 | depends on PCI | ||
| 32 | select IRQ_DOMAIN | ||
| 33 | select PCIE_CADENCE | ||
| 34 | help | ||
| 35 | Say Y here if you want to support the Cadence PCIe controller in host | ||
| 36 | mode. This PCIe controller may be embedded into many different | ||
| 37 | vendors' SoCs. | ||
| 38 | |||
| 39 | config PCIE_CADENCE_EP | ||
| 40 | bool "Cadence PCIe endpoint controller" | ||
| 41 | depends on OF | ||
| 42 | depends on PCI_ENDPOINT | ||
| 43 | select PCIE_CADENCE | ||
| 44 | help | ||
| 45 | Say Y here if you want to support the Cadence PCIe controller in | ||
| 46 | endpoint mode. This PCIe controller may be embedded into many | ||
| 47 | different vendors' SoCs. | ||
| 48 | |||
| 49 | endmenu | ||
| 50 | |||
| 51 | config PCIE_XILINX_NWL | ||
| 52 | bool "NWL PCIe Core" | ||
| 53 | depends on ARCH_ZYNQMP || COMPILE_TEST | ||
| 54 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 55 | help | ||
| 56 | Say 'Y' here if you want kernel support for the Xilinx | ||
| 57 | NWL PCIe controller. The controller can act as Root Port | ||
| 58 | or End Point, but this option currently enables only | ||
| 59 | Root Port mode. | ||
| 60 | |||
| 61 | config PCI_FTPCI100 | ||
| 62 | bool "Faraday Technology FTPCI100 PCI controller" | ||
| 63 | depends on OF | ||
| 64 | default ARCH_GEMINI | ||
| 65 | |||
| 66 | config PCI_TEGRA | ||
| 67 | bool "NVIDIA Tegra PCIe controller" | ||
| 68 | depends on ARCH_TEGRA || COMPILE_TEST | ||
| 69 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 70 | help | ||
| 71 | Say Y here if you want support for the PCIe host controller found | ||
| 72 | on NVIDIA Tegra SoCs. | ||
| 73 | |||
| 74 | config PCI_RCAR_GEN2 | ||
| 75 | bool "Renesas R-Car Gen2 Internal PCI controller" | ||
| 76 | depends on ARCH_RENESAS || COMPILE_TEST | ||
| 77 | depends on ARM | ||
| 78 | help | ||
| 79 | Say Y here if you want internal PCI support on R-Car Gen2 SoCs. | ||
| 80 | There are 3 internal PCI controllers available with a single | ||
| 81 | built-in EHCI/OHCI host controller present on each one. | ||
| 82 | |||
| 83 | config PCIE_RCAR | ||
| 84 | bool "Renesas R-Car PCIe controller" | ||
| 85 | depends on ARCH_RENESAS || COMPILE_TEST | ||
| 86 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 87 | help | ||
| 88 | Say Y here if you want PCIe controller support on R-Car SoCs. | ||
| 89 | |||
| 90 | config PCI_HOST_COMMON | ||
| 91 | bool | ||
| 92 | select PCI_ECAM | ||
| 93 | |||
| 94 | config PCI_HOST_GENERIC | ||
| 95 | bool "Generic PCI host controller" | ||
| 96 | depends on OF | ||
| 97 | select PCI_HOST_COMMON | ||
| 98 | select IRQ_DOMAIN | ||
| 99 | select PCI_DOMAINS | ||
| 100 | help | ||
| 101 | Say Y here if you want to support a simple generic PCI host | ||
| 102 | controller, such as the one emulated by kvmtool. | ||
| 103 | |||
| 104 | config PCIE_XILINX | ||
| 105 | bool "Xilinx AXI PCIe host bridge support" | ||
| 106 | depends on ARCH_ZYNQ || MICROBLAZE || (MIPS && PCI_DRIVERS_GENERIC) || COMPILE_TEST | ||
| 107 | help | ||
| 108 | Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe | ||
| 109 | Host Bridge driver. | ||
| 110 | |||
| 111 | config PCI_XGENE | ||
| 112 | bool "X-Gene PCIe controller" | ||
| 113 | depends on ARM64 || COMPILE_TEST | ||
| 114 | depends on OF || (ACPI && PCI_QUIRKS) | ||
| 115 | help | ||
| 116 | Say Y here if you want internal PCI support on the APM X-Gene SoC. | ||
| 117 | There are 5 internal PCIe ports available. Each port is Gen3 capable | ||
| 118 | and supports link widths from x1 to x8. | ||
| 119 | |||
| 120 | config PCI_XGENE_MSI | ||
| 121 | bool "X-Gene v1 PCIe MSI feature" | ||
| 122 | depends on PCI_XGENE | ||
| 123 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 124 | default y | ||
| 125 | help | ||
| 126 | Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. | ||
| 127 | This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. | ||
| 128 | |||
| 129 | config PCI_V3_SEMI | ||
| 130 | bool "V3 Semiconductor PCI controller" | ||
| 131 | depends on OF | ||
| 132 | depends on ARM || COMPILE_TEST | ||
| 133 | default ARCH_INTEGRATOR_AP | ||
| 134 | |||
| 135 | config PCI_VERSATILE | ||
| 136 | bool "ARM Versatile PB PCI controller" | ||
| 137 | depends on ARCH_VERSATILE | ||
| 138 | |||
| 139 | config PCIE_IPROC | ||
| 140 | tristate | ||
| 141 | select PCI_DOMAINS | ||
| 142 | help | ||
| 143 | This enables support for the iProc PCIe core controller on | ||
| 144 | Broadcom's iProc family of SoCs. An appropriate bus interface | ||
| 145 | driver needs to be enabled to select this. | ||
| 146 | |||
| 147 | config PCIE_IPROC_PLATFORM | ||
| 148 | tristate "Broadcom iProc PCIe platform bus driver" | ||
| 149 | depends on ARCH_BCM_IPROC || (ARM && COMPILE_TEST) | ||
| 150 | depends on OF | ||
| 151 | select PCIE_IPROC | ||
| 152 | default ARCH_BCM_IPROC | ||
| 153 | help | ||
| 154 | Say Y here if you want to use the Broadcom iProc PCIe controller | ||
| 155 | through the generic platform bus interface. | ||
| 156 | |||
| 157 | config PCIE_IPROC_BCMA | ||
| 158 | tristate "Broadcom iProc PCIe BCMA bus driver" | ||
| 159 | depends on ARM && (ARCH_BCM_IPROC || COMPILE_TEST) | ||
| 160 | select PCIE_IPROC | ||
| 161 | select BCMA | ||
| 162 | default ARCH_BCM_5301X | ||
| 163 | help | ||
| 164 | Say Y here if you want to use the Broadcom iProc PCIe controller | ||
| 165 | through the BCMA bus interface. | ||
| 166 | |||
| 167 | config PCIE_IPROC_MSI | ||
| 168 | bool "Broadcom iProc PCIe MSI support" | ||
| 169 | depends on PCIE_IPROC_PLATFORM || PCIE_IPROC_BCMA | ||
| 170 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 171 | default ARCH_BCM_IPROC | ||
| 172 | help | ||
| 173 | Say Y here if you want to enable MSI support for Broadcom's iProc | ||
| 174 | PCIe controller. | ||
| 175 | |||
| 176 | config PCIE_ALTERA | ||
| 177 | bool "Altera PCIe controller" | ||
| 178 | depends on ARM || NIOS2 || COMPILE_TEST | ||
| 179 | select PCI_DOMAINS | ||
| 180 | help | ||
| 181 | Say Y here if you want to enable PCIe controller support on Altera | ||
| 182 | FPGAs. | ||
| 183 | |||
| 184 | config PCIE_ALTERA_MSI | ||
| 185 | bool "Altera PCIe MSI feature" | ||
| 186 | depends on PCIE_ALTERA | ||
| 187 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 188 | help | ||
| 189 | Say Y here if you want PCIe MSI support for the Altera FPGA. | ||
| 190 | This MSI driver supports the Altera MSI-to-GIC controller IP. | ||
| 191 | |||
| 192 | config PCI_HOST_THUNDER_PEM | ||
| 193 | bool "Cavium Thunder PCIe controller to off-chip devices" | ||
| 194 | depends on ARM64 || COMPILE_TEST | ||
| 195 | depends on OF || (ACPI && PCI_QUIRKS) | ||
| 196 | select PCI_HOST_COMMON | ||
| 197 | help | ||
| 198 | Say Y here if you want PCIe support for CN88XX Cavium Thunder SoCs. | ||
| 199 | |||
| 200 | config PCI_HOST_THUNDER_ECAM | ||
| 201 | bool "Cavium Thunder ECAM controller to on-chip devices on pass-1.x silicon" | ||
| 202 | depends on ARM64 || COMPILE_TEST | ||
| 203 | depends on OF || (ACPI && PCI_QUIRKS) | ||
| 204 | select PCI_HOST_COMMON | ||
| 205 | help | ||
| 206 | Say Y here if you want ECAM support for CN88XX-Pass-1.x Cavium Thunder SoCs. | ||
| 207 | |||
| 208 | config PCIE_ROCKCHIP | ||
| 209 | bool | ||
| 210 | depends on PCI | ||
| 211 | |||
| 212 | config PCIE_ROCKCHIP_HOST | ||
| 213 | tristate "Rockchip PCIe host controller" | ||
| 214 | depends on ARCH_ROCKCHIP || COMPILE_TEST | ||
| 215 | depends on OF | ||
| 216 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 217 | select MFD_SYSCON | ||
| 218 | select PCIE_ROCKCHIP | ||
| 219 | help | ||
| 220 | Say Y here if you want internal PCI support on Rockchip SoCs. | ||
| 221 | There is 1 internal PCIe port available, supporting Gen2 with | ||
| 222 | 4 slots. | ||
| 223 | |||
| 224 | config PCIE_ROCKCHIP_EP | ||
| 225 | bool "Rockchip PCIe endpoint controller" | ||
| 226 | depends on ARCH_ROCKCHIP || COMPILE_TEST | ||
| 227 | depends on OF | ||
| 228 | depends on PCI_ENDPOINT | ||
| 229 | select MFD_SYSCON | ||
| 230 | select PCIE_ROCKCHIP | ||
| 231 | help | ||
| 232 | Say Y here if you want to support the Rockchip PCIe controller in | ||
| 233 | endpoint mode on Rockchip SoCs. There is 1 internal PCIe port | ||
| 234 | available, supporting Gen2 with 4 slots. | ||
| 235 | |||
| 236 | config PCIE_MEDIATEK | ||
| 237 | bool "MediaTek PCIe controller" | ||
| 238 | depends on ARCH_MEDIATEK || COMPILE_TEST | ||
| 239 | depends on OF | ||
| 240 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 241 | help | ||
| 242 | Say Y here if you want to enable PCIe controller support on | ||
| 243 | MediaTek SoCs. | ||
| 244 | |||
| 245 | config PCIE_TANGO_SMP8759 | ||
| 246 | bool "Tango SMP8759 PCIe controller (DANGEROUS)" | ||
| 247 | depends on ARCH_TANGO && PCI_MSI && OF | ||
| 248 | depends on BROKEN | ||
| 249 | select PCI_HOST_COMMON | ||
| 250 | help | ||
| 251 | Say Y here to enable PCIe controller support for Sigma Designs | ||
| 252 | Tango SMP8759-based systems. | ||
| 253 | |||
| 254 | Note: The SMP8759 controller multiplexes PCI config and MMIO | ||
| 255 | accesses, and Linux doesn't provide a way to serialize them. | ||
| 256 | This can lead to data corruption if drivers perform concurrent | ||
| 257 | config and MMIO accesses. | ||
| 258 | |||
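The note above says Linux cannot serialize the muxed accesses. The sketch below (hypothetical names throughout, not this driver's API) shows roughly what such serialization would require, and why it is unenforceable: every driver's plain readl()/writel() on its own BARs would also have to take the same lock.

```c
#include <linux/io.h>
#include <linux/spinlock.h>

/* Hypothetical lock covering the shared config/MMIO mux. */
static DEFINE_SPINLOCK(smp8759_mux_lock);

/* Config reads could take the lock... */
static u32 smp8759_cfg_read(void __iomem *cfg_addr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&smp8759_mux_lock, flags);
	val = readl(cfg_addr);	/* config cycle through the mux */
	spin_unlock_irqrestore(&smp8759_mux_lock, flags);
	return val;
}
/*
 * ...but ordinary drivers do bare readl()/writel() with no hook for
 * this lock -- hence "depends on BROKEN" above.
 */
```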
| 259 | config VMD | ||
| 260 | depends on PCI_MSI && X86_64 && SRCU | ||
| 261 | tristate "Intel Volume Management Device Driver" | ||
| 262 | ---help--- | ||
| 263 | Adds support for the Intel Volume Management Device (VMD). VMD is a | ||
| 264 | secondary PCI host bridge that allows PCI Express root ports, | ||
| 265 | and devices attached to them, to be removed from the default | ||
| 266 | PCI domain and placed within the VMD domain. This provides | ||
| 267 | more bus resources than are otherwise possible with a | ||
| 268 | single domain. If you know your system provides one of these and | ||
| 269 | has devices attached to it, say Y; if you are not sure, say N. | ||
| 270 | |||
| 271 | To compile this driver as a module, choose M here: the | ||
| 272 | module will be called vmd. | ||
| 273 | |||
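To make the separate-domain point in the VMD help text concrete, here is a hedged lookup sketch. The 0x10000 segment number is an assumption about how VMD domains are commonly numbered (check `lspci -D` on real hardware), not something this Kconfig text guarantees.

```c
#include <linux/pci.h>

/*
 * Illustrative only: devices behind VMD live in their own PCI domain,
 * so lookups must pass that domain (segment) number explicitly.
 * The 0x10000 value is an assumption, not part of the driver's API.
 */
static struct pci_dev *vmd_child_at(unsigned int bus, unsigned int devfn)
{
	return pci_get_domain_bus_and_slot(0x10000, bus, devfn);
}
```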
| 274 | source "drivers/pci/controller/dwc/Kconfig" | ||
| 275 | endmenu | ||
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
new file mode 100644
index 000000000000..24322b92f200
--- /dev/null
+++ b/drivers/pci/controller/Makefile
@@ -0,0 +1,49 @@
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o | ||
| 3 | obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o | ||
| 4 | obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o | ||
| 5 | obj-$(CONFIG_PCI_FTPCI100) += pci-ftpci100.o | ||
| 6 | obj-$(CONFIG_PCI_HYPERV) += pci-hyperv.o | ||
| 7 | obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o | ||
| 8 | obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o | ||
| 9 | obj-$(CONFIG_PCI_TEGRA) += pci-tegra.o | ||
| 10 | obj-$(CONFIG_PCI_RCAR_GEN2) += pci-rcar-gen2.o | ||
| 11 | obj-$(CONFIG_PCIE_RCAR) += pcie-rcar.o | ||
| 12 | obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o | ||
| 13 | obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o | ||
| 14 | obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o | ||
| 15 | obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o | ||
| 16 | obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o | ||
| 17 | obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o | ||
| 18 | obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o | ||
| 19 | obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o | ||
| 20 | obj-$(CONFIG_PCIE_IPROC_MSI) += pcie-iproc-msi.o | ||
| 21 | obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o | ||
| 22 | obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o | ||
| 23 | obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o | ||
| 24 | obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o | ||
| 25 | obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o | ||
| 26 | obj-$(CONFIG_PCIE_ROCKCHIP_EP) += pcie-rockchip-ep.o | ||
| 27 | obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o | ||
| 28 | obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o | ||
| 29 | obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o | ||
| 30 | obj-$(CONFIG_VMD) += vmd.o | ||
| 31 | # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW | ||
| 32 | obj-y += dwc/ | ||
| 33 | |||
| 34 | |||
| 35 | # The following drivers are for devices that use the generic ACPI | ||
| 36 | # pci_root.c driver but don't support standard ECAM config access. | ||
| 37 | # They contain MCFG quirks to replace the generic ECAM accessors with | ||
| 38 | # device-specific ones that are shared with the DT driver. | ||
| 39 | |||
| 40 | # The ACPI driver is generic and should not require driver-specific | ||
| 41 | # config options to be enabled, so we always build these drivers on | ||
| 42 | # ARM64 and use internal ifdefs to only build the pieces we need | ||
| 43 | # depending on whether ACPI, the DT driver, or both are enabled. | ||
| 44 | |||
| 45 | ifdef CONFIG_PCI | ||
| 46 | obj-$(CONFIG_ARM64) += pci-thunder-ecam.o | ||
| 47 | obj-$(CONFIG_ARM64) += pci-thunder-pem.o | ||
| 48 | obj-$(CONFIG_ARM64) += pci-xgene.o | ||
| 49 | endif | ||
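A sketch, with made-up names, of the "internal ifdefs" layout that the Makefile comment above refers to: the object always builds on ARM64, while the ACPI-quirk and DT-driver halves of the source file compile in or out independently.

```c
#include <linux/io.h>

#ifdef CONFIG_ACPI
/* MCFG-quirk accessors used by the generic ACPI pci_root.c driver. */
static int example_acpi_cfg_read(void __iomem *cfg, int where, u32 *val)
{
	*val = 0;	/* device-specific config read would go here */
	return 0;
}
#endif

#ifdef CONFIG_PCI_HOST_THUNDER_PEM
/* DT probe path, sharing the device-specific accessors above. */
#endif
```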
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
new file mode 100644
index 000000000000..16f52c626b4b
--- /dev/null
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -0,0 +1,197 @@
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | menu "DesignWare PCI Core Support" | ||
| 4 | depends on PCI | ||
| 5 | |||
| 6 | config PCIE_DW | ||
| 7 | bool | ||
| 8 | |||
| 9 | config PCIE_DW_HOST | ||
| 10 | bool | ||
| 11 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 12 | select PCIE_DW | ||
| 13 | |||
| 14 | config PCIE_DW_EP | ||
| 15 | bool | ||
| 16 | depends on PCI_ENDPOINT | ||
| 17 | select PCIE_DW | ||
| 18 | |||
| 19 | config PCI_DRA7XX | ||
| 20 | bool | ||
| 21 | |||
| 22 | config PCI_DRA7XX_HOST | ||
| 23 | bool "TI DRA7xx PCIe controller Host Mode" | ||
| 24 | depends on SOC_DRA7XX || COMPILE_TEST | ||
| 25 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 26 | depends on OF && HAS_IOMEM && TI_PIPE3 | ||
| 27 | select PCIE_DW_HOST | ||
| 28 | select PCI_DRA7XX | ||
| 29 | default y | ||
| 30 | help | ||
| 31 | Enables support for the PCIe controller in the DRA7xx SoC to work in | ||
| 32 | host mode. There are two instances of PCIe controller in DRA7xx. | ||
| 33 | This controller can work either as EP or RC. In order to enable | ||
| 34 | host-specific features PCI_DRA7XX_HOST must be selected and in order | ||
| 35 | to enable device-specific features PCI_DRA7XX_EP must be selected. | ||
| 36 | This uses the DesignWare core. | ||
| 37 | |||
| 38 | config PCI_DRA7XX_EP | ||
| 39 | bool "TI DRA7xx PCIe controller Endpoint Mode" | ||
| 40 | depends on SOC_DRA7XX || COMPILE_TEST | ||
| 41 | depends on PCI_ENDPOINT | ||
| 42 | depends on OF && HAS_IOMEM && TI_PIPE3 | ||
| 43 | select PCIE_DW_EP | ||
| 44 | select PCI_DRA7XX | ||
| 45 | help | ||
| 46 | Enables support for the PCIe controller in the DRA7xx SoC to work in | ||
| 47 | endpoint mode. There are two instances of PCIe controller in DRA7xx. | ||
| 48 | This controller can work either as EP or RC. In order to enable | ||
| 49 | host-specific features PCI_DRA7XX_HOST must be selected and in order | ||
| 50 | to enable device-specific features PCI_DRA7XX_EP must be selected. | ||
| 51 | This uses the DesignWare core. | ||
| 52 | |||
| 53 | config PCIE_DW_PLAT | ||
| 54 | bool | ||
| 55 | |||
| 56 | config PCIE_DW_PLAT_HOST | ||
| 57 | bool "Platform bus based DesignWare PCIe Controller - Host mode" | ||
| 58 | depends on PCI && PCI_MSI_IRQ_DOMAIN | ||
| 59 | select PCIE_DW_HOST | ||
| 60 | select PCIE_DW_PLAT | ||
| 61 | default y | ||
| 62 | help | ||
| 63 | Enables support for the PCIe controller in the DesignWare IP to | ||
| 64 | work in host mode. There are two instances of the PCIe controller | ||
| 65 | in the DesignWare IP. | ||
| 66 | This controller can work either as EP or RC. In order to enable | ||
| 67 | host-specific features PCIE_DW_PLAT_HOST must be selected and in | ||
| 68 | order to enable device-specific features PCIE_DW_PLAT_EP must be | ||
| 69 | selected. | ||
| 70 | |||
| 71 | config PCIE_DW_PLAT_EP | ||
| 72 | bool "Platform bus based DesignWare PCIe Controller - Endpoint mode" | ||
| 73 | depends on PCI && PCI_MSI_IRQ_DOMAIN | ||
| 74 | depends on PCI_ENDPOINT | ||
| 75 | select PCIE_DW_EP | ||
| 76 | select PCIE_DW_PLAT | ||
| 77 | help | ||
| 78 | Enables support for the PCIe controller in the DesignWare IP to | ||
| 79 | work in endpoint mode. There are two instances of the PCIe | ||
| 80 | controller in the DesignWare IP. | ||
| 81 | This controller can work either as EP or RC. In order to enable | ||
| 82 | host-specific features PCIE_DW_PLAT_HOST must be selected and in | ||
| 83 | order to enable device-specific features PCIE_DW_PLAT_EP must be | ||
| 84 | selected. | ||
| 85 | |||
| 86 | config PCI_EXYNOS | ||
| 87 | bool "Samsung Exynos PCIe controller" | ||
| 88 | depends on SOC_EXYNOS5440 || COMPILE_TEST | ||
| 89 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 90 | select PCIE_DW_HOST | ||
| 91 | |||
| 92 | config PCI_IMX6 | ||
| 93 | bool "Freescale i.MX6 PCIe controller" | ||
| 94 | depends on SOC_IMX6Q || (ARM && COMPILE_TEST) | ||
| 95 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 96 | select PCIE_DW_HOST | ||
| 97 | |||
| 98 | config PCIE_SPEAR13XX | ||
| 99 | bool "STMicroelectronics SPEAr PCIe controller" | ||
| 100 | depends on ARCH_SPEAR13XX || COMPILE_TEST | ||
| 101 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 102 | select PCIE_DW_HOST | ||
| 103 | help | ||
| 104 | Say Y here if you want PCIe support on SPEAr13XX SoCs. | ||
| 105 | |||
| 106 | config PCI_KEYSTONE | ||
| 107 | bool "TI Keystone PCIe controller" | ||
| 108 | depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) | ||
| 109 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 110 | select PCIE_DW_HOST | ||
| 111 | help | ||
| 112 | Say Y here if you want to enable PCI controller support on Keystone | ||
| 113 | SoCs. The PCI controller on Keystone is based on DesignWare hardware | ||
| 114 | and the driver therefore re-uses the DesignWare core functions | ||
| 115 | for most of its implementation. | ||
| 116 | |||
| 117 | config PCI_LAYERSCAPE | ||
| 118 | bool "Freescale Layerscape PCIe controller" | ||
| 119 | depends on OF && (ARM || ARCH_LAYERSCAPE || COMPILE_TEST) | ||
| 120 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 121 | select MFD_SYSCON | ||
| 122 | select PCIE_DW_HOST | ||
| 123 | help | ||
| 124 | Say Y here if you want PCIe controller support on Layerscape SoCs. | ||
| 125 | |||
| 126 | config PCI_HISI | ||
| 127 | depends on OF && (ARM64 || COMPILE_TEST) | ||
| 128 | bool "HiSilicon Hip05 and Hip06 SoCs PCIe controllers" | ||
| 129 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 130 | select PCIE_DW_HOST | ||
| 131 | select PCI_HOST_COMMON | ||
| 132 | help | ||
| 133 | Say Y here if you want PCIe controller support on HiSilicon | ||
| 134 | Hip05 and Hip06 SoCs. | ||
| 135 | |||
| 136 | config PCIE_QCOM | ||
| 137 | bool "Qualcomm PCIe controller" | ||
| 138 | depends on OF && (ARCH_QCOM || COMPILE_TEST) | ||
| 139 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 140 | select PCIE_DW_HOST | ||
| 141 | help | ||
| 142 | Say Y here to enable PCIe controller support on Qualcomm SoCs. The | ||
| 143 | PCIe controller uses the DesignWare core plus Qualcomm-specific | ||
| 144 | hardware wrappers. | ||
| 145 | |||
| 146 | config PCIE_ARMADA_8K | ||
| 147 | bool "Marvell Armada-8K PCIe controller" | ||
| 148 | depends on ARCH_MVEBU || COMPILE_TEST | ||
| 149 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 150 | select PCIE_DW_HOST | ||
| 151 | help | ||
| 152 | Say Y here if you want to enable PCIe controller support on | ||
| 153 | Armada-8K SoCs. The PCIe controller on Armada-8K is based on | ||
| 154 | DesignWare hardware and the driver therefore re-uses the | ||
| 155 | DesignWare core functions for most of its implementation. | ||
| 156 | |||
| 157 | config PCIE_ARTPEC6 | ||
| 158 | bool | ||
| 159 | |||
| 160 | config PCIE_ARTPEC6_HOST | ||
| 161 | bool "Axis ARTPEC-6 PCIe controller Host Mode" | ||
| 162 | depends on MACH_ARTPEC6 || COMPILE_TEST | ||
| 163 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 164 | select PCIE_DW_HOST | ||
| 165 | select PCIE_ARTPEC6 | ||
| 166 | help | ||
| 167 | Enables support for the PCIe controller in the ARTPEC-6 SoC to work in | ||
| 168 | host mode. This uses the DesignWare core. | ||
| 169 | |||
| 170 | config PCIE_ARTPEC6_EP | ||
| 171 | bool "Axis ARTPEC-6 PCIe controller Endpoint Mode" | ||
| 172 | depends on MACH_ARTPEC6 || COMPILE_TEST | ||
| 173 | depends on PCI_ENDPOINT | ||
| 174 | select PCIE_DW_EP | ||
| 175 | select PCIE_ARTPEC6 | ||
| 176 | help | ||
| 177 | Enables support for the PCIe controller in the ARTPEC-6 SoC to work in | ||
| 178 | endpoint mode. This uses the DesignWare core. | ||
| 179 | |||
| 180 | config PCIE_KIRIN | ||
| 181 | depends on OF && (ARM64 || COMPILE_TEST) | ||
| 182 | bool "HiSilicon Kirin series SoCs PCIe controllers" | ||
| 183 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 184 | select PCIE_DW_HOST | ||
| 185 | help | ||
| 186 | Say Y here if you want PCIe controller support | ||
| 187 | on HiSilicon Kirin series SoCs. | ||
| 188 | |||
| 189 | config PCIE_HISI_STB | ||
| 190 | bool "HiSilicon STB SoCs PCIe controllers" | ||
| 191 | depends on ARCH_HISI || COMPILE_TEST | ||
| 192 | depends on PCI_MSI_IRQ_DOMAIN | ||
| 193 | select PCIE_DW_HOST | ||
| 194 | help | ||
| 195 | Say Y here if you want PCIe controller support on HiSilicon STB SoCs. | ||
| 196 | |||
| 197 | endmenu | ||
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile
new file mode 100644
index 000000000000..5d2ce72c7a52
--- /dev/null
+++ b/drivers/pci/controller/dwc/Makefile
@@ -0,0 +1,30 @@
| 1 | # SPDX-License-Identifier: GPL-2.0 | ||
| 2 | obj-$(CONFIG_PCIE_DW) += pcie-designware.o | ||
| 3 | obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o | ||
| 4 | obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o | ||
| 5 | obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o | ||
| 6 | obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o | ||
| 7 | obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o | ||
| 8 | obj-$(CONFIG_PCI_IMX6) += pci-imx6.o | ||
| 9 | obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o | ||
| 10 | obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o | ||
| 11 | obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o | ||
| 12 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o | ||
| 13 | obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o | ||
| 14 | obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o | ||
| 15 | obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o | ||
| 16 | obj-$(CONFIG_PCIE_HISI_STB) += pcie-histb.o | ||
| 17 | |||
| 18 | # The following drivers are for devices that use the generic ACPI | ||
| 19 | # pci_root.c driver but don't support standard ECAM config access. | ||
| 20 | # They contain MCFG quirks to replace the generic ECAM accessors with | ||
| 21 | # device-specific ones that are shared with the DT driver. | ||
| 22 | |||
| 23 | # The ACPI driver is generic and should not require driver-specific | ||
| 24 | # config options to be enabled, so we always build these drivers on | ||
| 25 | # ARM64 and use internal ifdefs to only build the pieces we need | ||
| 26 | # depending on whether ACPI, the DT driver, or both are enabled. | ||
| 27 | |||
| 28 | ifdef CONFIG_PCI | ||
| 29 | obj-$(CONFIG_ARM64) += pcie-hisi.o | ||
| 30 | endif | ||
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
new file mode 100644
index 000000000000..cfaeef81d868
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -0,0 +1,846 @@
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com | ||
| 6 | * | ||
| 7 | * Authors: Kishon Vijay Abraham I <kishon@ti.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/device.h> | ||
| 12 | #include <linux/err.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/irq.h> | ||
| 15 | #include <linux/irqdomain.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/of_device.h> | ||
| 19 | #include <linux/of_gpio.h> | ||
| 20 | #include <linux/of_pci.h> | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/phy/phy.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/pm_runtime.h> | ||
| 25 | #include <linux/resource.h> | ||
| 26 | #include <linux/types.h> | ||
| 27 | #include <linux/mfd/syscon.h> | ||
| 28 | #include <linux/regmap.h> | ||
| 29 | |||
| 30 | #include "../../pci.h" | ||
| 31 | #include "pcie-designware.h" | ||
| 32 | |||
| 33 | /* PCIe controller wrapper DRA7XX configuration registers */ | ||
| 34 | |||
| 35 | #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN 0x0024 | ||
| 36 | #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN 0x0028 | ||
| 37 | #define ERR_SYS BIT(0) | ||
| 38 | #define ERR_FATAL BIT(1) | ||
| 39 | #define ERR_NONFATAL BIT(2) | ||
| 40 | #define ERR_COR BIT(3) | ||
| 41 | #define ERR_AXI BIT(4) | ||
| 42 | #define ERR_ECRC BIT(5) | ||
| 43 | #define PME_TURN_OFF BIT(8) | ||
| 44 | #define PME_TO_ACK BIT(9) | ||
| 45 | #define PM_PME BIT(10) | ||
| 46 | #define LINK_REQ_RST BIT(11) | ||
| 47 | #define LINK_UP_EVT BIT(12) | ||
| 48 | #define CFG_BME_EVT BIT(13) | ||
| 49 | #define CFG_MSE_EVT BIT(14) | ||
| 50 | #define INTERRUPTS (ERR_SYS | ERR_FATAL | ERR_NONFATAL | ERR_COR | ERR_AXI | \ | ||
| 51 | ERR_ECRC | PME_TURN_OFF | PME_TO_ACK | PM_PME | \ | ||
| 52 | LINK_REQ_RST | LINK_UP_EVT | CFG_BME_EVT | CFG_MSE_EVT) | ||
| 53 | |||
| 54 | #define PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI 0x0034 | ||
| 55 | #define PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI 0x0038 | ||
| 56 | #define INTA BIT(0) | ||
| 57 | #define INTB BIT(1) | ||
| 58 | #define INTC BIT(2) | ||
| 59 | #define INTD BIT(3) | ||
| 60 | #define MSI BIT(4) | ||
| 61 | #define LEG_EP_INTERRUPTS (INTA | INTB | INTC | INTD) | ||
| 62 | |||
| 63 | #define PCIECTRL_TI_CONF_DEVICE_TYPE 0x0100 | ||
| 64 | #define DEVICE_TYPE_EP 0x0 | ||
| 65 | #define DEVICE_TYPE_LEG_EP 0x1 | ||
| 66 | #define DEVICE_TYPE_RC 0x4 | ||
| 67 | |||
| 68 | #define PCIECTRL_DRA7XX_CONF_DEVICE_CMD 0x0104 | ||
| 69 | #define LTSSM_EN 0x1 | ||
| 70 | |||
| 71 | #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C | ||
| 72 | #define LINK_UP BIT(16) | ||
| 73 | #define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF | ||
| 74 | |||
| 75 | #define EXP_CAP_ID_OFFSET 0x70 | ||
| 76 | |||
| 77 | #define PCIECTRL_TI_CONF_INTX_ASSERT 0x0124 | ||
| 78 | #define PCIECTRL_TI_CONF_INTX_DEASSERT 0x0128 | ||
| 79 | |||
| 80 | #define PCIECTRL_TI_CONF_MSI_XMT 0x012c | ||
| 81 | #define MSI_REQ_GRANT BIT(0) | ||
| 82 | #define MSI_VECTOR_SHIFT 7 | ||
| 83 | |||
| 84 | struct dra7xx_pcie { | ||
| 85 | struct dw_pcie *pci; | ||
| 86 | void __iomem *base; /* DT ti_conf */ | ||
| 87 | int phy_count; /* DT phy-names count */ | ||
| 88 | struct phy **phy; | ||
| 89 | int link_gen; | ||
| 90 | struct irq_domain *irq_domain; | ||
| 91 | enum dw_pcie_device_mode mode; | ||
| 92 | }; | ||
| 93 | |||
| 94 | struct dra7xx_pcie_of_data { | ||
| 95 | enum dw_pcie_device_mode mode; | ||
| 96 | }; | ||
| 97 | |||
| 98 | #define to_dra7xx_pcie(x) dev_get_drvdata((x)->dev) | ||
| 99 | |||
| 100 | static inline u32 dra7xx_pcie_readl(struct dra7xx_pcie *pcie, u32 offset) | ||
| 101 | { | ||
| 102 | return readl(pcie->base + offset); | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset, | ||
| 106 | u32 value) | ||
| 107 | { | ||
| 108 | writel(value, pcie->base + offset); | ||
| 109 | } | ||
| 110 | |||
| 111 | static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) | ||
| 112 | { | ||
| 113 | return pci_addr & DRA7XX_CPU_TO_BUS_ADDR; | ||
| 114 | } | ||
| 115 | |||
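A worked example of dra7xx_pcie_cpu_addr_fixup() above, assuming the usual 0x2000_0000 outbound PCIe window on DRA7xx (an assumption taken from typical device trees, not from this file): only the low 28 bits reach the bus.

```c
/* 0x20001000 & 0x0FFFFFFF == 0x00001000: the window base is masked off. */
_Static_assert((0x20001000 & 0x0FFFFFFF) == 0x00001000,
	       "low 28 bits survive the CPU-to-bus fixup");
```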
| 116 | static int dra7xx_pcie_link_up(struct dw_pcie *pci) | ||
| 117 | { | ||
| 118 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 119 | u32 reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_PHY_CS); | ||
| 120 | |||
| 121 | return !!(reg & LINK_UP); | ||
| 122 | } | ||
| 123 | |||
| 124 | static void dra7xx_pcie_stop_link(struct dw_pcie *pci) | ||
| 125 | { | ||
| 126 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 127 | u32 reg; | ||
| 128 | |||
| 129 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | ||
| 130 | reg &= ~LTSSM_EN; | ||
| 131 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | ||
| 132 | } | ||
| 133 | |||
| 134 | static int dra7xx_pcie_establish_link(struct dw_pcie *pci) | ||
| 135 | { | ||
| 136 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 137 | struct device *dev = pci->dev; | ||
| 138 | u32 reg; | ||
| 139 | u32 exp_cap_off = EXP_CAP_ID_OFFSET; | ||
| 140 | |||
| 141 | if (dw_pcie_link_up(pci)) { | ||
| 142 | dev_err(dev, "link is already up\n"); | ||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | if (dra7xx->link_gen == 1) { | ||
| 147 | dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, | ||
| 148 | 4, ®); | ||
| 149 | if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { | ||
| 150 | reg &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 151 | reg |= PCI_EXP_LNKCAP_SLS_2_5GB; | ||
| 152 | dw_pcie_write(pci->dbi_base + exp_cap_off + | ||
| 153 | PCI_EXP_LNKCAP, 4, reg); | ||
| 154 | } | ||
| 155 | |||
| 156 | dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, | ||
| 157 | 2, ®); | ||
| 158 | if ((reg & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { | ||
| 159 | reg &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 160 | reg |= PCI_EXP_LNKCAP_SLS_2_5GB; | ||
| 161 | dw_pcie_write(pci->dbi_base + exp_cap_off + | ||
| 162 | PCI_EXP_LNKCTL2, 2, reg); | ||
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | ||
| 167 | reg |= LTSSM_EN; | ||
| 168 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | ||
| 169 | |||
| 170 | return 0; | ||
| 171 | } | ||
| 172 | |||
| 173 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) | ||
| 174 | { | ||
| 175 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, | ||
| 176 | LEG_EP_INTERRUPTS | MSI); | ||
| 177 | |||
| 178 | dra7xx_pcie_writel(dra7xx, | ||
| 179 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | ||
| 180 | MSI | LEG_EP_INTERRUPTS); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) | ||
| 184 | { | ||
| 185 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | ||
| 186 | INTERRUPTS); | ||
| 187 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, | ||
| 188 | INTERRUPTS); | ||
| 189 | } | ||
| 190 | |||
| 191 | static void dra7xx_pcie_enable_interrupts(struct dra7xx_pcie *dra7xx) | ||
| 192 | { | ||
| 193 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | ||
| 194 | dra7xx_pcie_enable_msi_interrupts(dra7xx); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int dra7xx_pcie_host_init(struct pcie_port *pp) | ||
| 198 | { | ||
| 199 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 200 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 201 | |||
| 202 | dw_pcie_setup_rc(pp); | ||
| 203 | |||
| 204 | dra7xx_pcie_establish_link(pci); | ||
| 205 | dw_pcie_wait_for_link(pci); | ||
| 206 | dw_pcie_msi_init(pp); | ||
| 207 | dra7xx_pcie_enable_interrupts(dra7xx); | ||
| 208 | |||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { | ||
| 213 | .host_init = dra7xx_pcie_host_init, | ||
| 214 | }; | ||
| 215 | |||
| 216 | static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 217 | irq_hw_number_t hwirq) | ||
| 218 | { | ||
| 219 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | ||
| 220 | irq_set_chip_data(irq, domain->host_data); | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 226 | .map = dra7xx_pcie_intx_map, | ||
| 227 | .xlate = pci_irqd_intx_xlate, | ||
| 228 | }; | ||
| 229 | |||
| 230 | static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) | ||
| 231 | { | ||
| 232 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 233 | struct device *dev = pci->dev; | ||
| 234 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 235 | struct device_node *node = dev->of_node; | ||
| 236 | struct device_node *pcie_intc_node = of_get_next_child(node, NULL); | ||
| 237 | |||
| 238 | if (!pcie_intc_node) { | ||
| 239 | dev_err(dev, "No PCIe Intc node found\n"); | ||
| 240 | return -ENODEV; | ||
| 241 | } | ||
| 242 | |||
| 243 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | ||
| 244 | &intx_domain_ops, pp); | ||
| 245 | if (!dra7xx->irq_domain) { | ||
| 246 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
| 247 | return -ENODEV; | ||
| 248 | } | ||
| 249 | |||
| 250 | return 0; | ||
| 251 | } | ||
| 252 | |||
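For reference, an illustrative equivalent of the pci_irqd_intx_xlate() callback wired up above: DT encodes INTA..INTD as 1..4, while the linear domain created here uses hwirqs 0..3. The function name and the returned trigger type are assumptions of this sketch, not the real helper.

```c
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/pci.h>

static int example_intx_xlate(struct irq_domain *d, struct device_node *node,
			      const u32 *intspec, unsigned int intsize,
			      unsigned long *out_hwirq, unsigned int *out_type)
{
	if (intsize < 1 || intspec[0] < 1 || intspec[0] > PCI_NUM_INTX)
		return -EINVAL;

	*out_hwirq = intspec[0] - 1;	/* INTA (1) -> hwirq 0 */
	*out_type = IRQ_TYPE_NONE;	/* assumption for this sketch */
	return 0;
}
```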
| 253 | static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg) | ||
| 254 | { | ||
| 255 | struct dra7xx_pcie *dra7xx = arg; | ||
| 256 | struct dw_pcie *pci = dra7xx->pci; | ||
| 257 | struct pcie_port *pp = &pci->pp; | ||
| 258 | unsigned long reg; | ||
| 259 | u32 virq, bit; | ||
| 260 | |||
| 261 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI); | ||
| 262 | |||
| 263 | switch (reg) { | ||
| 264 | case MSI: | ||
| 265 | dw_handle_msi_irq(pp); | ||
| 266 | break; | ||
| 267 | case INTA: | ||
| 268 | case INTB: | ||
| 269 | case INTC: | ||
| 270 | case INTD: | ||
| 271 | for_each_set_bit(bit, ®, PCI_NUM_INTX) { | ||
| 272 | virq = irq_find_mapping(dra7xx->irq_domain, bit); | ||
| 273 | if (virq) | ||
| 274 | generic_handle_irq(virq); | ||
| 275 | } | ||
| 276 | break; | ||
| 277 | } | ||
| 278 | |||
| 279 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, reg); | ||
| 280 | |||
| 281 | return IRQ_HANDLED; | ||
| 282 | } | ||
| 283 | |||
| 284 | static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg) | ||
| 285 | { | ||
| 286 | struct dra7xx_pcie *dra7xx = arg; | ||
| 287 | struct dw_pcie *pci = dra7xx->pci; | ||
| 288 | struct device *dev = pci->dev; | ||
| 289 | struct dw_pcie_ep *ep = &pci->ep; | ||
| 290 | u32 reg; | ||
| 291 | |||
| 292 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN); | ||
| 293 | |||
| 294 | if (reg & ERR_SYS) | ||
| 295 | dev_dbg(dev, "System Error\n"); | ||
| 296 | |||
| 297 | if (reg & ERR_FATAL) | ||
| 298 | dev_dbg(dev, "Fatal Error\n"); | ||
| 299 | |||
| 300 | if (reg & ERR_NONFATAL) | ||
| 301 | dev_dbg(dev, "Non Fatal Error\n"); | ||
| 302 | |||
| 303 | if (reg & ERR_COR) | ||
| 304 | dev_dbg(dev, "Correctable Error\n"); | ||
| 305 | |||
| 306 | if (reg & ERR_AXI) | ||
| 307 | dev_dbg(dev, "AXI tag lookup fatal Error\n"); | ||
| 308 | |||
| 309 | if (reg & ERR_ECRC) | ||
| 310 | dev_dbg(dev, "ECRC Error\n"); | ||
| 311 | |||
| 312 | if (reg & PME_TURN_OFF) | ||
| 313 | dev_dbg(dev, | ||
| 314 | "Power Management Event Turn-Off message received\n"); | ||
| 315 | |||
| 316 | if (reg & PME_TO_ACK) | ||
| 317 | dev_dbg(dev, | ||
| 318 | "Power Management Turn-Off Ack message received\n"); | ||
| 319 | |||
| 320 | if (reg & PM_PME) | ||
| 321 | dev_dbg(dev, "PM Power Management Event message received\n"); | ||
| 322 | |||
| 323 | if (reg & LINK_REQ_RST) | ||
| 324 | dev_dbg(dev, "Link Request Reset\n"); | ||
| 325 | |||
| 326 | if (reg & LINK_UP_EVT) { | ||
| 327 | if (dra7xx->mode == DW_PCIE_EP_TYPE) | ||
| 328 | dw_pcie_ep_linkup(ep); | ||
| 329 | dev_dbg(dev, "Link-up state change\n"); | ||
| 330 | } | ||
| 331 | |||
| 332 | if (reg & CFG_BME_EVT) | ||
| 333 | dev_dbg(dev, "CFG 'Bus Master Enable' change\n"); | ||
| 334 | |||
| 335 | if (reg & CFG_MSE_EVT) | ||
| 336 | dev_dbg(dev, "CFG 'Memory Space Enable' change\n"); | ||
| 337 | |||
| 338 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, reg); | ||
| 339 | |||
| 340 | return IRQ_HANDLED; | ||
| 341 | } | ||
| 342 | |||
| 343 | static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep) | ||
| 344 | { | ||
| 345 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 346 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 347 | enum pci_barno bar; | ||
| 348 | |||
| 349 | for (bar = BAR_0; bar <= BAR_5; bar++) | ||
| 350 | dw_pcie_ep_reset_bar(pci, bar); | ||
| 351 | |||
| 352 | dra7xx_pcie_enable_wrapper_interrupts(dra7xx); | ||
| 353 | } | ||
| 354 | |||
| 355 | static void dra7xx_pcie_raise_legacy_irq(struct dra7xx_pcie *dra7xx) | ||
| 356 | { | ||
| 357 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_ASSERT, 0x1); | ||
| 358 | mdelay(1); | ||
| 359 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_INTX_DEASSERT, 0x1); | ||
| 360 | } | ||
| 361 | |||
| 362 | static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx, | ||
| 363 | u8 interrupt_num) | ||
| 364 | { | ||
| 365 | u32 reg; | ||
| 366 | |||
| 367 | reg = (interrupt_num - 1) << MSI_VECTOR_SHIFT; | ||
| 368 | reg |= MSI_REQ_GRANT; | ||
| 369 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg); | ||
| 370 | } | ||
| 371 | |||
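A worked example of the encoding in dra7xx_pcie_raise_msi_irq() above: raising MSI vector 3 writes ((3 - 1) << 7) | 1 = 0x101 to PCIECTRL_TI_CONF_MSI_XMT.

```c
/* (vector - 1) goes in the bits above MSI_VECTOR_SHIFT; bit 0 is MSI_REQ_GRANT. */
_Static_assert((((3 - 1) << 7) | 0x1) == 0x101,
	       "MSI vector 3 encodes to 0x101");
```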
| 372 | static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 373 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
| 374 | { | ||
| 375 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 376 | struct dra7xx_pcie *dra7xx = to_dra7xx_pcie(pci); | ||
| 377 | |||
| 378 | switch (type) { | ||
| 379 | case PCI_EPC_IRQ_LEGACY: | ||
| 380 | dra7xx_pcie_raise_legacy_irq(dra7xx); | ||
| 381 | break; | ||
| 382 | case PCI_EPC_IRQ_MSI: | ||
| 383 | dra7xx_pcie_raise_msi_irq(dra7xx, interrupt_num); | ||
| 384 | break; | ||
| 385 | default: | ||
| 386 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
| 387 | } | ||
| 388 | |||
| 389 | return 0; | ||
| 390 | } | ||
| 391 | |||
| 392 | static struct dw_pcie_ep_ops pcie_ep_ops = { | ||
| 393 | .ep_init = dra7xx_pcie_ep_init, | ||
| 394 | .raise_irq = dra7xx_pcie_raise_irq, | ||
| 395 | }; | ||
| 396 | |||
| 397 | static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, | ||
| 398 | struct platform_device *pdev) | ||
| 399 | { | ||
| 400 | int ret; | ||
| 401 | struct dw_pcie_ep *ep; | ||
| 402 | struct resource *res; | ||
| 403 | struct device *dev = &pdev->dev; | ||
| 404 | struct dw_pcie *pci = dra7xx->pci; | ||
| 405 | |||
| 406 | ep = &pci->ep; | ||
| 407 | ep->ops = &pcie_ep_ops; | ||
| 408 | |||
| 409 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); | ||
| 410 | pci->dbi_base = devm_ioremap_resource(dev, res); | ||
| 411 | if (IS_ERR(pci->dbi_base)) | ||
| 412 | return PTR_ERR(pci->dbi_base); | ||
| 413 | |||
| 414 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); | ||
| 415 | pci->dbi_base2 = devm_ioremap_resource(dev, res); | ||
| 416 | if (IS_ERR(pci->dbi_base2)) | ||
| 417 | return PTR_ERR(pci->dbi_base2); | ||
| 418 | |||
| 419 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
| 420 | if (!res) | ||
| 421 | return -EINVAL; | ||
| 422 | |||
| 423 | ep->phys_base = res->start; | ||
| 424 | ep->addr_size = resource_size(res); | ||
| 425 | |||
| 426 | ret = dw_pcie_ep_init(ep); | ||
| 427 | if (ret) { | ||
| 428 | dev_err(dev, "failed to initialize endpoint\n"); | ||
| 429 | return ret; | ||
| 430 | } | ||
| 431 | |||
| 432 | return 0; | ||
| 433 | } | ||
| 434 | |||
| 435 | static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, | ||
| 436 | struct platform_device *pdev) | ||
| 437 | { | ||
| 438 | int ret; | ||
| 439 | struct dw_pcie *pci = dra7xx->pci; | ||
| 440 | struct pcie_port *pp = &pci->pp; | ||
| 441 | struct device *dev = pci->dev; | ||
| 442 | struct resource *res; | ||
| 443 | |||
| 444 | pp->irq = platform_get_irq(pdev, 1); | ||
| 445 | if (pp->irq < 0) { | ||
| 446 | dev_err(dev, "missing IRQ resource\n"); | ||
| 447 | return pp->irq; | ||
| 448 | } | ||
| 449 | |||
| 450 | ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler, | ||
| 451 | IRQF_SHARED | IRQF_NO_THREAD, | ||
| 452 | "dra7-pcie-msi", dra7xx); | ||
| 453 | if (ret) { | ||
| 454 | dev_err(dev, "failed to request irq\n"); | ||
| 455 | return ret; | ||
| 456 | } | ||
| 457 | |||
| 458 | ret = dra7xx_pcie_init_irq_domain(pp); | ||
| 459 | if (ret < 0) | ||
| 460 | return ret; | ||
| 461 | |||
| 462 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); | ||
| 463 | pci->dbi_base = devm_ioremap_resource(dev, res); | ||
| 464 | if (IS_ERR(pci->dbi_base)) | ||
| 465 | return PTR_ERR(pci->dbi_base); | ||
| 466 | |||
| 467 | pp->ops = &dra7xx_pcie_host_ops; | ||
| 468 | |||
| 469 | ret = dw_pcie_host_init(pp); | ||
| 470 | if (ret) { | ||
| 471 | dev_err(dev, "failed to initialize host\n"); | ||
| 472 | return ret; | ||
| 473 | } | ||
| 474 | |||
| 475 | return 0; | ||
| 476 | } | ||
| 477 | |||
| 478 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 479 | .cpu_addr_fixup = dra7xx_pcie_cpu_addr_fixup, | ||
| 480 | .start_link = dra7xx_pcie_establish_link, | ||
| 481 | .stop_link = dra7xx_pcie_stop_link, | ||
| 482 | .link_up = dra7xx_pcie_link_up, | ||
| 483 | }; | ||
| 484 | |||
| 485 | static void dra7xx_pcie_disable_phy(struct dra7xx_pcie *dra7xx) | ||
| 486 | { | ||
| 487 | int phy_count = dra7xx->phy_count; | ||
| 488 | |||
| 489 | while (phy_count--) { | ||
| 490 | phy_power_off(dra7xx->phy[phy_count]); | ||
| 491 | phy_exit(dra7xx->phy[phy_count]); | ||
| 492 | } | ||
| 493 | } | ||
| 494 | |||
| 495 | static int dra7xx_pcie_enable_phy(struct dra7xx_pcie *dra7xx) | ||
| 496 | { | ||
| 497 | int phy_count = dra7xx->phy_count; | ||
| 498 | int ret; | ||
| 499 | int i; | ||
| 500 | |||
| 501 | for (i = 0; i < phy_count; i++) { | ||
| 502 | ret = phy_init(dra7xx->phy[i]); | ||
| 503 | if (ret < 0) | ||
| 504 | goto err_phy; | ||
| 505 | |||
| 506 | ret = phy_power_on(dra7xx->phy[i]); | ||
| 507 | if (ret < 0) { | ||
| 508 | phy_exit(dra7xx->phy[i]); | ||
| 509 | goto err_phy; | ||
| 510 | } | ||
| 511 | } | ||
| 512 | |||
| 513 | return 0; | ||
| 514 | |||
| 515 | err_phy: | ||
| 516 | while (--i >= 0) { | ||
| 517 | phy_power_off(dra7xx->phy[i]); | ||
| 518 | phy_exit(dra7xx->phy[i]); | ||
| 519 | } | ||
| 520 | |||
| 521 | return ret; | ||
| 522 | } | ||
| 523 | |||
| 524 | static const struct dra7xx_pcie_of_data dra7xx_pcie_rc_of_data = { | ||
| 525 | .mode = DW_PCIE_RC_TYPE, | ||
| 526 | }; | ||
| 527 | |||
| 528 | static const struct dra7xx_pcie_of_data dra7xx_pcie_ep_of_data = { | ||
| 529 | .mode = DW_PCIE_EP_TYPE, | ||
| 530 | }; | ||
| 531 | |||
| 532 | static const struct of_device_id of_dra7xx_pcie_match[] = { | ||
| 533 | { | ||
| 534 | .compatible = "ti,dra7-pcie", | ||
| 535 | .data = &dra7xx_pcie_rc_of_data, | ||
| 536 | }, | ||
| 537 | { | ||
| 538 | .compatible = "ti,dra7-pcie-ep", | ||
| 539 | .data = &dra7xx_pcie_ep_of_data, | ||
| 540 | }, | ||
| 541 | {}, | ||
| 542 | }; | ||
| 543 | |||
| 544 | /* | ||
| 545 | * dra7xx_pcie_ep_unaligned_memaccess: workaround for AM572x/AM571x Errata i870 | ||
| 546 | * @dra7xx: the dra7xx device where the workaround should be applied | ||
| 547 | * | ||
| 548 | * Accesses to the PCIe slave port that are not 32-bit aligned will result | ||
| 549 | * in incorrect mapping to TLP Address and Byte enable fields. Therefore, | ||
| 550 | * byte and half-word accesses are not possible to byte offset 0x1, 0x2, or | ||
| 551 | * 0x3. | ||
| 552 | * | ||
| 553 | * To avoid this issue set PCIE_SS1_AXI2OCP_LEGACY_MODE_ENABLE to 1. | ||
| 554 | */ | ||
| 555 | static int dra7xx_pcie_ep_unaligned_memaccess(struct device *dev) | ||
| 556 | { | ||
| 557 | int ret; | ||
| 558 | struct device_node *np = dev->of_node; | ||
| 559 | struct of_phandle_args args; | ||
| 560 | struct regmap *regmap; | ||
| 561 | |||
| 562 | regmap = syscon_regmap_lookup_by_phandle(np, | ||
| 563 | "ti,syscon-unaligned-access"); | ||
| 564 | if (IS_ERR(regmap)) { | ||
| 565 | dev_dbg(dev, "can't get ti,syscon-unaligned-access\n"); | ||
| 566 | return -EINVAL; | ||
| 567 | } | ||
| 568 | |||
| 569 | ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-unaligned-access", | ||
| 570 | 2, 0, &args); | ||
| 571 | if (ret) { | ||
| 572 | dev_err(dev, "failed to parse ti,syscon-unaligned-access\n"); | ||
| 573 | return ret; | ||
| 574 | } | ||
| 575 | |||
| 576 | ret = regmap_update_bits(regmap, args.args[0], args.args[1], | ||
| 577 | args.args[1]); | ||
| 578 | if (ret) | ||
| 579 | dev_err(dev, "failed to enable unaligned access\n"); | ||
| 580 | |||
| 581 | of_node_put(args.np); | ||
| 582 | |||
| 583 | return ret; | ||
| 584 | } | ||
| 585 | |||
| 586 | static int __init dra7xx_pcie_probe(struct platform_device *pdev) | ||
| 587 | { | ||
| 588 | u32 reg; | ||
| 589 | int ret; | ||
| 590 | int irq; | ||
| 591 | int i; | ||
| 592 | int phy_count; | ||
| 593 | struct phy **phy; | ||
| 594 | struct device_link **link; | ||
| 595 | void __iomem *base; | ||
| 596 | struct resource *res; | ||
| 597 | struct dw_pcie *pci; | ||
| 598 | struct dra7xx_pcie *dra7xx; | ||
| 599 | struct device *dev = &pdev->dev; | ||
| 600 | struct device_node *np = dev->of_node; | ||
| 601 | char name[10]; | ||
| 602 | struct gpio_desc *reset; | ||
| 603 | const struct of_device_id *match; | ||
| 604 | const struct dra7xx_pcie_of_data *data; | ||
| 605 | enum dw_pcie_device_mode mode; | ||
| 606 | |||
| 607 | match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev); | ||
| 608 | if (!match) | ||
| 609 | return -EINVAL; | ||
| 610 | |||
| 611 | data = (struct dra7xx_pcie_of_data *)match->data; | ||
| 612 | mode = (enum dw_pcie_device_mode)data->mode; | ||
| 613 | |||
| 614 | dra7xx = devm_kzalloc(dev, sizeof(*dra7xx), GFP_KERNEL); | ||
| 615 | if (!dra7xx) | ||
| 616 | return -ENOMEM; | ||
| 617 | |||
| 618 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 619 | if (!pci) | ||
| 620 | return -ENOMEM; | ||
| 621 | |||
| 622 | pci->dev = dev; | ||
| 623 | pci->ops = &dw_pcie_ops; | ||
| 624 | |||
| 625 | irq = platform_get_irq(pdev, 0); | ||
| 626 | if (irq < 0) { | ||
| 627 | dev_err(dev, "missing IRQ resource: %d\n", irq); | ||
| 628 | return irq; | ||
| 629 | } | ||
| 630 | |||
| 631 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ti_conf"); | ||
| 632 | base = devm_ioremap_nocache(dev, res->start, resource_size(res)); | ||
| 633 | if (!base) | ||
| 634 | return -ENOMEM; | ||
| 635 | |||
| 636 | phy_count = of_property_count_strings(np, "phy-names"); | ||
| 637 | if (phy_count < 0) { | ||
| 638 | dev_err(dev, "unable to find the strings\n"); | ||
| 639 | return phy_count; | ||
| 640 | } | ||
| 641 | |||
| 642 | phy = devm_kzalloc(dev, sizeof(*phy) * phy_count, GFP_KERNEL); | ||
| 643 | if (!phy) | ||
| 644 | return -ENOMEM; | ||
| 645 | |||
| 646 | link = devm_kzalloc(dev, sizeof(*link) * phy_count, GFP_KERNEL); | ||
| 647 | if (!link) | ||
| 648 | return -ENOMEM; | ||
| 649 | |||
| 650 | for (i = 0; i < phy_count; i++) { | ||
| 651 | snprintf(name, sizeof(name), "pcie-phy%d", i); | ||
| 652 | phy[i] = devm_phy_get(dev, name); | ||
| 653 | if (IS_ERR(phy[i])) | ||
| 654 | return PTR_ERR(phy[i]); | ||
| 655 | |||
| 656 | link[i] = device_link_add(dev, &phy[i]->dev, DL_FLAG_STATELESS); | ||
| 657 | if (!link[i]) { | ||
| 658 | ret = -EINVAL; | ||
| 659 | goto err_link; | ||
| 660 | } | ||
| 661 | } | ||
| 662 | |||
| 663 | dra7xx->base = base; | ||
| 664 | dra7xx->phy = phy; | ||
| 665 | dra7xx->pci = pci; | ||
| 666 | dra7xx->phy_count = phy_count; | ||
| 667 | |||
| 668 | ret = dra7xx_pcie_enable_phy(dra7xx); | ||
| 669 | if (ret) { | ||
| 670 | dev_err(dev, "failed to enable phy\n"); | ||
| 671 | return ret; | ||
| 672 | } | ||
| 673 | |||
| 674 | platform_set_drvdata(pdev, dra7xx); | ||
| 675 | |||
| 676 | pm_runtime_enable(dev); | ||
| 677 | ret = pm_runtime_get_sync(dev); | ||
| 678 | if (ret < 0) { | ||
| 679 | dev_err(dev, "pm_runtime_get_sync failed\n"); | ||
| 680 | goto err_get_sync; | ||
| 681 | } | ||
| 682 | |||
| 683 | reset = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); | ||
| 684 | if (IS_ERR(reset)) { | ||
| 685 | ret = PTR_ERR(reset); | ||
| 686 | dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret); | ||
| 687 | goto err_gpio; | ||
| 688 | } | ||
| 689 | |||
| 690 | reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD); | ||
| 691 | reg &= ~LTSSM_EN; | ||
| 692 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_DEVICE_CMD, reg); | ||
| 693 | |||
| 694 | dra7xx->link_gen = of_pci_get_max_link_speed(np); | ||
| 695 | if (dra7xx->link_gen < 0 || dra7xx->link_gen > 2) | ||
| 696 | dra7xx->link_gen = 2; | ||
| 697 | |||
| 698 | switch (mode) { | ||
| 699 | case DW_PCIE_RC_TYPE: | ||
| 700 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) { | ||
| 701 | ret = -ENODEV; | ||
| 702 | goto err_gpio; | ||
| 703 | } | ||
| 704 | |||
| 705 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
| 706 | DEVICE_TYPE_RC); | ||
| 707 | ret = dra7xx_add_pcie_port(dra7xx, pdev); | ||
| 708 | if (ret < 0) | ||
| 709 | goto err_gpio; | ||
| 710 | break; | ||
| 711 | case DW_PCIE_EP_TYPE: | ||
| 712 | if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) { | ||
| 713 | ret = -ENODEV; | ||
| 714 | goto err_gpio; | ||
| 715 | } | ||
| 716 | |||
| 717 | dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE, | ||
| 718 | DEVICE_TYPE_EP); | ||
| 719 | |||
| 720 | ret = dra7xx_pcie_ep_unaligned_memaccess(dev); | ||
| 721 | if (ret) | ||
| 722 | goto err_gpio; | ||
| 723 | |||
| 724 | ret = dra7xx_add_pcie_ep(dra7xx, pdev); | ||
| 725 | if (ret < 0) | ||
| 726 | goto err_gpio; | ||
| 727 | break; | ||
| 728 | default: | ||
| 729 | dev_err(dev, "INVALID device type %d\n", mode); | ||
| 730 | } | ||
| 731 | dra7xx->mode = mode; | ||
| 732 | |||
| 733 | ret = devm_request_irq(dev, irq, dra7xx_pcie_irq_handler, | ||
| 734 | IRQF_SHARED, "dra7xx-pcie-main", dra7xx); | ||
| 735 | if (ret) { | ||
| 736 | dev_err(dev, "failed to request irq\n"); | ||
| 737 | goto err_gpio; | ||
| 738 | } | ||
| 739 | |||
| 740 | return 0; | ||
| 741 | |||
| 742 | err_gpio: | ||
| 743 | pm_runtime_put(dev); | ||
| 744 | |||
| 745 | err_get_sync: | ||
| 746 | pm_runtime_disable(dev); | ||
| 747 | dra7xx_pcie_disable_phy(dra7xx); | ||
| 748 | |||
| 749 | err_link: | ||
| 750 | while (--i >= 0) | ||
| 751 | device_link_del(link[i]); | ||
| 752 | |||
| 753 | return ret; | ||
| 754 | } | ||
| 755 | |||
| 756 | #ifdef CONFIG_PM_SLEEP | ||
| 757 | static int dra7xx_pcie_suspend(struct device *dev) | ||
| 758 | { | ||
| 759 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
| 760 | struct dw_pcie *pci = dra7xx->pci; | ||
| 761 | u32 val; | ||
| 762 | |||
| 763 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
| 764 | return 0; | ||
| 765 | |||
| 766 | /* clear MSE */ | ||
| 767 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | ||
| 768 | val &= ~PCI_COMMAND_MEMORY; | ||
| 769 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); | ||
| 770 | |||
| 771 | return 0; | ||
| 772 | } | ||
| 773 | |||
| 774 | static int dra7xx_pcie_resume(struct device *dev) | ||
| 775 | { | ||
| 776 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
| 777 | struct dw_pcie *pci = dra7xx->pci; | ||
| 778 | u32 val; | ||
| 779 | |||
| 780 | if (dra7xx->mode != DW_PCIE_RC_TYPE) | ||
| 781 | return 0; | ||
| 782 | |||
| 783 | /* set MSE */ | ||
| 784 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | ||
| 785 | val |= PCI_COMMAND_MEMORY; | ||
| 786 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); | ||
| 787 | |||
| 788 | return 0; | ||
| 789 | } | ||
| 790 | |||
| 791 | static int dra7xx_pcie_suspend_noirq(struct device *dev) | ||
| 792 | { | ||
| 793 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
| 794 | |||
| 795 | dra7xx_pcie_disable_phy(dra7xx); | ||
| 796 | |||
| 797 | return 0; | ||
| 798 | } | ||
| 799 | |||
| 800 | static int dra7xx_pcie_resume_noirq(struct device *dev) | ||
| 801 | { | ||
| 802 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
| 803 | int ret; | ||
| 804 | |||
| 805 | ret = dra7xx_pcie_enable_phy(dra7xx); | ||
| 806 | if (ret) { | ||
| 807 | dev_err(dev, "failed to enable phy\n"); | ||
| 808 | return ret; | ||
| 809 | } | ||
| 810 | |||
| 811 | return 0; | ||
| 812 | } | ||
| 813 | #endif | ||
| 814 | |||
| 815 | static void dra7xx_pcie_shutdown(struct platform_device *pdev) | ||
| 816 | { | ||
| 817 | struct device *dev = &pdev->dev; | ||
| 818 | struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev); | ||
| 819 | int ret; | ||
| 820 | |||
| 821 | dra7xx_pcie_stop_link(dra7xx->pci); | ||
| 822 | |||
| 823 | ret = pm_runtime_put_sync(dev); | ||
| 824 | if (ret < 0) | ||
| 825 | dev_dbg(dev, "pm_runtime_put_sync failed\n"); | ||
| 826 | |||
| 827 | pm_runtime_disable(dev); | ||
| 828 | dra7xx_pcie_disable_phy(dra7xx); | ||
| 829 | } | ||
| 830 | |||
| 831 | static const struct dev_pm_ops dra7xx_pcie_pm_ops = { | ||
| 832 | SET_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend, dra7xx_pcie_resume) | ||
| 833 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(dra7xx_pcie_suspend_noirq, | ||
| 834 | dra7xx_pcie_resume_noirq) | ||
| 835 | }; | ||
| 836 | |||
| 837 | static struct platform_driver dra7xx_pcie_driver = { | ||
| 838 | .driver = { | ||
| 839 | .name = "dra7-pcie", | ||
| 840 | .of_match_table = of_dra7xx_pcie_match, | ||
| 841 | .suppress_bind_attrs = true, | ||
| 842 | .pm = &dra7xx_pcie_pm_ops, | ||
| 843 | }, | ||
| 844 | .shutdown = dra7xx_pcie_shutdown, | ||
| 845 | }; | ||
| 846 | builtin_platform_driver_probe(dra7xx_pcie_driver, dra7xx_pcie_probe); | ||
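A minimal sketch of the same registration idiom with a fictive driver: builtin_platform_driver_probe() keeps the probe routine in __init memory, so the device can never be re-bound after boot, which is why .suppress_bind_attrs is set above.

```c
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init foo_pcie_probe(struct platform_device *pdev)
{
	return 0;	/* one-shot, boot-time-only probe */
}

static struct platform_driver foo_pcie_driver = {
	.driver = {
		.name = "foo-pcie",
		.suppress_bind_attrs = true,
	},
};
builtin_platform_driver_probe(foo_pcie_driver, foo_pcie_probe);
```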
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
new file mode 100644
index 000000000000..4cc1e5df8c79
--- /dev/null
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -0,0 +1,539 @@
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Samsung EXYNOS SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
| 6 | * http://www.samsung.com | ||
| 7 | * | ||
| 8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/clk.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/gpio.h> | ||
| 14 | #include <linux/interrupt.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/init.h> | ||
| 17 | #include <linux/of_device.h> | ||
| 18 | #include <linux/of_gpio.h> | ||
| 19 | #include <linux/pci.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/phy/phy.h> | ||
| 22 | #include <linux/resource.h> | ||
| 23 | #include <linux/signal.h> | ||
| 24 | #include <linux/types.h> | ||
| 25 | |||
| 26 | #include "pcie-designware.h" | ||
| 27 | |||
| 28 | #define to_exynos_pcie(x) dev_get_drvdata((x)->dev) | ||
| 29 | |||
| 30 | /* PCIe ELBI registers */ | ||
| 31 | #define PCIE_IRQ_PULSE 0x000 | ||
| 32 | #define IRQ_INTA_ASSERT BIT(0) | ||
| 33 | #define IRQ_INTB_ASSERT BIT(2) | ||
| 34 | #define IRQ_INTC_ASSERT BIT(4) | ||
| 35 | #define IRQ_INTD_ASSERT BIT(6) | ||
| 36 | #define PCIE_IRQ_LEVEL 0x004 | ||
| 37 | #define PCIE_IRQ_SPECIAL 0x008 | ||
| 38 | #define PCIE_IRQ_EN_PULSE 0x00c | ||
| 39 | #define PCIE_IRQ_EN_LEVEL 0x010 | ||
| 40 | #define IRQ_MSI_ENABLE BIT(2) | ||
| 41 | #define PCIE_IRQ_EN_SPECIAL 0x014 | ||
| 42 | #define PCIE_PWR_RESET 0x018 | ||
| 43 | #define PCIE_CORE_RESET 0x01c | ||
| 44 | #define PCIE_CORE_RESET_ENABLE BIT(0) | ||
| 45 | #define PCIE_STICKY_RESET 0x020 | ||
| 46 | #define PCIE_NONSTICKY_RESET 0x024 | ||
| 47 | #define PCIE_APP_INIT_RESET 0x028 | ||
| 48 | #define PCIE_APP_LTSSM_ENABLE 0x02c | ||
| 49 | #define PCIE_ELBI_RDLH_LINKUP 0x064 | ||
| 50 | #define PCIE_ELBI_LTSSM_ENABLE 0x1 | ||
| 51 | #define PCIE_ELBI_SLV_AWMISC 0x11c | ||
| 52 | #define PCIE_ELBI_SLV_ARMISC 0x120 | ||
| 53 | #define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) | ||
| 54 | |||
| 55 | struct exynos_pcie_mem_res { | ||
| 56 | void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */ | ||
| 57 | }; | ||
| 58 | |||
| 59 | struct exynos_pcie_clk_res { | ||
| 60 | struct clk *clk; | ||
| 61 | struct clk *bus_clk; | ||
| 62 | }; | ||
| 63 | |||
| 64 | struct exynos_pcie { | ||
| 65 | struct dw_pcie *pci; | ||
| 66 | struct exynos_pcie_mem_res *mem_res; | ||
| 67 | struct exynos_pcie_clk_res *clk_res; | ||
| 68 | const struct exynos_pcie_ops *ops; | ||
| 69 | int reset_gpio; | ||
| 70 | |||
| 71 | struct phy *phy; | ||
| 72 | }; | ||
| 73 | |||
| 74 | struct exynos_pcie_ops { | ||
| 75 | int (*get_mem_resources)(struct platform_device *pdev, | ||
| 76 | struct exynos_pcie *ep); | ||
| 77 | int (*get_clk_resources)(struct exynos_pcie *ep); | ||
| 78 | int (*init_clk_resources)(struct exynos_pcie *ep); | ||
| 79 | void (*deinit_clk_resources)(struct exynos_pcie *ep); | ||
| 80 | }; | ||
| 81 | |||
| 82 | static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, | ||
| 83 | struct exynos_pcie *ep) | ||
| 84 | { | ||
| 85 | struct dw_pcie *pci = ep->pci; | ||
| 86 | struct device *dev = pci->dev; | ||
| 87 | struct resource *res; | ||
| 88 | |||
| 89 | ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); | ||
| 90 | if (!ep->mem_res) | ||
| 91 | return -ENOMEM; | ||
| 92 | |||
| 93 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 94 | ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); | ||
| 95 | if (IS_ERR(ep->mem_res->elbi_base)) | ||
| 96 | return PTR_ERR(ep->mem_res->elbi_base); | ||
| 97 | |||
| 98 | return 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static int exynos5440_pcie_get_clk_resources(struct exynos_pcie *ep) | ||
| 102 | { | ||
| 103 | struct dw_pcie *pci = ep->pci; | ||
| 104 | struct device *dev = pci->dev; | ||
| 105 | |||
| 106 | ep->clk_res = devm_kzalloc(dev, sizeof(*ep->clk_res), GFP_KERNEL); | ||
| 107 | if (!ep->clk_res) | ||
| 108 | return -ENOMEM; | ||
| 109 | |||
| 110 | ep->clk_res->clk = devm_clk_get(dev, "pcie"); | ||
| 111 | if (IS_ERR(ep->clk_res->clk)) { | ||
| 112 | dev_err(dev, "Failed to get pcie rc clock\n"); | ||
| 113 | return PTR_ERR(ep->clk_res->clk); | ||
| 114 | } | ||
| 115 | |||
| 116 | ep->clk_res->bus_clk = devm_clk_get(dev, "pcie_bus"); | ||
| 117 | if (IS_ERR(ep->clk_res->bus_clk)) { | ||
| 118 | dev_err(dev, "Failed to get pcie bus clock\n"); | ||
| 119 | return PTR_ERR(ep->clk_res->bus_clk); | ||
| 120 | } | ||
| 121 | |||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | static int exynos5440_pcie_init_clk_resources(struct exynos_pcie *ep) | ||
| 126 | { | ||
| 127 | struct dw_pcie *pci = ep->pci; | ||
| 128 | struct device *dev = pci->dev; | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | ret = clk_prepare_enable(ep->clk_res->clk); | ||
| 132 | if (ret) { | ||
| 133 | dev_err(dev, "cannot enable pcie rc clock"); | ||
| 134 | return ret; | ||
| 135 | } | ||
| 136 | |||
| 137 | ret = clk_prepare_enable(ep->clk_res->bus_clk); | ||
| 138 | if (ret) { | ||
| 139 | dev_err(dev, "cannot enable pcie bus clock"); | ||
| 140 | goto err_bus_clk; | ||
| 141 | } | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | |||
| 145 | err_bus_clk: | ||
| 146 | clk_disable_unprepare(ep->clk_res->clk); | ||
| 147 | |||
| 148 | return ret; | ||
| 149 | } | ||
| 150 | |||
| 151 | static void exynos5440_pcie_deinit_clk_resources(struct exynos_pcie *ep) | ||
| 152 | { | ||
| 153 | clk_disable_unprepare(ep->clk_res->bus_clk); | ||
| 154 | clk_disable_unprepare(ep->clk_res->clk); | ||
| 155 | } | ||
| 156 | |||
| 157 | static const struct exynos_pcie_ops exynos5440_pcie_ops = { | ||
| 158 | .get_mem_resources = exynos5440_pcie_get_mem_resources, | ||
| 159 | .get_clk_resources = exynos5440_pcie_get_clk_resources, | ||
| 160 | .init_clk_resources = exynos5440_pcie_init_clk_resources, | ||
| 161 | .deinit_clk_resources = exynos5440_pcie_deinit_clk_resources, | ||
| 162 | }; | ||
| 163 | |||
| 164 | static void exynos_pcie_writel(void __iomem *base, u32 val, u32 reg) | ||
| 165 | { | ||
| 166 | writel(val, base + reg); | ||
| 167 | } | ||
| 168 | |||
| 169 | static u32 exynos_pcie_readl(void __iomem *base, u32 reg) | ||
| 170 | { | ||
| 171 | return readl(base + reg); | ||
| 172 | } | ||
| 173 | |||
| 174 | static void exynos_pcie_sideband_dbi_w_mode(struct exynos_pcie *ep, bool on) | ||
| 175 | { | ||
| 176 | u32 val; | ||
| 177 | |||
| 178 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_AWMISC); | ||
| 179 | if (on) | ||
| 180 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 181 | else | ||
| 182 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 183 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_AWMISC); | ||
| 184 | } | ||
| 185 | |||
| 186 | static void exynos_pcie_sideband_dbi_r_mode(struct exynos_pcie *ep, bool on) | ||
| 187 | { | ||
| 188 | u32 val; | ||
| 189 | |||
| 190 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_SLV_ARMISC); | ||
| 191 | if (on) | ||
| 192 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 193 | else | ||
| 194 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 195 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_ELBI_SLV_ARMISC); | ||
| 196 | } | ||
| 197 | |||
| 198 | static void exynos_pcie_assert_core_reset(struct exynos_pcie *ep) | ||
| 199 | { | ||
| 200 | u32 val; | ||
| 201 | |||
| 202 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); | ||
| 203 | val &= ~PCIE_CORE_RESET_ENABLE; | ||
| 204 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); | ||
| 205 | exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_PWR_RESET); | ||
| 206 | exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_STICKY_RESET); | ||
| 207 | exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_NONSTICKY_RESET); | ||
| 208 | } | ||
| 209 | |||
| 210 | static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep) | ||
| 211 | { | ||
| 212 | u32 val; | ||
| 213 | |||
| 214 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_CORE_RESET); | ||
| 215 | val |= PCIE_CORE_RESET_ENABLE; | ||
| 216 | |||
| 217 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_CORE_RESET); | ||
| 218 | exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_STICKY_RESET); | ||
| 219 | exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET); | ||
| 220 | exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET); | ||
| 221 | exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET); | ||
| 222 | } | ||
| 223 | |||
| 224 | static void exynos_pcie_assert_reset(struct exynos_pcie *ep) | ||
| 225 | { | ||
| 226 | struct dw_pcie *pci = ep->pci; | ||
| 227 | struct device *dev = pci->dev; | ||
| 228 | |||
| 229 | if (ep->reset_gpio >= 0) | ||
| 230 | devm_gpio_request_one(dev, ep->reset_gpio, | ||
| 231 | GPIOF_OUT_INIT_HIGH, "RESET"); | ||
| 232 | } | ||
| 233 | |||
| 234 | static int exynos_pcie_establish_link(struct exynos_pcie *ep) | ||
| 235 | { | ||
| 236 | struct dw_pcie *pci = ep->pci; | ||
| 237 | struct pcie_port *pp = &pci->pp; | ||
| 238 | struct device *dev = pci->dev; | ||
| 239 | |||
| 240 | if (dw_pcie_link_up(pci)) { | ||
| 241 | dev_err(dev, "Link already up\n"); | ||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | exynos_pcie_assert_core_reset(ep); | ||
| 246 | |||
| 247 | phy_reset(ep->phy); | ||
| 248 | |||
| 249 | exynos_pcie_writel(ep->mem_res->elbi_base, 1, | ||
| 250 | PCIE_PWR_RESET); | ||
| 251 | |||
| 252 | phy_power_on(ep->phy); | ||
| 253 | phy_init(ep->phy); | ||
| 254 | |||
| 255 | exynos_pcie_deassert_core_reset(ep); | ||
| 256 | dw_pcie_setup_rc(pp); | ||
| 257 | exynos_pcie_assert_reset(ep); | ||
| 258 | |||
| 259 | /* assert LTSSM enable */ | ||
| 260 | exynos_pcie_writel(ep->mem_res->elbi_base, PCIE_ELBI_LTSSM_ENABLE, | ||
| 261 | PCIE_APP_LTSSM_ENABLE); | ||
| 262 | |||
| 263 | /* check if the link is up or not */ | ||
| 264 | if (!dw_pcie_wait_for_link(pci)) | ||
| 265 | return 0; | ||
| 266 | |||
| 267 | phy_power_off(ep->phy); | ||
| 268 | return -ETIMEDOUT; | ||
| 269 | } | ||
| 270 | |||
| 271 | static void exynos_pcie_clear_irq_pulse(struct exynos_pcie *ep) | ||
| 272 | { | ||
| 273 | u32 val; | ||
| 274 | |||
| 275 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_PULSE); | ||
| 276 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_PULSE); | ||
| 277 | } | ||
| 278 | |||
| 279 | static void exynos_pcie_enable_irq_pulse(struct exynos_pcie *ep) | ||
| 280 | { | ||
| 281 | u32 val; | ||
| 282 | |||
| 283 | /* enable INTX interrupt */ | ||
| 284 | val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT | | ||
| 285 | IRQ_INTC_ASSERT | IRQ_INTD_ASSERT; | ||
| 286 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_PULSE); | ||
| 287 | } | ||
| 288 | |||
| 289 | static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg) | ||
| 290 | { | ||
| 291 | struct exynos_pcie *ep = arg; | ||
| 292 | |||
| 293 | exynos_pcie_clear_irq_pulse(ep); | ||
| 294 | return IRQ_HANDLED; | ||
| 295 | } | ||
| 296 | |||
| 297 | static void exynos_pcie_msi_init(struct exynos_pcie *ep) | ||
| 298 | { | ||
| 299 | struct dw_pcie *pci = ep->pci; | ||
| 300 | struct pcie_port *pp = &pci->pp; | ||
| 301 | u32 val; | ||
| 302 | |||
| 303 | dw_pcie_msi_init(pp); | ||
| 304 | |||
| 305 | /* enable MSI interrupt */ | ||
| 306 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_IRQ_EN_LEVEL); | ||
| 307 | val |= IRQ_MSI_ENABLE; | ||
| 308 | exynos_pcie_writel(ep->mem_res->elbi_base, val, PCIE_IRQ_EN_LEVEL); | ||
| 309 | } | ||
| 310 | |||
| 311 | static void exynos_pcie_enable_interrupts(struct exynos_pcie *ep) | ||
| 312 | { | ||
| 313 | exynos_pcie_enable_irq_pulse(ep); | ||
| 314 | |||
| 315 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 316 | exynos_pcie_msi_init(ep); | ||
| 317 | } | ||
| 318 | |||
| 319 | static u32 exynos_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 320 | u32 reg, size_t size) | ||
| 321 | { | ||
| 322 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 323 | u32 val; | ||
| 324 | |||
| 325 | exynos_pcie_sideband_dbi_r_mode(ep, true); | ||
| 326 | dw_pcie_read(base + reg, size, &val); | ||
| 327 | exynos_pcie_sideband_dbi_r_mode(ep, false); | ||
| 328 | return val; | ||
| 329 | } | ||
| 330 | |||
| 331 | static void exynos_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 332 | u32 reg, size_t size, u32 val) | ||
| 333 | { | ||
| 334 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 335 | |||
| 336 | exynos_pcie_sideband_dbi_w_mode(ep, true); | ||
| 337 | dw_pcie_write(base + reg, size, val); | ||
| 338 | exynos_pcie_sideband_dbi_w_mode(ep, false); | ||
| 339 | } | ||
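Editor's note: judging by the register names, the ELBI_SLV_DBI_ENABLE bit routes AXI slave accesses to the DBI space, which is why every DBI access in this driver is bracketed by the mode toggles above (the own-config accessors below follow the same pattern). A usage sketch under that assumption:

```c
/* Sketch: a guarded 16-bit DBI read of the Vendor ID register. */
u32 vendor;

exynos_pcie_sideband_dbi_r_mode(ep, true);
dw_pcie_read(pci->dbi_base + PCI_VENDOR_ID, 2, &vendor);
exynos_pcie_sideband_dbi_r_mode(ep, false);
```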
| 340 | |||
| 341 | static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
| 342 | u32 *val) | ||
| 343 | { | ||
| 344 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 345 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 346 | int ret; | ||
| 347 | |||
| 348 | exynos_pcie_sideband_dbi_r_mode(ep, true); | ||
| 349 | ret = dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 350 | exynos_pcie_sideband_dbi_r_mode(ep, false); | ||
| 351 | return ret; | ||
| 352 | } | ||
| 353 | |||
| 354 | static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | ||
| 355 | u32 val) | ||
| 356 | { | ||
| 357 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 358 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 359 | int ret; | ||
| 360 | |||
| 361 | exynos_pcie_sideband_dbi_w_mode(ep, true); | ||
| 362 | ret = dw_pcie_write(pci->dbi_base + where, size, val); | ||
| 363 | exynos_pcie_sideband_dbi_w_mode(ep, false); | ||
| 364 | return ret; | ||
| 365 | } | ||
| 366 | |||
| 367 | static int exynos_pcie_link_up(struct dw_pcie *pci) | ||
| 368 | { | ||
| 369 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 370 | u32 val; | ||
| 371 | |||
| 372 | val = exynos_pcie_readl(ep->mem_res->elbi_base, PCIE_ELBI_RDLH_LINKUP); | ||
| 373 | if (val == PCIE_ELBI_LTSSM_ENABLE) | ||
| 374 | return 1; | ||
| 375 | |||
| 376 | return 0; | ||
| 377 | } | ||
| 378 | |||
| 379 | static int exynos_pcie_host_init(struct pcie_port *pp) | ||
| 380 | { | ||
| 381 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 382 | struct exynos_pcie *ep = to_exynos_pcie(pci); | ||
| 383 | |||
| 384 | exynos_pcie_establish_link(ep); | ||
| 385 | exynos_pcie_enable_interrupts(ep); | ||
| 386 | |||
| 387 | return 0; | ||
| 388 | } | ||
| 389 | |||
| 390 | static const struct dw_pcie_host_ops exynos_pcie_host_ops = { | ||
| 391 | .rd_own_conf = exynos_pcie_rd_own_conf, | ||
| 392 | .wr_own_conf = exynos_pcie_wr_own_conf, | ||
| 393 | .host_init = exynos_pcie_host_init, | ||
| 394 | }; | ||
| 395 | |||
| 396 | static int __init exynos_add_pcie_port(struct exynos_pcie *ep, | ||
| 397 | struct platform_device *pdev) | ||
| 398 | { | ||
| 399 | struct dw_pcie *pci = ep->pci; | ||
| 400 | struct pcie_port *pp = &pci->pp; | ||
| 401 | struct device *dev = &pdev->dev; | ||
| 402 | int ret; | ||
| 403 | |||
| 404 | pp->irq = platform_get_irq(pdev, 1); | ||
| 405 | if (pp->irq < 0) { | ||
| 406 | dev_err(dev, "failed to get irq\n"); | ||
| 407 | return pp->irq; | ||
| 408 | } | ||
| 409 | ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, | ||
| 410 | IRQF_SHARED, "exynos-pcie", ep); | ||
| 411 | if (ret) { | ||
| 412 | dev_err(dev, "failed to request irq\n"); | ||
| 413 | return ret; | ||
| 414 | } | ||
| 415 | |||
| 416 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 417 | pp->msi_irq = platform_get_irq(pdev, 0); | ||
| 418 | if (pp->msi_irq < 0) { | ||
| 419 | dev_err(dev, "failed to get msi irq\n"); | ||
| 420 | return pp->msi_irq; | ||
| 421 | } | ||
| 422 | } | ||
| 423 | |||
| 424 | pp->root_bus_nr = -1; | ||
| 425 | pp->ops = &exynos_pcie_host_ops; | ||
| 426 | |||
| 427 | ret = dw_pcie_host_init(pp); | ||
| 428 | if (ret) { | ||
| 429 | dev_err(dev, "failed to initialize host\n"); | ||
| 430 | return ret; | ||
| 431 | } | ||
| 432 | |||
| 433 | return 0; | ||
| 434 | } | ||
| 435 | |||
| 436 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 437 | .read_dbi = exynos_pcie_read_dbi, | ||
| 438 | .write_dbi = exynos_pcie_write_dbi, | ||
| 439 | .link_up = exynos_pcie_link_up, | ||
| 440 | }; | ||
| 441 | |||
| 442 | static int __init exynos_pcie_probe(struct platform_device *pdev) | ||
| 443 | { | ||
| 444 | struct device *dev = &pdev->dev; | ||
| 445 | struct dw_pcie *pci; | ||
| 446 | struct exynos_pcie *ep; | ||
| 447 | struct device_node *np = dev->of_node; | ||
| 448 | int ret; | ||
| 449 | |||
| 450 | ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); | ||
| 451 | if (!ep) | ||
| 452 | return -ENOMEM; | ||
| 453 | |||
| 454 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 455 | if (!pci) | ||
| 456 | return -ENOMEM; | ||
| 457 | |||
| 458 | pci->dev = dev; | ||
| 459 | pci->ops = &dw_pcie_ops; | ||
| 460 | |||
| 461 | ep->pci = pci; | ||
| 462 | ep->ops = (const struct exynos_pcie_ops *) | ||
| 463 | of_device_get_match_data(dev); | ||
| 464 | |||
| 465 | ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0); | ||
| 466 | |||
| 467 | ep->phy = devm_of_phy_get(dev, np, NULL); | ||
| 468 | if (IS_ERR(ep->phy)) { | ||
| 469 | if (PTR_ERR(ep->phy) == -EPROBE_DEFER) | ||
| 470 | return PTR_ERR(ep->phy); | ||
| 471 | |||
| 472 | ep->phy = NULL; | ||
| 473 | } | ||
| 474 | |||
| 475 | if (ep->ops && ep->ops->get_mem_resources) { | ||
| 476 | ret = ep->ops->get_mem_resources(pdev, ep); | ||
| 477 | if (ret) | ||
| 478 | return ret; | ||
| 479 | } | ||
| 480 | |||
| 481 | if (ep->ops && ep->ops->get_clk_resources && | ||
| 482 | ep->ops->init_clk_resources) { | ||
| 483 | ret = ep->ops->get_clk_resources(ep); | ||
| 484 | if (ret) | ||
| 485 | return ret; | ||
| 486 | ret = ep->ops->init_clk_resources(ep); | ||
| 487 | if (ret) | ||
| 488 | return ret; | ||
| 489 | } | ||
| 490 | |||
| 491 | platform_set_drvdata(pdev, ep); | ||
| 492 | |||
| 493 | ret = exynos_add_pcie_port(ep, pdev); | ||
| 494 | if (ret < 0) | ||
| 495 | goto fail_probe; | ||
| 496 | |||
| 497 | return 0; | ||
| 498 | |||
| 499 | fail_probe: | ||
| 500 | phy_exit(ep->phy); | ||
| 501 | |||
| 502 | if (ep->ops && ep->ops->deinit_clk_resources) | ||
| 503 | ep->ops->deinit_clk_resources(ep); | ||
| 504 | return ret; | ||
| 505 | } | ||
| 506 | |||
| 507 | static int __exit exynos_pcie_remove(struct platform_device *pdev) | ||
| 508 | { | ||
| 509 | struct exynos_pcie *ep = platform_get_drvdata(pdev); | ||
| 510 | |||
| 511 | if (ep->ops && ep->ops->deinit_clk_resources) | ||
| 512 | ep->ops->deinit_clk_resources(ep); | ||
| 513 | |||
| 514 | return 0; | ||
| 515 | } | ||
| 516 | |||
| 517 | static const struct of_device_id exynos_pcie_of_match[] = { | ||
| 518 | { | ||
| 519 | .compatible = "samsung,exynos5440-pcie", | ||
| 520 | .data = &exynos5440_pcie_ops | ||
| 521 | }, | ||
| 522 | {}, | ||
| 523 | }; | ||
| 524 | |||
| 525 | static struct platform_driver exynos_pcie_driver = { | ||
| 526 | .remove = __exit_p(exynos_pcie_remove), | ||
| 527 | .driver = { | ||
| 528 | .name = "exynos-pcie", | ||
| 529 | .of_match_table = exynos_pcie_of_match, | ||
| 530 | }, | ||
| 531 | }; | ||
| 532 | |||
| 533 | /* Exynos PCIe driver does not allow module unload */ | ||
| 534 | |||
| 535 | static int __init exynos_pcie_init(void) | ||
| 536 | { | ||
| 537 | return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe); | ||
| 538 | } | ||
| 539 | subsys_initcall(exynos_pcie_init); | ||
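Editor's note: platform_driver_probe() binds at most once, at registration time, and does not support deferred probing on kernels of this era, so the -EPROBE_DEFER returned for a not-yet-ready PHY in probe ends the bind rather than retrying it. The dra7xx driver earlier in this series expresses the same probe-once pattern with a helper macro; an equivalent sketch here, assuming device_initcall ordering were acceptable in place of the subsys_initcall above:

```c
/* Sketch: same probe-once registration via the helper macro. */
builtin_platform_driver_probe(exynos_pcie_driver, exynos_pcie_probe);
```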
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c new file mode 100644 index 000000000000..80f604602783 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-imx6.c | |||
| @@ -0,0 +1,871 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Freescale i.MX6 SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Kosagi | ||
| 6 | * http://www.kosagi.com | ||
| 7 | * | ||
| 8 | * Author: Sean Cross <xobs@kosagi.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/clk.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/gpio.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/mfd/syscon.h> | ||
| 16 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | ||
| 17 | #include <linux/mfd/syscon/imx7-iomuxc-gpr.h> | ||
| 18 | #include <linux/module.h> | ||
| 19 | #include <linux/of_gpio.h> | ||
| 20 | #include <linux/of_device.h> | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/regmap.h> | ||
| 24 | #include <linux/regulator/consumer.h> | ||
| 25 | #include <linux/resource.h> | ||
| 26 | #include <linux/signal.h> | ||
| 27 | #include <linux/types.h> | ||
| 28 | #include <linux/interrupt.h> | ||
| 29 | #include <linux/reset.h> | ||
| 30 | |||
| 31 | #include "pcie-designware.h" | ||
| 32 | |||
| 33 | #define to_imx6_pcie(x) dev_get_drvdata((x)->dev) | ||
| 34 | |||
| 35 | enum imx6_pcie_variants { | ||
| 36 | IMX6Q, | ||
| 37 | IMX6SX, | ||
| 38 | IMX6QP, | ||
| 39 | IMX7D, | ||
| 40 | }; | ||
| 41 | |||
| 42 | struct imx6_pcie { | ||
| 43 | struct dw_pcie *pci; | ||
| 44 | int reset_gpio; | ||
| 45 | bool gpio_active_high; | ||
| 46 | struct clk *pcie_bus; | ||
| 47 | struct clk *pcie_phy; | ||
| 48 | struct clk *pcie_inbound_axi; | ||
| 49 | struct clk *pcie; | ||
| 50 | struct regmap *iomuxc_gpr; | ||
| 51 | struct reset_control *pciephy_reset; | ||
| 52 | struct reset_control *apps_reset; | ||
| 53 | enum imx6_pcie_variants variant; | ||
| 54 | u32 tx_deemph_gen1; | ||
| 55 | u32 tx_deemph_gen2_3p5db; | ||
| 56 | u32 tx_deemph_gen2_6db; | ||
| 57 | u32 tx_swing_full; | ||
| 58 | u32 tx_swing_low; | ||
| 59 | int link_gen; | ||
| 60 | struct regulator *vpcie; | ||
| 61 | }; | ||
| 62 | |||
| 63 | /* Parameters for waiting for the PCIe PHY PLL to lock on i.MX7 */ | ||
| 64 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 | ||
| 65 | #define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 | ||
| 66 | #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 | ||
| 67 | |||
| 68 | /* PCIe Root Complex registers (memory-mapped) */ | ||
| 69 | #define PCIE_RC_LCR 0x7c | ||
| 70 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1 | ||
| 71 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2 | ||
| 72 | #define PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK 0xf | ||
| 73 | |||
| 74 | #define PCIE_RC_LCSR 0x80 | ||
| 75 | |||
| 76 | /* PCIe Port Logic registers (memory-mapped) */ | ||
| 77 | #define PL_OFFSET 0x700 | ||
| 78 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) | ||
| 79 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) | ||
| 80 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) | ||
| 81 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | ||
| 82 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | ||
| 83 | #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING (1 << 29) | ||
| 84 | #define PCIE_PHY_DEBUG_R1_XMLH_LINK_UP (1 << 4) | ||
| 85 | |||
| 86 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | ||
| 87 | #define PCIE_PHY_CTRL_DATA_LOC 0 | ||
| 88 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 | ||
| 89 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 | ||
| 90 | #define PCIE_PHY_CTRL_WR_LOC 18 | ||
| 91 | #define PCIE_PHY_CTRL_RD_LOC 19 | ||
| 92 | |||
| 93 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | ||
| 94 | #define PCIE_PHY_STAT_ACK_LOC 16 | ||
| 95 | |||
| 96 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | ||
| 97 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | ||
| 98 | |||
| 99 | /* PHY registers (not memory-mapped) */ | ||
| 100 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | ||
| 101 | #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) | ||
| 102 | |||
| 103 | #define PHY_RX_OVRD_IN_LO 0x1005 | ||
| 104 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | ||
| 105 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | ||
| 106 | |||
| 107 | static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) | ||
| 108 | { | ||
| 109 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 110 | u32 val; | ||
| 111 | u32 max_iterations = 10; | ||
| 112 | u32 wait_counter = 0; | ||
| 113 | |||
| 114 | do { | ||
| 115 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); | ||
| 116 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | ||
| 117 | wait_counter++; | ||
| 118 | |||
| 119 | if (val == exp_val) | ||
| 120 | return 0; | ||
| 121 | |||
| 122 | udelay(1); | ||
| 123 | } while (wait_counter < max_iterations); | ||
| 124 | |||
| 125 | return -ETIMEDOUT; | ||
| 126 | } | ||
| 127 | |||
| 128 | static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) | ||
| 129 | { | ||
| 130 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 131 | u32 val; | ||
| 132 | int ret; | ||
| 133 | |||
| 134 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | ||
| 135 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | ||
| 136 | |||
| 137 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); | ||
| 138 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | ||
| 139 | |||
| 140 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | ||
| 141 | if (ret) | ||
| 142 | return ret; | ||
| 143 | |||
| 144 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | ||
| 145 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | ||
| 146 | |||
| 147 | return pcie_phy_poll_ack(imx6_pcie, 0); | ||
| 148 | } | ||
| 149 | |||
| 150 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ | ||
| 151 | static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) | ||
| 152 | { | ||
| 153 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 154 | u32 val, phy_ctl; | ||
| 155 | int ret; | ||
| 156 | |||
| 157 | ret = pcie_phy_wait_ack(imx6_pcie, addr); | ||
| 158 | if (ret) | ||
| 159 | return ret; | ||
| 160 | |||
| 161 | /* assert Read signal */ | ||
| 162 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; | ||
| 163 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); | ||
| 164 | |||
| 165 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | ||
| 166 | if (ret) | ||
| 167 | return ret; | ||
| 168 | |||
| 169 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); | ||
| 170 | *data = val & 0xffff; | ||
| 171 | |||
| 172 | /* deassert Read signal */ | ||
| 173 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); | ||
| 174 | |||
| 175 | return pcie_phy_poll_ack(imx6_pcie, 0); | ||
| 176 | } | ||
| 177 | |||
| 178 | static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) | ||
| 179 | { | ||
| 180 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 181 | u32 var; | ||
| 182 | int ret; | ||
| 183 | |||
| 184 | /* write addr */ | ||
| 185 | /* cap addr */ | ||
| 186 | ret = pcie_phy_wait_ack(imx6_pcie, addr); | ||
| 187 | if (ret) | ||
| 188 | return ret; | ||
| 189 | |||
| 190 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
| 191 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | ||
| 192 | |||
| 193 | /* capture data */ | ||
| 194 | var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); | ||
| 195 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | ||
| 196 | |||
| 197 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | ||
| 198 | if (ret) | ||
| 199 | return ret; | ||
| 200 | |||
| 201 | /* deassert cap data */ | ||
| 202 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
| 203 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | ||
| 204 | |||
| 205 | /* wait for ack de-assertion */ | ||
| 206 | ret = pcie_phy_poll_ack(imx6_pcie, 0); | ||
| 207 | if (ret) | ||
| 208 | return ret; | ||
| 209 | |||
| 210 | /* assert wr signal */ | ||
| 211 | var = 0x1 << PCIE_PHY_CTRL_WR_LOC; | ||
| 212 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | ||
| 213 | |||
| 214 | /* wait for ack */ | ||
| 215 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | ||
| 216 | if (ret) | ||
| 217 | return ret; | ||
| 218 | |||
| 219 | /* deassert wr signal */ | ||
| 220 | var = data << PCIE_PHY_CTRL_DATA_LOC; | ||
| 221 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | ||
| 222 | |||
| 223 | /* wait for ack de-assertion */ | ||
| 224 | ret = pcie_phy_poll_ack(imx6_pcie, 0); | ||
| 225 | if (ret) | ||
| 226 | return ret; | ||
| 227 | |||
| 228 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x0); | ||
| 229 | |||
| 230 | return 0; | ||
| 231 | } | ||
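Editor's note: addresses such as PHY_RX_OVRD_IN_LO are indices on the separate 16-bit PHY control bus, not MMIO offsets; pcie_phy_read()/pcie_phy_write() implement the capture/ack handshake over PCIE_PHY_CTRL and PCIE_PHY_STAT. A usage sketch:

```c
/* Sketch: read-modify-write of a PHY control-bus register. */
int v;

if (!pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &v))
	pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO,
		       v | PHY_RX_OVRD_IN_LO_RX_PLL_EN);
```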
| 232 | |||
| 233 | static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | ||
| 234 | { | ||
| 235 | u32 tmp; | ||
| 236 | |||
| 237 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); | ||
| 238 | tmp |= (PHY_RX_OVRD_IN_LO_RX_DATA_EN | | ||
| 239 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
| 240 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); | ||
| 241 | |||
| 242 | usleep_range(2000, 3000); | ||
| 243 | |||
| 244 | pcie_phy_read(imx6_pcie, PHY_RX_OVRD_IN_LO, &tmp); | ||
| 245 | tmp &= ~(PHY_RX_OVRD_IN_LO_RX_DATA_EN | | ||
| 246 | PHY_RX_OVRD_IN_LO_RX_PLL_EN); | ||
| 247 | pcie_phy_write(imx6_pcie, PHY_RX_OVRD_IN_LO, tmp); | ||
| 248 | } | ||
| 249 | |||
| 250 | /* Added for PCI abort handling */ | ||
| 251 | static int imx6q_pcie_abort_handler(unsigned long addr, | ||
| 252 | unsigned int fsr, struct pt_regs *regs) | ||
| 253 | { | ||
| 254 | unsigned long pc = instruction_pointer(regs); | ||
| 255 | unsigned long instr = *(unsigned long *)pc; | ||
| 256 | int reg = (instr >> 12) & 15; | ||
| 257 | |||
| 258 | /* | ||
| 259 | * If the instruction being executed was a read, | ||
| 260 | * make it look like it read all-ones. | ||
| 261 | */ | ||
| 262 | if ((instr & 0x0c100000) == 0x04100000) { /* LDR/LDRB (load) */ | ||
| 263 | unsigned long val; | ||
| 264 | |||
| 265 | if (instr & 0x00400000) /* byte access (LDRB) */ | ||
| 266 | val = 255; | ||
| 267 | else /* word access (LDR) */ | ||
| 268 | val = -1; | ||
| 269 | |||
| 270 | regs->uregs[reg] = val; | ||
| 271 | regs->ARM_pc += 4; | ||
| 272 | return 0; | ||
| 273 | } | ||
| 274 | |||
| 275 | if ((instr & 0x0e100090) == 0x00100090) { /* LDRH/LDRSB/LDRSH */ | ||
| 276 | regs->uregs[reg] = -1; | ||
| 277 | regs->ARM_pc += 4; | ||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | |||
| 281 | return 1; | ||
| 282 | } | ||
| 283 | |||
| 284 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | ||
| 285 | { | ||
| 286 | struct device *dev = imx6_pcie->pci->dev; | ||
| 287 | |||
| 288 | switch (imx6_pcie->variant) { | ||
| 289 | case IMX7D: | ||
| 290 | reset_control_assert(imx6_pcie->pciephy_reset); | ||
| 291 | reset_control_assert(imx6_pcie->apps_reset); | ||
| 292 | break; | ||
| 293 | case IMX6SX: | ||
| 294 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 295 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, | ||
| 296 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN); | ||
| 297 | /* Force PCIe PHY reset */ | ||
| 298 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, | ||
| 299 | IMX6SX_GPR5_PCIE_BTNRST_RESET, | ||
| 300 | IMX6SX_GPR5_PCIE_BTNRST_RESET); | ||
| 301 | break; | ||
| 302 | case IMX6QP: | ||
| 303 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 304 | IMX6Q_GPR1_PCIE_SW_RST, | ||
| 305 | IMX6Q_GPR1_PCIE_SW_RST); | ||
| 306 | break; | ||
| 307 | case IMX6Q: | ||
| 308 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 309 | IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18); | ||
| 310 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 311 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | ||
| 312 | break; | ||
| 313 | } | ||
| 314 | |||
| 315 | if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { | ||
| 316 | int ret = regulator_disable(imx6_pcie->vpcie); | ||
| 317 | |||
| 318 | if (ret) | ||
| 319 | dev_err(dev, "failed to disable vpcie regulator: %d\n", | ||
| 320 | ret); | ||
| 321 | } | ||
| 322 | } | ||
| 323 | |||
| 324 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | ||
| 325 | { | ||
| 326 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 327 | struct device *dev = pci->dev; | ||
| 328 | int ret = 0; | ||
| 329 | |||
| 330 | switch (imx6_pcie->variant) { | ||
| 331 | case IMX6SX: | ||
| 332 | ret = clk_prepare_enable(imx6_pcie->pcie_inbound_axi); | ||
| 333 | if (ret) { | ||
| 334 | dev_err(dev, "unable to enable pcie_axi clock\n"); | ||
| 335 | break; | ||
| 336 | } | ||
| 337 | |||
| 338 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 339 | IMX6SX_GPR12_PCIE_TEST_POWERDOWN, 0); | ||
| 340 | break; | ||
| 341 | case IMX6QP: /* FALLTHROUGH */ | ||
| 342 | case IMX6Q: | ||
| 343 | /* power up core phy and enable ref clock */ | ||
| 344 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 345 | IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); | ||
| 346 | /* | ||
| 347 | * The async reset input needs the ref clock to synchronize | ||
| 348 | * internally. If the ref clock only comes up after reset is | ||
| 349 | * released, the internally synced reset window is too short to | ||
| 350 | * meet the requirement, so add a ~10us delay here. | ||
| 351 | */ | ||
| 352 | udelay(10); | ||
| 353 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 354 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | ||
| 355 | break; | ||
| 356 | case IMX7D: | ||
| 357 | break; | ||
| 358 | } | ||
| 359 | |||
| 360 | return ret; | ||
| 361 | } | ||
| 362 | |||
| 363 | static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) | ||
| 364 | { | ||
| 365 | u32 val; | ||
| 366 | unsigned int retries; | ||
| 367 | struct device *dev = imx6_pcie->pci->dev; | ||
| 368 | |||
| 369 | for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { | ||
| 370 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); | ||
| 371 | |||
| 372 | if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) | ||
| 373 | return; | ||
| 374 | |||
| 375 | usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, | ||
| 376 | PHY_PLL_LOCK_WAIT_USLEEP_MAX); | ||
| 377 | } | ||
| 378 | |||
| 379 | dev_err(dev, "PCIe PLL lock timeout\n"); | ||
| 380 | } | ||
| 381 | |||
| 382 | static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | ||
| 383 | { | ||
| 384 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 385 | struct device *dev = pci->dev; | ||
| 386 | int ret; | ||
| 387 | |||
| 388 | if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { | ||
| 389 | ret = regulator_enable(imx6_pcie->vpcie); | ||
| 390 | if (ret) { | ||
| 391 | dev_err(dev, "failed to enable vpcie regulator: %d\n", | ||
| 392 | ret); | ||
| 393 | return; | ||
| 394 | } | ||
| 395 | } | ||
| 396 | |||
| 397 | ret = clk_prepare_enable(imx6_pcie->pcie_phy); | ||
| 398 | if (ret) { | ||
| 399 | dev_err(dev, "unable to enable pcie_phy clock\n"); | ||
| 400 | goto err_pcie_phy; | ||
| 401 | } | ||
| 402 | |||
| 403 | ret = clk_prepare_enable(imx6_pcie->pcie_bus); | ||
| 404 | if (ret) { | ||
| 405 | dev_err(dev, "unable to enable pcie_bus clock\n"); | ||
| 406 | goto err_pcie_bus; | ||
| 407 | } | ||
| 408 | |||
| 409 | ret = clk_prepare_enable(imx6_pcie->pcie); | ||
| 410 | if (ret) { | ||
| 411 | dev_err(dev, "unable to enable pcie clock\n"); | ||
| 412 | goto err_pcie; | ||
| 413 | } | ||
| 414 | |||
| 415 | ret = imx6_pcie_enable_ref_clk(imx6_pcie); | ||
| 416 | if (ret) { | ||
| 417 | dev_err(dev, "unable to enable pcie ref clock\n"); | ||
| 418 | goto err_ref_clk; | ||
| 419 | } | ||
| 420 | |||
| 421 | /* allow the clocks to stabilize */ | ||
| 422 | usleep_range(200, 500); | ||
| 423 | |||
| 424 | /* Some boards don't have PCIe reset GPIO. */ | ||
| 425 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { | ||
| 426 | gpio_set_value_cansleep(imx6_pcie->reset_gpio, | ||
| 427 | imx6_pcie->gpio_active_high); | ||
| 428 | msleep(100); | ||
| 429 | gpio_set_value_cansleep(imx6_pcie->reset_gpio, | ||
| 430 | !imx6_pcie->gpio_active_high); | ||
| 431 | } | ||
| 432 | |||
| 433 | switch (imx6_pcie->variant) { | ||
| 434 | case IMX7D: | ||
| 435 | reset_control_deassert(imx6_pcie->pciephy_reset); | ||
| 436 | imx7d_pcie_wait_for_phy_pll_lock(imx6_pcie); | ||
| 437 | break; | ||
| 438 | case IMX6SX: | ||
| 439 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR5, | ||
| 440 | IMX6SX_GPR5_PCIE_BTNRST_RESET, 0); | ||
| 441 | break; | ||
| 442 | case IMX6QP: | ||
| 443 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | ||
| 444 | IMX6Q_GPR1_PCIE_SW_RST, 0); | ||
| 445 | |||
| 446 | usleep_range(200, 500); | ||
| 447 | break; | ||
| 448 | case IMX6Q: /* Nothing to do */ | ||
| 449 | break; | ||
| 450 | } | ||
| 451 | |||
| 452 | return; | ||
| 453 | |||
| 454 | err_ref_clk: | ||
| 455 | clk_disable_unprepare(imx6_pcie->pcie); | ||
| 456 | err_pcie: | ||
| 457 | clk_disable_unprepare(imx6_pcie->pcie_bus); | ||
| 458 | err_pcie_bus: | ||
| 459 | clk_disable_unprepare(imx6_pcie->pcie_phy); | ||
| 460 | err_pcie_phy: | ||
| 461 | if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { | ||
| 462 | ret = regulator_disable(imx6_pcie->vpcie); | ||
| 463 | if (ret) | ||
| 464 | dev_err(dev, "failed to disable vpcie regulator: %d\n", | ||
| 465 | ret); | ||
| 466 | } | ||
| 467 | } | ||
| 468 | |||
| 469 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) | ||
| 470 | { | ||
| 471 | switch (imx6_pcie->variant) { | ||
| 472 | case IMX7D: | ||
| 473 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 474 | IMX7D_GPR12_PCIE_PHY_REFCLK_SEL, 0); | ||
| 475 | break; | ||
| 476 | case IMX6SX: | ||
| 477 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 478 | IMX6SX_GPR12_PCIE_RX_EQ_MASK, | ||
| 479 | IMX6SX_GPR12_PCIE_RX_EQ_2); | ||
| 480 | /* FALLTHROUGH */ | ||
| 481 | default: | ||
| 482 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 483 | IMX6Q_GPR12_PCIE_CTL_2, 0 << 10); | ||
| 484 | |||
| 485 | /* configure constant input signal to the pcie ctrl and phy */ | ||
| 486 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 487 | IMX6Q_GPR12_LOS_LEVEL, 9 << 4); | ||
| 488 | |||
| 489 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
| 490 | IMX6Q_GPR8_TX_DEEMPH_GEN1, | ||
| 491 | imx6_pcie->tx_deemph_gen1 << 0); | ||
| 492 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
| 493 | IMX6Q_GPR8_TX_DEEMPH_GEN2_3P5DB, | ||
| 494 | imx6_pcie->tx_deemph_gen2_3p5db << 6); | ||
| 495 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
| 496 | IMX6Q_GPR8_TX_DEEMPH_GEN2_6DB, | ||
| 497 | imx6_pcie->tx_deemph_gen2_6db << 12); | ||
| 498 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
| 499 | IMX6Q_GPR8_TX_SWING_FULL, | ||
| 500 | imx6_pcie->tx_swing_full << 18); | ||
| 501 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR8, | ||
| 502 | IMX6Q_GPR8_TX_SWING_LOW, | ||
| 503 | imx6_pcie->tx_swing_low << 25); | ||
| 504 | break; | ||
| 505 | } | ||
| 506 | |||
| 507 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 508 | IMX6Q_GPR12_DEVICE_TYPE, PCI_EXP_TYPE_ROOT_PORT << 12); | ||
| 509 | } | ||
| 510 | |||
| 511 | static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) | ||
| 512 | { | ||
| 513 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 514 | struct device *dev = pci->dev; | ||
| 515 | |||
| 516 | /* check if the link is up or not */ | ||
| 517 | if (!dw_pcie_wait_for_link(pci)) | ||
| 518 | return 0; | ||
| 519 | |||
| 520 | dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | ||
| 521 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), | ||
| 522 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); | ||
| 523 | return -ETIMEDOUT; | ||
| 524 | } | ||
| 525 | |||
| 526 | static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) | ||
| 527 | { | ||
| 528 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 529 | struct device *dev = pci->dev; | ||
| 530 | u32 tmp; | ||
| 531 | unsigned int retries; | ||
| 532 | |||
| 533 | for (retries = 0; retries < 200; retries++) { | ||
| 534 | tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
| 535 | /* Test if the speed change finished. */ | ||
| 536 | if (!(tmp & PORT_LOGIC_SPEED_CHANGE)) | ||
| 537 | return 0; | ||
| 538 | usleep_range(100, 1000); | ||
| 539 | } | ||
| 540 | |||
| 541 | dev_err(dev, "Speed change timeout\n"); | ||
| 542 | return -EINVAL; | ||
| 543 | } | ||
| 544 | |||
| 545 | static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | ||
| 546 | { | ||
| 547 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 548 | struct device *dev = pci->dev; | ||
| 549 | u32 tmp; | ||
| 550 | int ret; | ||
| 551 | |||
| 552 | /* | ||
| 553 | * Force Gen1 operation when starting the link. In case the link is | ||
| 554 | * started in Gen2 mode, there is a possibility the devices on the | ||
| 555 | * bus will not be detected at all. This happens with PCIe switches. | ||
| 556 | */ | ||
| 557 | tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); | ||
| 558 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | ||
| 559 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1; | ||
| 560 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); | ||
| 561 | |||
| 562 | /* Start LTSSM. */ | ||
| 563 | if (imx6_pcie->variant == IMX7D) | ||
| 564 | reset_control_deassert(imx6_pcie->apps_reset); | ||
| 565 | else | ||
| 566 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, | ||
| 567 | IMX6Q_GPR12_PCIE_CTL_2, 1 << 10); | ||
| 568 | |||
| 569 | ret = imx6_pcie_wait_for_link(imx6_pcie); | ||
| 570 | if (ret) | ||
| 571 | goto err_reset_phy; | ||
| 572 | |||
| 573 | if (imx6_pcie->link_gen == 2) { | ||
| 574 | /* Allow Gen2 mode after the link is up. */ | ||
| 575 | tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCR); | ||
| 576 | tmp &= ~PCIE_RC_LCR_MAX_LINK_SPEEDS_MASK; | ||
| 577 | tmp |= PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2; | ||
| 578 | dw_pcie_writel_dbi(pci, PCIE_RC_LCR, tmp); | ||
| 579 | |||
| 580 | /* | ||
| 581 | * Start Directed Speed Change so the best possible | ||
| 582 | * speed both link partners support can be negotiated. | ||
| 583 | */ | ||
| 584 | tmp = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
| 585 | tmp |= PORT_LOGIC_SPEED_CHANGE; | ||
| 586 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, tmp); | ||
| 587 | |||
| 588 | if (imx6_pcie->variant != IMX7D) { | ||
| 589 | /* | ||
| 590 | * On i.MX7, DIRECT_SPEED_CHANGE behaves differently from the | ||
| 591 | * i.MX6 family when no link speed transition occurs and the | ||
| 592 | * link simply stays in Gen1: the bit is then not cleared by | ||
| 593 | * hardware, so waiting for it would report a false failure. | ||
| 594 | * The speed-change wait is therefore skipped on i.MX7D and | ||
| 595 | * only the link-up wait below is performed. | ||
| 596 | */ | ||
| 597 | |||
| 598 | ret = imx6_pcie_wait_for_speed_change(imx6_pcie); | ||
| 599 | if (ret) { | ||
| 600 | dev_err(dev, "Failed to bring link up!\n"); | ||
| 601 | goto err_reset_phy; | ||
| 602 | } | ||
| 603 | } | ||
| 604 | |||
| 605 | /* Make sure link training is finished as well! */ | ||
| 606 | ret = imx6_pcie_wait_for_link(imx6_pcie); | ||
| 607 | if (ret) { | ||
| 608 | dev_err(dev, "Failed to bring link up!\n"); | ||
| 609 | goto err_reset_phy; | ||
| 610 | } | ||
| 611 | } else { | ||
| 612 | dev_info(dev, "Link: Gen2 disabled\n"); | ||
| 613 | } | ||
| 614 | |||
| 615 | tmp = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR); | ||
| 616 | dev_info(dev, "Link up, Gen%i\n", (tmp >> 16) & 0xf); | ||
| 617 | return 0; | ||
| 618 | |||
| 619 | err_reset_phy: | ||
| 620 | dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", | ||
| 621 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), | ||
| 622 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); | ||
| 623 | imx6_pcie_reset_phy(imx6_pcie); | ||
| 624 | return ret; | ||
| 625 | } | ||
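Editor's note: the "Link up, Gen%i" message above decodes the Current Link Speed field from the Link Status half of PCIE_RC_LCSR (Link Status sits in the upper 16 bits of that dword in the standard PCIe capability layout). A sketch of the same decode using the generic mask:

```c
/* Sketch: extract the negotiated speed (1 = Gen1, 2 = Gen2). */
u32 lcsr = dw_pcie_readl_dbi(pci, PCIE_RC_LCSR);
unsigned int speed = (lcsr >> 16) & PCI_EXP_LNKSTA_CLS;
```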
| 626 | |||
| 627 | static int imx6_pcie_host_init(struct pcie_port *pp) | ||
| 628 | { | ||
| 629 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 630 | struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); | ||
| 631 | |||
| 632 | imx6_pcie_assert_core_reset(imx6_pcie); | ||
| 633 | imx6_pcie_init_phy(imx6_pcie); | ||
| 634 | imx6_pcie_deassert_core_reset(imx6_pcie); | ||
| 635 | dw_pcie_setup_rc(pp); | ||
| 636 | imx6_pcie_establish_link(imx6_pcie); | ||
| 637 | |||
| 638 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 639 | dw_pcie_msi_init(pp); | ||
| 640 | |||
| 641 | return 0; | ||
| 642 | } | ||
| 643 | |||
| 644 | static int imx6_pcie_link_up(struct dw_pcie *pci) | ||
| 645 | { | ||
| 646 | return dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1) & | ||
| 647 | PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; | ||
| 648 | } | ||
| 649 | |||
| 650 | static const struct dw_pcie_host_ops imx6_pcie_host_ops = { | ||
| 651 | .host_init = imx6_pcie_host_init, | ||
| 652 | }; | ||
| 653 | |||
| 654 | static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, | ||
| 655 | struct platform_device *pdev) | ||
| 656 | { | ||
| 657 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 658 | struct pcie_port *pp = &pci->pp; | ||
| 659 | struct device *dev = &pdev->dev; | ||
| 660 | int ret; | ||
| 661 | |||
| 662 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 663 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
| 664 | if (pp->msi_irq <= 0) { | ||
| 665 | dev_err(dev, "failed to get MSI irq\n"); | ||
| 666 | return -ENODEV; | ||
| 667 | } | ||
| 668 | } | ||
| 669 | |||
| 670 | pp->root_bus_nr = -1; | ||
| 671 | pp->ops = &imx6_pcie_host_ops; | ||
| 672 | |||
| 673 | ret = dw_pcie_host_init(pp); | ||
| 674 | if (ret) { | ||
| 675 | dev_err(dev, "failed to initialize host\n"); | ||
| 676 | return ret; | ||
| 677 | } | ||
| 678 | |||
| 679 | return 0; | ||
| 680 | } | ||
| 681 | |||
| 682 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 683 | .link_up = imx6_pcie_link_up, | ||
| 684 | }; | ||
| 685 | |||
| 686 | static int imx6_pcie_probe(struct platform_device *pdev) | ||
| 687 | { | ||
| 688 | struct device *dev = &pdev->dev; | ||
| 689 | struct dw_pcie *pci; | ||
| 690 | struct imx6_pcie *imx6_pcie; | ||
| 691 | struct resource *dbi_base; | ||
| 692 | struct device_node *node = dev->of_node; | ||
| 693 | int ret; | ||
| 694 | |||
| 695 | imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL); | ||
| 696 | if (!imx6_pcie) | ||
| 697 | return -ENOMEM; | ||
| 698 | |||
| 699 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 700 | if (!pci) | ||
| 701 | return -ENOMEM; | ||
| 702 | |||
| 703 | pci->dev = dev; | ||
| 704 | pci->ops = &dw_pcie_ops; | ||
| 705 | |||
| 706 | imx6_pcie->pci = pci; | ||
| 707 | imx6_pcie->variant = | ||
| 708 | (enum imx6_pcie_variants)of_device_get_match_data(dev); | ||
| 709 | |||
| 710 | dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 711 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | ||
| 712 | if (IS_ERR(pci->dbi_base)) | ||
| 713 | return PTR_ERR(pci->dbi_base); | ||
| 714 | |||
| 715 | /* Fetch GPIOs */ | ||
| 716 | imx6_pcie->reset_gpio = of_get_named_gpio(node, "reset-gpio", 0); | ||
| 717 | imx6_pcie->gpio_active_high = of_property_read_bool(node, | ||
| 718 | "reset-gpio-active-high"); | ||
| 719 | if (gpio_is_valid(imx6_pcie->reset_gpio)) { | ||
| 720 | ret = devm_gpio_request_one(dev, imx6_pcie->reset_gpio, | ||
| 721 | imx6_pcie->gpio_active_high ? | ||
| 722 | GPIOF_OUT_INIT_HIGH : | ||
| 723 | GPIOF_OUT_INIT_LOW, | ||
| 724 | "PCIe reset"); | ||
| 725 | if (ret) { | ||
| 726 | dev_err(dev, "unable to get reset gpio\n"); | ||
| 727 | return ret; | ||
| 728 | } | ||
| 729 | } else if (imx6_pcie->reset_gpio == -EPROBE_DEFER) { | ||
| 730 | return imx6_pcie->reset_gpio; | ||
| 731 | } | ||
| 732 | |||
| 733 | /* Fetch clocks */ | ||
| 734 | imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy"); | ||
| 735 | if (IS_ERR(imx6_pcie->pcie_phy)) { | ||
| 736 | dev_err(dev, "pcie_phy clock source missing or invalid\n"); | ||
| 737 | return PTR_ERR(imx6_pcie->pcie_phy); | ||
| 738 | } | ||
| 739 | |||
| 740 | imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus"); | ||
| 741 | if (IS_ERR(imx6_pcie->pcie_bus)) { | ||
| 742 | dev_err(dev, "pcie_bus clock source missing or invalid\n"); | ||
| 743 | return PTR_ERR(imx6_pcie->pcie_bus); | ||
| 744 | } | ||
| 745 | |||
| 746 | imx6_pcie->pcie = devm_clk_get(dev, "pcie"); | ||
| 747 | if (IS_ERR(imx6_pcie->pcie)) { | ||
| 748 | dev_err(dev, "pcie clock source missing or invalid\n"); | ||
| 749 | return PTR_ERR(imx6_pcie->pcie); | ||
| 750 | } | ||
| 751 | |||
| 752 | switch (imx6_pcie->variant) { | ||
| 753 | case IMX6SX: | ||
| 754 | imx6_pcie->pcie_inbound_axi = devm_clk_get(dev, | ||
| 755 | "pcie_inbound_axi"); | ||
| 756 | if (IS_ERR(imx6_pcie->pcie_inbound_axi)) { | ||
| 757 | dev_err(dev, "pcie_inbound_axi clock missing or invalid\n"); | ||
| 758 | return PTR_ERR(imx6_pcie->pcie_inbound_axi); | ||
| 759 | } | ||
| 760 | break; | ||
| 761 | case IMX7D: | ||
| 762 | imx6_pcie->pciephy_reset = devm_reset_control_get_exclusive(dev, | ||
| 763 | "pciephy"); | ||
| 764 | if (IS_ERR(imx6_pcie->pciephy_reset)) { | ||
| 765 | dev_err(dev, "Failed to get PCIEPHY reset control\n"); | ||
| 766 | return PTR_ERR(imx6_pcie->pciephy_reset); | ||
| 767 | } | ||
| 768 | |||
| 769 | imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev, | ||
| 770 | "apps"); | ||
| 771 | if (IS_ERR(imx6_pcie->apps_reset)) { | ||
| 772 | dev_err(dev, "Failed to get PCIE APPS reset control\n"); | ||
| 773 | return PTR_ERR(imx6_pcie->apps_reset); | ||
| 774 | } | ||
| 775 | break; | ||
| 776 | default: | ||
| 777 | break; | ||
| 778 | } | ||
| 779 | |||
| 780 | /* Grab GPR config register range */ | ||
| 781 | imx6_pcie->iomuxc_gpr = | ||
| 782 | syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr"); | ||
| 783 | if (IS_ERR(imx6_pcie->iomuxc_gpr)) { | ||
| 784 | dev_err(dev, "unable to find iomuxc registers\n"); | ||
| 785 | return PTR_ERR(imx6_pcie->iomuxc_gpr); | ||
| 786 | } | ||
| 787 | |||
| 788 | /* Grab PCIe PHY Tx Settings */ | ||
| 789 | if (of_property_read_u32(node, "fsl,tx-deemph-gen1", | ||
| 790 | &imx6_pcie->tx_deemph_gen1)) | ||
| 791 | imx6_pcie->tx_deemph_gen1 = 0; | ||
| 792 | |||
| 793 | if (of_property_read_u32(node, "fsl,tx-deemph-gen2-3p5db", | ||
| 794 | &imx6_pcie->tx_deemph_gen2_3p5db)) | ||
| 795 | imx6_pcie->tx_deemph_gen2_3p5db = 0; | ||
| 796 | |||
| 797 | if (of_property_read_u32(node, "fsl,tx-deemph-gen2-6db", | ||
| 798 | &imx6_pcie->tx_deemph_gen2_6db)) | ||
| 799 | imx6_pcie->tx_deemph_gen2_6db = 20; | ||
| 800 | |||
| 801 | if (of_property_read_u32(node, "fsl,tx-swing-full", | ||
| 802 | &imx6_pcie->tx_swing_full)) | ||
| 803 | imx6_pcie->tx_swing_full = 127; | ||
| 804 | |||
| 805 | if (of_property_read_u32(node, "fsl,tx-swing-low", | ||
| 806 | &imx6_pcie->tx_swing_low)) | ||
| 807 | imx6_pcie->tx_swing_low = 127; | ||
| 808 | |||
| 809 | /* Limit link speed */ | ||
| 810 | ret = of_property_read_u32(node, "fsl,max-link-speed", | ||
| 811 | &imx6_pcie->link_gen); | ||
| 812 | if (ret) | ||
| 813 | imx6_pcie->link_gen = 1; | ||
| 814 | |||
| 815 | imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); | ||
| 816 | if (IS_ERR(imx6_pcie->vpcie)) { | ||
| 817 | if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) | ||
| 818 | return -EPROBE_DEFER; | ||
| 819 | imx6_pcie->vpcie = NULL; | ||
| 820 | } | ||
| 821 | |||
| 822 | platform_set_drvdata(pdev, imx6_pcie); | ||
| 823 | |||
| 824 | ret = imx6_add_pcie_port(imx6_pcie, pdev); | ||
| 825 | if (ret < 0) | ||
| 826 | return ret; | ||
| 827 | |||
| 828 | return 0; | ||
| 829 | } | ||
| 830 | |||
| 831 | static void imx6_pcie_shutdown(struct platform_device *pdev) | ||
| 832 | { | ||
| 833 | struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev); | ||
| 834 | |||
| 835 | /* bring down link, so bootloader gets clean state in case of reboot */ | ||
| 836 | imx6_pcie_assert_core_reset(imx6_pcie); | ||
| 837 | } | ||
| 838 | |||
| 839 | static const struct of_device_id imx6_pcie_of_match[] = { | ||
| 840 | { .compatible = "fsl,imx6q-pcie", .data = (void *)IMX6Q, }, | ||
| 841 | { .compatible = "fsl,imx6sx-pcie", .data = (void *)IMX6SX, }, | ||
| 842 | { .compatible = "fsl,imx6qp-pcie", .data = (void *)IMX6QP, }, | ||
| 843 | { .compatible = "fsl,imx7d-pcie", .data = (void *)IMX7D, }, | ||
| 844 | {}, | ||
| 845 | }; | ||
| 846 | |||
| 847 | static struct platform_driver imx6_pcie_driver = { | ||
| 848 | .driver = { | ||
| 849 | .name = "imx6q-pcie", | ||
| 850 | .of_match_table = imx6_pcie_of_match, | ||
| 851 | .suppress_bind_attrs = true, | ||
| 852 | }, | ||
| 853 | .probe = imx6_pcie_probe, | ||
| 854 | .shutdown = imx6_pcie_shutdown, | ||
| 855 | }; | ||
| 856 | |||
| 857 | static int __init imx6_pcie_init(void) | ||
| 858 | { | ||
| 859 | /* | ||
| 860 | * Since probe() can be deferred, make sure hook_fault_code() | ||
| 861 | * is not called after __init memory has been freed by the | ||
| 862 | * kernel. As imx6q_pcie_abort_handler() touches no driver | ||
| 863 | * state, the handler can be installed this early without | ||
| 864 | * risking access to uninitialized driver state. | ||
| 865 | */ | ||
| 866 | hook_fault_code(8, imx6q_pcie_abort_handler, SIGBUS, 0, | ||
| 867 | "external abort on non-linefetch"); | ||
| 868 | |||
| 869 | return platform_driver_register(&imx6_pcie_driver); | ||
| 870 | } | ||
| 871 | device_initcall(imx6_pcie_init); | ||
diff --git a/drivers/pci/controller/dwc/pci-keystone-dw.c b/drivers/pci/controller/dwc/pci-keystone-dw.c new file mode 100644 index 000000000000..0682213328e9 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone-dw.c | |||
| @@ -0,0 +1,484 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * DesignWare application register space functions for Keystone PCI controller | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013-2014 Texas Instruments, Ltd. | ||
| 6 | * http://www.ti.com | ||
| 7 | * | ||
| 8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/irq.h> | ||
| 12 | #include <linux/irqdomain.h> | ||
| 13 | #include <linux/irqreturn.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/of.h> | ||
| 16 | #include <linux/of_pci.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | |||
| 20 | #include "pcie-designware.h" | ||
| 21 | #include "pci-keystone.h" | ||
| 22 | |||
| 23 | /* Application register defines */ | ||
| 24 | #define LTSSM_EN_VAL 1 | ||
| 25 | #define LTSSM_STATE_MASK 0x1f | ||
| 26 | #define LTSSM_STATE_L0 0x11 | ||
| 27 | #define DBI_CS2_EN_VAL 0x20 | ||
| 28 | #define OB_XLAT_EN_VAL 2 | ||
| 29 | |||
| 30 | /* Application registers */ | ||
| 31 | #define CMD_STATUS 0x004 | ||
| 32 | #define CFG_SETUP 0x008 | ||
| 33 | #define OB_SIZE 0x030 | ||
| 34 | #define CFG_PCIM_WIN_SZ_IDX 3 | ||
| 35 | #define CFG_PCIM_WIN_CNT 32 | ||
| 36 | #define SPACE0_REMOTE_CFG_OFFSET 0x1000 | ||
| 37 | #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) | ||
| 38 | #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) | ||
| 39 | |||
| 40 | /* IRQ register defines */ | ||
| 41 | #define IRQ_EOI 0x050 | ||
| 42 | #define IRQ_STATUS 0x184 | ||
| 43 | #define IRQ_ENABLE_SET 0x188 | ||
| 44 | #define IRQ_ENABLE_CLR 0x18c | ||
| 45 | |||
| 46 | #define MSI_IRQ 0x054 | ||
| 47 | #define MSI0_IRQ_STATUS 0x104 | ||
| 48 | #define MSI0_IRQ_ENABLE_SET 0x108 | ||
| 49 | #define MSI0_IRQ_ENABLE_CLR 0x10c | ||
| 50 | |||
| 51 | #define MSI_IRQ_OFFSET 4 | ||
| 52 | |||
| 53 | /* Error IRQ bits */ | ||
| 54 | #define ERR_AER BIT(5) /* ECRC error */ | ||
| 55 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ | ||
| 56 | #define ERR_CORR BIT(3) /* Correctable error */ | ||
| 57 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ | ||
| 58 | #define ERR_FATAL BIT(1) /* Fatal error */ | ||
| 59 | #define ERR_SYS BIT(0) /* System (fatal, non-fatal, or correctable) */ | ||
| 60 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ | ||
| 61 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) | ||
| 62 | #define ERR_FATAL_IRQ (ERR_FATAL | ERR_AXI) | ||
| 63 | #define ERR_IRQ_STATUS_RAW 0x1c0 | ||
| 64 | #define ERR_IRQ_STATUS 0x1c4 | ||
| 65 | #define ERR_IRQ_ENABLE_SET 0x1c8 | ||
| 66 | #define ERR_IRQ_ENABLE_CLR 0x1cc | ||
| 67 | |||
| 68 | /* Config space registers */ | ||
| 69 | #define DEBUG0 0x728 | ||
| 70 | |||
| 71 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) | ||
| 72 | |||
| 73 | static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, | ||
| 74 | u32 *bit_pos) | ||
| 75 | { | ||
| 76 | *reg_offset = offset % 8; /* which of the 8 MSI register banks */ | ||
| 77 | *bit_pos = offset >> 3; /* which status bit within that bank */ | ||
| 78 | } | ||
| 79 | |||
| 80 | phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) | ||
| 81 | { | ||
| 82 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 83 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 84 | |||
| 85 | return ks_pcie->app.start + MSI_IRQ; | ||
| 86 | } | ||
| 87 | |||
| 88 | static u32 ks_dw_app_readl(struct keystone_pcie *ks_pcie, u32 offset) | ||
| 89 | { | ||
| 90 | return readl(ks_pcie->va_app_base + offset); | ||
| 91 | } | ||
| 92 | |||
| 93 | static void ks_dw_app_writel(struct keystone_pcie *ks_pcie, u32 offset, u32 val) | ||
| 94 | { | ||
| 95 | writel(val, ks_pcie->va_app_base + offset); | ||
| 96 | } | ||
| 97 | |||
| 98 | void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) | ||
| 99 | { | ||
| 100 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 101 | struct pcie_port *pp = &pci->pp; | ||
| 102 | struct device *dev = pci->dev; | ||
| 103 | u32 pending, vector; | ||
| 104 | int src, virq; | ||
| 105 | |||
| 106 | pending = ks_dw_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); | ||
| 107 | |||
| 108 | /* | ||
| 109 | * MSI0 status bits 0-3 correspond to vectors 0, 8, 16 and 24; | ||
| 110 | * MSI1 status bits to vectors 1, 9, 17 and 25; and so forth. | ||
| 111 | */ | ||
| 112 | for (src = 0; src < 4; src++) { | ||
| 113 | if (BIT(src) & pending) { | ||
| 114 | vector = offset + (src << 3); | ||
| 115 | virq = irq_linear_revmap(pp->irq_domain, vector); | ||
| 116 | dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", | ||
| 117 | src, vector, virq); | ||
| 118 | generic_handle_irq(virq); | ||
| 119 | } | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp) | ||
| 124 | { | ||
| 125 | u32 reg_offset, bit_pos; | ||
| 126 | struct keystone_pcie *ks_pcie; | ||
| 127 | struct dw_pcie *pci; | ||
| 128 | |||
| 129 | pci = to_dw_pcie_from_pp(pp); | ||
| 130 | ks_pcie = to_keystone_pcie(pci); | ||
| 131 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
| 132 | |||
| 133 | ks_dw_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), | ||
| 134 | BIT(bit_pos)); | ||
| 135 | ks_dw_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); | ||
| 136 | } | ||
| 137 | |||
| 138 | void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) | ||
| 139 | { | ||
| 140 | u32 reg_offset, bit_pos; | ||
| 141 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 142 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 143 | |||
| 144 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
| 145 | ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), | ||
| 146 | BIT(bit_pos)); | ||
| 147 | } | ||
| 148 | |||
| 149 | void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) | ||
| 150 | { | ||
| 151 | u32 reg_offset, bit_pos; | ||
| 152 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 153 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 154 | |||
| 155 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
| 156 | ks_dw_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), | ||
| 157 | BIT(bit_pos)); | ||
| 158 | } | ||
| 159 | |||
| 160 | int ks_dw_pcie_msi_host_init(struct pcie_port *pp) | ||
| 161 | { | ||
| 162 | return dw_pcie_allocate_domains(pp); | ||
| 163 | } | ||
| 164 | |||
| 165 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) | ||
| 166 | { | ||
| 167 | int i; | ||
| 168 | |||
| 169 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
| 170 | ks_dw_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); | ||
| 171 | } | ||
| 172 | |||
| 173 | void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset) | ||
| 174 | { | ||
| 175 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 176 | struct device *dev = pci->dev; | ||
| 177 | u32 pending; | ||
| 178 | int virq; | ||
| 179 | |||
| 180 | pending = ks_dw_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); | ||
| 181 | |||
| 182 | if (BIT(0) & pending) { | ||
| 183 | virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); | ||
| 184 | 		dev_dbg(dev, "irq: irq_offset %d, virq %d\n", offset, virq); | ||
| 185 | generic_handle_irq(virq); | ||
| 186 | } | ||
| 187 | |||
| 188 | /* EOI the INTx interrupt */ | ||
| 189 | ks_dw_app_writel(ks_pcie, IRQ_EOI, offset); | ||
| 190 | } | ||
| 191 | |||
| 192 | void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) | ||
| 193 | { | ||
| 194 | ks_dw_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); | ||
| 195 | } | ||
| 196 | |||
| 197 | irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) | ||
| 198 | { | ||
| 199 | u32 status; | ||
| 200 | |||
| 201 | status = ks_dw_app_readl(ks_pcie, ERR_IRQ_STATUS_RAW) & ERR_IRQ_ALL; | ||
| 202 | if (!status) | ||
| 203 | return IRQ_NONE; | ||
| 204 | |||
| 205 | if (status & ERR_FATAL_IRQ) | ||
| 206 | dev_err(ks_pcie->pci->dev, "fatal error (status %#010x)\n", | ||
| 207 | status); | ||
| 208 | |||
| 209 | /* Ack the IRQ; status bits are RW1C */ | ||
| 210 | ks_dw_app_writel(ks_pcie, ERR_IRQ_STATUS, status); | ||
| 211 | return IRQ_HANDLED; | ||
| 212 | } | ||
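The error handler above follows the usual write-1-to-clear (RW1C) convention: read the raw status, report the set bits, then write the same value back so that exactly those bits are acknowledged. A generic sketch of the idiom, with hypothetical offsets (assuming only the stock kernel MMIO helpers):

    #include <linux/io.h>
    #include <linux/interrupt.h>

    static irqreturn_t demo_handle_err(void __iomem *app_base)
    {
    	u32 status = readl(app_base + 0x1c0);	/* raw status */

    	if (!status)
    		return IRQ_NONE;	/* interrupt was not ours */

    	/* ... log or recover from the error bits here ... */

    	writel(status, app_base + 0x1c4);	/* RW1C: each 1 clears a bit */
    	return IRQ_HANDLED;
    }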
| 213 | |||
| 214 | static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d) | ||
| 215 | { | ||
| 216 | } | ||
| 217 | |||
| 218 | static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d) | ||
| 219 | { | ||
| 220 | } | ||
| 221 | |||
| 222 | static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d) | ||
| 223 | { | ||
| 224 | } | ||
| 225 | |||
| 226 | static struct irq_chip ks_dw_pcie_legacy_irq_chip = { | ||
| 227 | .name = "Keystone-PCI-Legacy-IRQ", | ||
| 228 | .irq_ack = ks_dw_pcie_ack_legacy_irq, | ||
| 229 | .irq_mask = ks_dw_pcie_mask_legacy_irq, | ||
| 230 | .irq_unmask = ks_dw_pcie_unmask_legacy_irq, | ||
| 231 | }; | ||
| 232 | |||
| 233 | static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, | ||
| 234 | unsigned int irq, irq_hw_number_t hw_irq) | ||
| 235 | { | ||
| 236 | irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, | ||
| 237 | handle_level_irq); | ||
| 238 | irq_set_chip_data(irq, d->host_data); | ||
| 239 | |||
| 240 | return 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = { | ||
| 244 | .map = ks_dw_pcie_init_legacy_irq_map, | ||
| 245 | .xlate = irq_domain_xlate_onetwocell, | ||
| 246 | }; | ||
| 247 | |||
| 248 | /** | ||
| 249 | * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask | ||
| 250 | * registers | ||
| 251 | * | ||
| 252 | * Since modifying dbi_cs2 crosses a clock domain, read the status | ||
| 253 | * back to ensure the transition has completed. | ||
| 254 | */ | ||
| 255 | static void ks_dw_pcie_set_dbi_mode(struct keystone_pcie *ks_pcie) | ||
| 256 | { | ||
| 257 | u32 val; | ||
| 258 | |||
| 259 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 260 | ks_dw_app_writel(ks_pcie, CMD_STATUS, DBI_CS2_EN_VAL | val); | ||
| 261 | |||
| 262 | do { | ||
| 263 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 264 | } while (!(val & DBI_CS2_EN_VAL)); | ||
| 265 | } | ||
| 266 | |||
| 267 | /** | ||
| 268 | * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode | ||
| 269 | * | ||
| 270 | * Since modifying dbi_cs2 crosses a clock domain, read the status | ||
| 271 | * back to ensure the transition has completed. | ||
| 272 | */ | ||
| 273 | static void ks_dw_pcie_clear_dbi_mode(struct keystone_pcie *ks_pcie) | ||
| 274 | { | ||
| 275 | u32 val; | ||
| 276 | |||
| 277 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 278 | ks_dw_app_writel(ks_pcie, CMD_STATUS, ~DBI_CS2_EN_VAL & val); | ||
| 279 | |||
| 280 | do { | ||
| 281 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 282 | } while (val & DBI_CS2_EN_VAL); | ||
| 283 | } | ||
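Both helpers use the same write-then-poll shape because CMD_STATUS sits in a different clock domain. A hedged sketch of the equivalent using the stock readl_poll_timeout() helper from <linux/iopoll.h>, which additionally bounds the wait (the open-coded loops above spin forever if the bit never flips):

    #include <linux/io.h>
    #include <linux/iopoll.h>

    /* Hypothetical: set a mode bit and wait up to 100 ms for it to take
     * effect in the other clock domain. Returns 0 or -ETIMEDOUT. */
    static int demo_set_mode_bit(void __iomem *cmd_status, u32 bit)
    {
    	u32 val;

    	writel(readl(cmd_status) | bit, cmd_status);
    	return readl_poll_timeout(cmd_status, val, val & bit, 10, 100 * 1000);
    }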
| 284 | |||
| 285 | void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) | ||
| 286 | { | ||
| 287 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 288 | struct pcie_port *pp = &pci->pp; | ||
| 289 | u32 start = pp->mem->start, end = pp->mem->end; | ||
| 290 | int i, tr_size; | ||
| 291 | u32 val; | ||
| 292 | |||
| 293 | /* Disable BARs for inbound access */ | ||
| 294 | ks_dw_pcie_set_dbi_mode(ks_pcie); | ||
| 295 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0); | ||
| 296 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); | ||
| 297 | ks_dw_pcie_clear_dbi_mode(ks_pcie); | ||
| 298 | |||
| 299 | 	/* Set the outbound translation window size */ | ||
| 300 | ks_dw_app_writel(ks_pcie, OB_SIZE, CFG_PCIM_WIN_SZ_IDX & 0x7); | ||
| 301 | |||
| 302 | tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M; | ||
| 303 | |||
| 304 | /* Using Direct 1:1 mapping of RC <-> PCI memory space */ | ||
| 305 | for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) { | ||
| 306 | ks_dw_app_writel(ks_pcie, OB_OFFSET_INDEX(i), start | 1); | ||
| 307 | ks_dw_app_writel(ks_pcie, OB_OFFSET_HI(i), 0); | ||
| 308 | start += tr_size; | ||
| 309 | } | ||
| 310 | |||
| 311 | /* Enable OB translation */ | ||
| 312 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 313 | ks_dw_app_writel(ks_pcie, CMD_STATUS, OB_XLAT_EN_VAL | val); | ||
| 314 | } | ||
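To make the window arithmetic concrete: with CFG_PCIM_WIN_SZ_IDX = 3, each outbound window spans (1 << 3) * 1 MB = 8 MB, so the 32 windows together reach at most 256 MB of PCI memory space. A standalone check (plain C, a demo rather than driver code):

    #include <stdio.h>

    #define SZ_1M			(1024UL * 1024)
    #define CFG_PCIM_WIN_SZ_IDX	3
    #define CFG_PCIM_WIN_CNT	32

    int main(void)
    {
    	unsigned long tr_size = (1UL << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;

    	printf("window size: %lu MB, total reach: %lu MB\n",
    	       tr_size / SZ_1M, tr_size * CFG_PCIM_WIN_CNT / SZ_1M);
    	return 0;
    }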
| 315 | |||
| 316 | /** | ||
| 317 | * ks_pcie_cfg_setup() - Set up configuration space address for a device | ||
| 318 | * | ||
| 319 | * @ks_pcie: ptr to keystone_pcie structure | ||
| 320 | * @bus: Bus number the device is residing on | ||
| 321 | * @devfn: device, function number info | ||
| 322 | * | ||
| 323 | * Forms and returns the address of configuration space mapped in PCIESS | ||
| 324 | * address space 0. Also configures CFG_SETUP for remote configuration space | ||
| 325 | * access. | ||
| 326 | * | ||
| 327 | * The address space has two regions for configuration access: local and | ||
| 328 | * remote. Bus 0 (the RC itself) is accessed through the local region. | ||
| 329 | * Buses beyond 1 use the remote region with TYPE 1 accesses, while devices | ||
| 330 | * on bus 1 (our logical secondary bus) get TYPE 0 accesses. | ||
| 331 | * CFG_SETUP is needed only for remote configuration access. | ||
| 332 | */ | ||
| 333 | static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus, | ||
| 334 | unsigned int devfn) | ||
| 335 | { | ||
| 336 | u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn); | ||
| 337 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 338 | struct pcie_port *pp = &pci->pp; | ||
| 339 | u32 regval; | ||
| 340 | |||
| 341 | if (bus == 0) | ||
| 342 | return pci->dbi_base; | ||
| 343 | |||
| 344 | regval = (bus << 16) | (device << 8) | function; | ||
| 345 | |||
| 346 | 	/* | ||
| 347 | 	 * Bus 1 is our virtual secondary bus, so it only ever gets | ||
| 348 | 	 * TYPE 0 accesses; anything further out needs a TYPE 1 | ||
| 349 | 	 * access, selected by setting bit 24. | ||
| 350 | 	 */ | ||
| 351 | if (bus != 1) | ||
| 352 | regval |= BIT(24); | ||
| 353 | |||
| 354 | ks_dw_app_writel(ks_pcie, CFG_SETUP, regval); | ||
| 355 | return pp->va_cfg0_base; | ||
| 356 | } | ||
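As an illustration of the CFG_SETUP encoding: bus 2, device 0, function 0 programs (2 << 16) | BIT(24) = 0x01020000, a TYPE 1 access, while bus 1 omits bit 24 and so gets TYPE 0. A quick standalone check (plain C, mirroring the field layout above):

    #include <stdio.h>

    /* CFG_SETUP layout per the code above:
     * [23:16] bus, [15:8] device, [7:0] function, bit 24 = TYPE 1. */
    static unsigned int demo_cfg_setup(unsigned int bus, unsigned int dev,
    				   unsigned int fn)
    {
    	unsigned int regval = (bus << 16) | (dev << 8) | fn;

    	if (bus != 1)
    		regval |= 1u << 24;	/* TYPE 1 for anything past bus 1 */
    	return regval;
    }

    int main(void)
    {
    	printf("bus 1 -> 0x%08x\n", demo_cfg_setup(1, 0, 0));	/* 0x00010000 */
    	printf("bus 2 -> 0x%08x\n", demo_cfg_setup(2, 0, 0));	/* 0x01020000 */
    	return 0;
    }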
| 357 | |||
| 358 | int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 359 | unsigned int devfn, int where, int size, u32 *val) | ||
| 360 | { | ||
| 361 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 362 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 363 | u8 bus_num = bus->number; | ||
| 364 | void __iomem *addr; | ||
| 365 | |||
| 366 | addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); | ||
| 367 | |||
| 368 | return dw_pcie_read(addr + where, size, val); | ||
| 369 | } | ||
| 370 | |||
| 371 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 372 | unsigned int devfn, int where, int size, u32 val) | ||
| 373 | { | ||
| 374 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 375 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 376 | u8 bus_num = bus->number; | ||
| 377 | void __iomem *addr; | ||
| 378 | |||
| 379 | addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); | ||
| 380 | |||
| 381 | return dw_pcie_write(addr + where, size, val); | ||
| 382 | } | ||
| 383 | |||
| 384 | /** | ||
| 385 | * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization | ||
| 386 | * | ||
| 387 | * This sets up BAR0 to enable inbound access to the MSI_IRQ register. | ||
| 388 | */ | ||
| 389 | void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp) | ||
| 390 | { | ||
| 391 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 392 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 393 | |||
| 394 | /* Configure and set up BAR0 */ | ||
| 395 | ks_dw_pcie_set_dbi_mode(ks_pcie); | ||
| 396 | |||
| 397 | 	/* Enable BAR0 and size it to 4K via the BAR mask register (DBI CS2) */ | ||
| 398 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 1); | ||
| 399 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, SZ_4K - 1); | ||
| 400 | |||
| 401 | ks_dw_pcie_clear_dbi_mode(ks_pcie); | ||
| 402 | |||
| 403 | /* | ||
| 404 | * For BAR0, just setting bus address for inbound writes (MSI) should | ||
| 405 | * be sufficient. Use physical address to avoid any conflicts. | ||
| 406 | */ | ||
| 407 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, ks_pcie->app.start); | ||
| 408 | } | ||
| 409 | |||
| 410 | /** | ||
| 411 | * ks_dw_pcie_link_up() - Check whether the PCIe link is up | ||
| 412 | */ | ||
| 413 | int ks_dw_pcie_link_up(struct dw_pcie *pci) | ||
| 414 | { | ||
| 415 | u32 val; | ||
| 416 | |||
| 417 | val = dw_pcie_readl_dbi(pci, DEBUG0); | ||
| 418 | return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0; | ||
| 419 | } | ||
| 420 | |||
| 421 | void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) | ||
| 422 | { | ||
| 423 | u32 val; | ||
| 424 | |||
| 425 | /* Disable Link training */ | ||
| 426 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 427 | val &= ~LTSSM_EN_VAL; | ||
| 428 | 	ks_dw_app_writel(ks_pcie, CMD_STATUS, val); | ||
| 429 | |||
| 430 | /* Initiate Link Training */ | ||
| 431 | val = ks_dw_app_readl(ks_pcie, CMD_STATUS); | ||
| 432 | ks_dw_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
| 433 | } | ||
| 434 | |||
| 435 | /** | ||
| 436 | * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware | ||
| 437 | * | ||
| 438 | * Ioremap the register resources, initialize the legacy IRQ domain | ||
| 439 | * and call dw_pcie_host_init() to initialize the Keystone | ||
| 440 | * PCIe host controller. | ||
| 441 | */ | ||
| 442 | int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, | ||
| 443 | struct device_node *msi_intc_np) | ||
| 444 | { | ||
| 445 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 446 | struct pcie_port *pp = &pci->pp; | ||
| 447 | struct device *dev = pci->dev; | ||
| 448 | struct platform_device *pdev = to_platform_device(dev); | ||
| 449 | struct resource *res; | ||
| 450 | |||
| 451 | /* Index 0 is the config reg. space address */ | ||
| 452 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 453 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 454 | if (IS_ERR(pci->dbi_base)) | ||
| 455 | return PTR_ERR(pci->dbi_base); | ||
| 456 | |||
| 457 | 	/* | ||
| 458 | 	 * Keep the two remote config bases identical; both are used by | ||
| 459 | 	 * the rd/wr_other_conf accessors. | ||
| 460 | 	 */ | ||
| 461 | pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; | ||
| 462 | pp->va_cfg1_base = pp->va_cfg0_base; | ||
| 463 | |||
| 464 | /* Index 1 is the application reg. space address */ | ||
| 465 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 466 | ks_pcie->va_app_base = devm_ioremap_resource(dev, res); | ||
| 467 | if (IS_ERR(ks_pcie->va_app_base)) | ||
| 468 | return PTR_ERR(ks_pcie->va_app_base); | ||
| 469 | |||
| 470 | ks_pcie->app = *res; | ||
| 471 | |||
| 472 | /* Create legacy IRQ domain */ | ||
| 473 | ks_pcie->legacy_irq_domain = | ||
| 474 | irq_domain_add_linear(ks_pcie->legacy_intc_np, | ||
| 475 | PCI_NUM_INTX, | ||
| 476 | &ks_dw_pcie_legacy_irq_domain_ops, | ||
| 477 | NULL); | ||
| 478 | if (!ks_pcie->legacy_irq_domain) { | ||
| 479 | dev_err(dev, "Failed to add irq domain for legacy irqs\n"); | ||
| 480 | return -EINVAL; | ||
| 481 | } | ||
| 482 | |||
| 483 | return dw_pcie_host_init(pp); | ||
| 484 | } | ||
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c new file mode 100644 index 000000000000..3722a5f31e5e --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.c | |||
| @@ -0,0 +1,457 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Texas Instruments Keystone SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | ||
| 6 | * http://www.ti.com | ||
| 7 | * | ||
| 8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
| 9 | * Implementation based on pci-exynos.c and pcie-designware.c | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/irqchip/chained_irq.h> | ||
| 13 | #include <linux/clk.h> | ||
| 14 | #include <linux/delay.h> | ||
| 15 | #include <linux/interrupt.h> | ||
| 16 | #include <linux/irqdomain.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/msi.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | #include <linux/of.h> | ||
| 21 | #include <linux/of_pci.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/phy/phy.h> | ||
| 24 | #include <linux/resource.h> | ||
| 25 | #include <linux/signal.h> | ||
| 26 | |||
| 27 | #include "pcie-designware.h" | ||
| 28 | #include "pci-keystone.h" | ||
| 29 | |||
| 30 | #define DRIVER_NAME "keystone-pcie" | ||
| 31 | |||
| 32 | /* DEV_STAT_CTRL */ | ||
| 33 | #define PCIE_CAP_BASE 0x70 | ||
| 34 | |||
| 35 | /* PCIE controller device IDs */ | ||
| 36 | #define PCIE_RC_K2HK 0xb008 | ||
| 37 | #define PCIE_RC_K2E 0xb009 | ||
| 38 | #define PCIE_RC_K2L 0xb00a | ||
| 39 | |||
| 40 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) | ||
| 41 | |||
| 42 | static void quirk_limit_mrrs(struct pci_dev *dev) | ||
| 43 | { | ||
| 44 | struct pci_bus *bus = dev->bus; | ||
| 45 | struct pci_dev *bridge = bus->self; | ||
| 46 | static const struct pci_device_id rc_pci_devids[] = { | ||
| 47 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK), | ||
| 48 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | ||
| 49 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E), | ||
| 50 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | ||
| 51 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L), | ||
| 52 | .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, }, | ||
| 53 | { 0, }, | ||
| 54 | }; | ||
| 55 | |||
| 56 | if (pci_is_root_bus(bus)) | ||
| 57 | return; | ||
| 58 | |||
| 59 | /* look for the host bridge */ | ||
| 60 | while (!pci_is_root_bus(bus)) { | ||
| 61 | bridge = bus->self; | ||
| 62 | bus = bus->parent; | ||
| 63 | } | ||
| 64 | |||
| 65 | if (bridge) { | ||
| 66 | 		/* | ||
| 67 | 		 * The Keystone PCI controller has a hardware | ||
| 68 | 		 * limitation of a 256 byte maximum read request | ||
| 69 | 		 * size. It can't handle anything larger, so | ||
| 70 | 		 * force this limit on all downstream devices. | ||
| 71 | 		 */ | ||
| 72 | if (pci_match_id(rc_pci_devids, bridge)) { | ||
| 73 | if (pcie_get_readrq(dev) > 256) { | ||
| 74 | dev_info(&dev->dev, "limiting MRRS to 256\n"); | ||
| 75 | pcie_set_readrq(dev, 256); | ||
| 76 | } | ||
| 77 | } | ||
| 78 | } | ||
| 79 | } | ||
| 80 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs); | ||
| 81 | |||
| 82 | static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) | ||
| 83 | { | ||
| 84 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 85 | struct pcie_port *pp = &pci->pp; | ||
| 86 | struct device *dev = pci->dev; | ||
| 87 | unsigned int retries; | ||
| 88 | |||
| 89 | dw_pcie_setup_rc(pp); | ||
| 90 | |||
| 91 | if (dw_pcie_link_up(pci)) { | ||
| 92 | dev_info(dev, "Link already up\n"); | ||
| 93 | return 0; | ||
| 94 | } | ||
| 95 | |||
| 96 | 	/* try to bring the link up, retrying a few times */ | ||
| 97 | for (retries = 0; retries < 5; retries++) { | ||
| 98 | ks_dw_pcie_initiate_link_train(ks_pcie); | ||
| 99 | if (!dw_pcie_wait_for_link(pci)) | ||
| 100 | return 0; | ||
| 101 | } | ||
| 102 | |||
| 103 | dev_err(dev, "phy link never came up\n"); | ||
| 104 | return -ETIMEDOUT; | ||
| 105 | } | ||
| 106 | |||
| 107 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) | ||
| 108 | { | ||
| 109 | unsigned int irq = irq_desc_get_irq(desc); | ||
| 110 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | ||
| 111 | u32 offset = irq - ks_pcie->msi_host_irqs[0]; | ||
| 112 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 113 | struct device *dev = pci->dev; | ||
| 114 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 115 | |||
| 116 | dev_dbg(dev, "%s, irq %d\n", __func__, irq); | ||
| 117 | |||
| 118 | 	/* | ||
| 119 | 	 * Installing the chained IRQ handler replaced the normal interrupt | ||
| 120 | 	 * handler, so we must take care of the mask/unmask and ack | ||
| 121 | 	 * operations ourselves. | ||
| 122 | 	 */ | ||
| 123 | chained_irq_enter(chip, desc); | ||
| 124 | ks_dw_pcie_handle_msi_irq(ks_pcie, offset); | ||
| 125 | chained_irq_exit(chip, desc); | ||
| 126 | } | ||
| 127 | |||
| 128 | /** | ||
| 129 | * ks_pcie_legacy_irq_handler() - Handle legacy interrupt | ||
| 130 | * @irq: IRQ line for legacy interrupts | ||
| 131 | * @desc: Pointer to irq descriptor | ||
| 132 | * | ||
| 133 | * Traverse through pending legacy interrupts and invoke handler for each. Also | ||
| 134 | * takes care of interrupt controller level mask/ack operation. | ||
| 135 | */ | ||
| 136 | static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) | ||
| 137 | { | ||
| 138 | unsigned int irq = irq_desc_get_irq(desc); | ||
| 139 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | ||
| 140 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 141 | struct device *dev = pci->dev; | ||
| 142 | u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0]; | ||
| 143 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 144 | |||
| 145 | 	dev_dbg(dev, "Handling legacy irq %d\n", irq); | ||
| 146 | |||
| 147 | 	/* | ||
| 148 | 	 * Installing the chained IRQ handler replaced the normal interrupt | ||
| 149 | 	 * handler, so we must take care of the mask/unmask and ack | ||
| 150 | 	 * operations ourselves. | ||
| 151 | 	 */ | ||
| 152 | chained_irq_enter(chip, desc); | ||
| 153 | ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset); | ||
| 154 | chained_irq_exit(chip, desc); | ||
| 155 | } | ||
| 156 | |||
| 157 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | ||
| 158 | char *controller, int *num_irqs) | ||
| 159 | { | ||
| 160 | int temp, max_host_irqs, legacy = 1, *host_irqs; | ||
| 161 | struct device *dev = ks_pcie->pci->dev; | ||
| 162 | struct device_node *np_pcie = dev->of_node, **np_temp; | ||
| 163 | |||
| 164 | if (!strcmp(controller, "msi-interrupt-controller")) | ||
| 165 | legacy = 0; | ||
| 166 | |||
| 167 | if (legacy) { | ||
| 168 | np_temp = &ks_pcie->legacy_intc_np; | ||
| 169 | max_host_irqs = PCI_NUM_INTX; | ||
| 170 | host_irqs = &ks_pcie->legacy_host_irqs[0]; | ||
| 171 | } else { | ||
| 172 | np_temp = &ks_pcie->msi_intc_np; | ||
| 173 | max_host_irqs = MAX_MSI_HOST_IRQS; | ||
| 174 | host_irqs = &ks_pcie->msi_host_irqs[0]; | ||
| 175 | } | ||
| 176 | |||
| 177 | /* interrupt controller is in a child node */ | ||
| 178 | *np_temp = of_get_child_by_name(np_pcie, controller); | ||
| 179 | if (!(*np_temp)) { | ||
| 180 | dev_err(dev, "Node for %s is absent\n", controller); | ||
| 181 | return -EINVAL; | ||
| 182 | } | ||
| 183 | |||
| 184 | temp = of_irq_count(*np_temp); | ||
| 185 | if (!temp) { | ||
| 186 | dev_err(dev, "No IRQ entries in %s\n", controller); | ||
| 187 | of_node_put(*np_temp); | ||
| 188 | return -EINVAL; | ||
| 189 | } | ||
| 190 | |||
| 191 | if (temp > max_host_irqs) | ||
| 192 | 		dev_warn(dev, "Too many %s interrupts defined (%u)\n", | ||
| 193 | (legacy ? "legacy" : "MSI"), temp); | ||
| 194 | |||
| 195 | 	/* | ||
| 196 | 	 * Support up to max_host_irqs: DT indices 0 to 3 for legacy | ||
| 197 | 	 * interrupts, 0 to 7 for MSI. | ||
| 198 | 	 */ | ||
| 199 | for (temp = 0; temp < max_host_irqs; temp++) { | ||
| 200 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); | ||
| 201 | if (!host_irqs[temp]) | ||
| 202 | break; | ||
| 203 | } | ||
| 204 | |||
| 205 | of_node_put(*np_temp); | ||
| 206 | |||
| 207 | if (temp) { | ||
| 208 | *num_irqs = temp; | ||
| 209 | return 0; | ||
| 210 | } | ||
| 211 | |||
| 212 | return -EINVAL; | ||
| 213 | } | ||
| 214 | |||
| 215 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | ||
| 216 | { | ||
| 217 | int i; | ||
| 218 | |||
| 219 | /* Legacy IRQ */ | ||
| 220 | for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { | ||
| 221 | irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], | ||
| 222 | ks_pcie_legacy_irq_handler, | ||
| 223 | ks_pcie); | ||
| 224 | } | ||
| 225 | ks_dw_pcie_enable_legacy_irqs(ks_pcie); | ||
| 226 | |||
| 227 | /* MSI IRQ */ | ||
| 228 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 229 | for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { | ||
| 230 | irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], | ||
| 231 | ks_pcie_msi_irq_handler, | ||
| 232 | ks_pcie); | ||
| 233 | } | ||
| 234 | } | ||
| 235 | |||
| 236 | if (ks_pcie->error_irq > 0) | ||
| 237 | ks_dw_pcie_enable_error_irq(ks_pcie); | ||
| 238 | } | ||
| 239 | |||
| 240 | /* | ||
| 241 | * When a PCI device does not exist during config cycles, the Keystone host | ||
| 242 | * gets a bus error instead of returning 0xffffffff. This handler always | ||
| 243 | * returns 0 for this kind of fault. | ||
| 244 | */ | ||
| 245 | static int keystone_pcie_fault(unsigned long addr, unsigned int fsr, | ||
| 246 | struct pt_regs *regs) | ||
| 247 | { | ||
| 248 | unsigned long instr = *(unsigned long *) instruction_pointer(regs); | ||
| 249 | |||
| 250 | if ((instr & 0x0e100090) == 0x00100090) { | ||
| 251 | int reg = (instr >> 12) & 15; | ||
| 252 | |||
| 253 | regs->uregs[reg] = -1; | ||
| 254 | regs->ARM_pc += 4; | ||
| 255 | } | ||
| 256 | |||
| 257 | return 0; | ||
| 258 | } | ||
| 259 | |||
| 260 | static int __init ks_pcie_host_init(struct pcie_port *pp) | ||
| 261 | { | ||
| 262 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 263 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 264 | u32 val; | ||
| 265 | |||
| 266 | ks_pcie_establish_link(ks_pcie); | ||
| 267 | ks_dw_pcie_setup_rc_app_regs(ks_pcie); | ||
| 268 | ks_pcie_setup_interrupts(ks_pcie); | ||
| 269 | writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), | ||
| 270 | pci->dbi_base + PCI_IO_BASE); | ||
| 271 | |||
| 272 | 	/* update the Device ID */ | ||
| 273 | writew(ks_pcie->device_id, pci->dbi_base + PCI_DEVICE_ID); | ||
| 274 | |||
| 275 | 	/* update DEV_STAT_CTRL to publish the right MRRS */ | ||
| 276 | val = readl(pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); | ||
| 277 | val &= ~PCI_EXP_DEVCTL_READRQ; | ||
| 278 | 	/* set MRRS to 256 bytes (ReadRq field = 001b) */ | ||
| 279 | val |= BIT(12); | ||
| 280 | writel(val, pci->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL); | ||
| 281 | |||
| 282 | /* | ||
| 283 | * PCIe access errors that result into OCP errors are caught by ARM as | ||
| 284 | * "External aborts" | ||
| 285 | */ | ||
| 286 | hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0, | ||
| 287 | "Asynchronous external abort"); | ||
| 288 | |||
| 289 | return 0; | ||
| 290 | } | ||
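For reference, the ReadRq field written above occupies DEVCTL bits 14:12 and encodes 128 << field bytes, so BIT(12) (field value 001b) selects 256 bytes. A standalone decode (plain C, computing what pcie_get_readrq() would report):

    #include <stdio.h>

    #define DEMO_DEVCTL_READRQ	0x7000	/* DEVCTL bits 14:12 */

    int main(void)
    {
    	unsigned int devctl = 1u << 12;	/* as programmed above */
    	unsigned int mrrs = 128 << ((devctl & DEMO_DEVCTL_READRQ) >> 12);

    	printf("MRRS = %u bytes\n", mrrs);	/* prints 256 */
    	return 0;
    }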
| 291 | |||
| 292 | static const struct dw_pcie_host_ops keystone_pcie_host_ops = { | ||
| 293 | .rd_other_conf = ks_dw_pcie_rd_other_conf, | ||
| 294 | .wr_other_conf = ks_dw_pcie_wr_other_conf, | ||
| 295 | .host_init = ks_pcie_host_init, | ||
| 296 | .msi_set_irq = ks_dw_pcie_msi_set_irq, | ||
| 297 | .msi_clear_irq = ks_dw_pcie_msi_clear_irq, | ||
| 298 | .get_msi_addr = ks_dw_pcie_get_msi_addr, | ||
| 299 | .msi_host_init = ks_dw_pcie_msi_host_init, | ||
| 300 | .msi_irq_ack = ks_dw_pcie_msi_irq_ack, | ||
| 301 | .scan_bus = ks_dw_pcie_v3_65_scan_bus, | ||
| 302 | }; | ||
| 303 | |||
| 304 | static irqreturn_t pcie_err_irq_handler(int irq, void *priv) | ||
| 305 | { | ||
| 306 | struct keystone_pcie *ks_pcie = priv; | ||
| 307 | |||
| 308 | return ks_dw_pcie_handle_error_irq(ks_pcie); | ||
| 309 | } | ||
| 310 | |||
| 311 | static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie, | ||
| 312 | struct platform_device *pdev) | ||
| 313 | { | ||
| 314 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 315 | struct pcie_port *pp = &pci->pp; | ||
| 316 | struct device *dev = &pdev->dev; | ||
| 317 | int ret; | ||
| 318 | |||
| 319 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
| 320 | "legacy-interrupt-controller", | ||
| 321 | &ks_pcie->num_legacy_host_irqs); | ||
| 322 | if (ret) | ||
| 323 | return ret; | ||
| 324 | |||
| 325 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 326 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
| 327 | "msi-interrupt-controller", | ||
| 328 | &ks_pcie->num_msi_host_irqs); | ||
| 329 | if (ret) | ||
| 330 | return ret; | ||
| 331 | } | ||
| 332 | |||
| 333 | 	/* | ||
| 334 | 	 * Platform IRQ index 0 is the error interrupt from the RC. | ||
| 335 | 	 * It is optional. | ||
| 336 | 	 */ | ||
| 337 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | ||
| 338 | 	if (ks_pcie->error_irq <= 0) { | ||
| 339 | 		dev_info(dev, "no error IRQ defined\n"); | ||
| 340 | 	} else { | ||
| 341 | ret = request_irq(ks_pcie->error_irq, pcie_err_irq_handler, | ||
| 342 | IRQF_SHARED, "pcie-error-irq", ks_pcie); | ||
| 343 | if (ret < 0) { | ||
| 344 | dev_err(dev, "failed to request error IRQ %d\n", | ||
| 345 | ks_pcie->error_irq); | ||
| 346 | return ret; | ||
| 347 | } | ||
| 348 | } | ||
| 349 | |||
| 350 | pp->root_bus_nr = -1; | ||
| 351 | pp->ops = &keystone_pcie_host_ops; | ||
| 352 | ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np); | ||
| 353 | if (ret) { | ||
| 354 | dev_err(dev, "failed to initialize host\n"); | ||
| 355 | return ret; | ||
| 356 | } | ||
| 357 | |||
| 358 | return 0; | ||
| 359 | } | ||
| 360 | |||
| 361 | static const struct of_device_id ks_pcie_of_match[] = { | ||
| 362 | { | ||
| 363 | .type = "pci", | ||
| 364 | .compatible = "ti,keystone-pcie", | ||
| 365 | }, | ||
| 366 | { }, | ||
| 367 | }; | ||
| 368 | |||
| 369 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 370 | .link_up = ks_dw_pcie_link_up, | ||
| 371 | }; | ||
| 372 | |||
| 373 | static int __exit ks_pcie_remove(struct platform_device *pdev) | ||
| 374 | { | ||
| 375 | struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev); | ||
| 376 | |||
| 377 | clk_disable_unprepare(ks_pcie->clk); | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 382 | static int __init ks_pcie_probe(struct platform_device *pdev) | ||
| 383 | { | ||
| 384 | struct device *dev = &pdev->dev; | ||
| 385 | struct dw_pcie *pci; | ||
| 386 | struct keystone_pcie *ks_pcie; | ||
| 387 | struct resource *res; | ||
| 388 | void __iomem *reg_p; | ||
| 389 | struct phy *phy; | ||
| 390 | int ret; | ||
| 391 | |||
| 392 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); | ||
| 393 | if (!ks_pcie) | ||
| 394 | return -ENOMEM; | ||
| 395 | |||
| 396 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 397 | if (!pci) | ||
| 398 | return -ENOMEM; | ||
| 399 | |||
| 400 | pci->dev = dev; | ||
| 401 | pci->ops = &dw_pcie_ops; | ||
| 402 | |||
| 403 | ks_pcie->pci = pci; | ||
| 404 | |||
| 405 | 	/* initialize SerDes PHY if present */ | ||
| 406 | phy = devm_phy_get(dev, "pcie-phy"); | ||
| 407 | if (PTR_ERR_OR_ZERO(phy) == -EPROBE_DEFER) | ||
| 408 | return PTR_ERR(phy); | ||
| 409 | |||
| 410 | if (!IS_ERR_OR_NULL(phy)) { | ||
| 411 | ret = phy_init(phy); | ||
| 412 | if (ret < 0) | ||
| 413 | return ret; | ||
| 414 | } | ||
| 415 | |||
| 416 | 	/* resource index 2 holds the register with the PCI device ID */ | ||
| 417 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 418 | reg_p = devm_ioremap_resource(dev, res); | ||
| 419 | if (IS_ERR(reg_p)) | ||
| 420 | return PTR_ERR(reg_p); | ||
| 421 | ks_pcie->device_id = readl(reg_p) >> 16; | ||
| 422 | devm_iounmap(dev, reg_p); | ||
| 423 | devm_release_mem_region(dev, res->start, resource_size(res)); | ||
| 424 | |||
| 425 | ks_pcie->np = dev->of_node; | ||
| 426 | platform_set_drvdata(pdev, ks_pcie); | ||
| 427 | ks_pcie->clk = devm_clk_get(dev, "pcie"); | ||
| 428 | if (IS_ERR(ks_pcie->clk)) { | ||
| 429 | dev_err(dev, "Failed to get pcie rc clock\n"); | ||
| 430 | return PTR_ERR(ks_pcie->clk); | ||
| 431 | } | ||
| 432 | ret = clk_prepare_enable(ks_pcie->clk); | ||
| 433 | if (ret) | ||
| 434 | return ret; | ||
| 435 | |||
| 438 | ret = ks_add_pcie_port(ks_pcie, pdev); | ||
| 439 | if (ret < 0) | ||
| 440 | goto fail_clk; | ||
| 441 | |||
| 442 | return 0; | ||
| 443 | fail_clk: | ||
| 444 | clk_disable_unprepare(ks_pcie->clk); | ||
| 445 | |||
| 446 | return ret; | ||
| 447 | } | ||
| 448 | |||
| 449 | static struct platform_driver ks_pcie_driver __refdata = { | ||
| 450 | .probe = ks_pcie_probe, | ||
| 451 | .remove = __exit_p(ks_pcie_remove), | ||
| 452 | .driver = { | ||
| 453 | .name = "keystone-pcie", | ||
| 454 | .of_match_table = of_match_ptr(ks_pcie_of_match), | ||
| 455 | }, | ||
| 456 | }; | ||
| 457 | builtin_platform_driver(ks_pcie_driver); | ||
diff --git a/drivers/pci/controller/dwc/pci-keystone.h b/drivers/pci/controller/dwc/pci-keystone.h new file mode 100644 index 000000000000..8a13da391543 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-keystone.h | |||
| @@ -0,0 +1,57 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Keystone PCI Controller's common includes | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013-2014 Texas Instruments., Ltd. | ||
| 6 | * http://www.ti.com | ||
| 7 | * | ||
| 8 | * Author: Murali Karicheri <m-karicheri2@ti.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #define MAX_MSI_HOST_IRQS 8 | ||
| 12 | |||
| 13 | struct keystone_pcie { | ||
| 14 | struct dw_pcie *pci; | ||
| 15 | struct clk *clk; | ||
| 16 | /* PCI Device ID */ | ||
| 17 | u32 device_id; | ||
| 18 | int num_legacy_host_irqs; | ||
| 19 | int legacy_host_irqs[PCI_NUM_INTX]; | ||
| 20 | struct device_node *legacy_intc_np; | ||
| 21 | |||
| 22 | int num_msi_host_irqs; | ||
| 23 | int msi_host_irqs[MAX_MSI_HOST_IRQS]; | ||
| 24 | struct device_node *msi_intc_np; | ||
| 25 | struct irq_domain *legacy_irq_domain; | ||
| 26 | struct device_node *np; | ||
| 27 | |||
| 28 | int error_irq; | ||
| 29 | |||
| 30 | /* Application register space */ | ||
| 31 | void __iomem *va_app_base; /* DT 1st resource */ | ||
| 32 | struct resource app; | ||
| 33 | }; | ||
| 34 | |||
| 35 | /* Keystone DW specific MSI controller APIs/definitions */ | ||
| 36 | void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); | ||
| 37 | phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); | ||
| 38 | |||
| 39 | /* Keystone specific PCI controller APIs */ | ||
| 40 | void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); | ||
| 41 | void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset); | ||
| 42 | void ks_dw_pcie_enable_error_irq(struct keystone_pcie *ks_pcie); | ||
| 43 | irqreturn_t ks_dw_pcie_handle_error_irq(struct keystone_pcie *ks_pcie); | ||
| 44 | int ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie, | ||
| 45 | struct device_node *msi_intc_np); | ||
| 46 | int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 47 | unsigned int devfn, int where, int size, u32 val); | ||
| 48 | int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 49 | unsigned int devfn, int where, int size, u32 *val); | ||
| 50 | void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie); | ||
| 51 | void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie); | ||
| 52 | void ks_dw_pcie_msi_irq_ack(int irq, struct pcie_port *pp); | ||
| 53 | void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); | ||
| 54 | void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); | ||
| 55 | void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); | ||
| 56 | int ks_dw_pcie_msi_host_init(struct pcie_port *pp); | ||
| 57 | int ks_dw_pcie_link_up(struct dw_pcie *pci); | ||
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c new file mode 100644 index 000000000000..3724d3ef7008 --- /dev/null +++ b/drivers/pci/controller/dwc/pci-layerscape.c | |||
| @@ -0,0 +1,341 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Freescale Layerscape SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2014 Freescale Semiconductor. | ||
| 6 | * | ||
| 7 | * Author: Minghuan Lian <Minghuan.Lian@freescale.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/of_pci.h> | ||
| 14 | #include <linux/of_platform.h> | ||
| 15 | #include <linux/of_irq.h> | ||
| 16 | #include <linux/of_address.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/resource.h> | ||
| 20 | #include <linux/mfd/syscon.h> | ||
| 21 | #include <linux/regmap.h> | ||
| 22 | |||
| 23 | #include "pcie-designware.h" | ||
| 24 | |||
| 25 | /* PEX1/2 Misc Ports Status Register */ | ||
| 26 | #define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) | ||
| 27 | #define LTSSM_STATE_SHIFT 20 | ||
| 28 | #define LTSSM_STATE_MASK 0x3f | ||
| 29 | #define LTSSM_PCIE_L0 0x11 /* L0 state */ | ||
| 30 | |||
| 31 | /* PEX Internal Configuration Registers */ | ||
| 32 | #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ | ||
| 33 | #define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ | ||
| 34 | #define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ | ||
| 35 | |||
| 36 | #define PCIE_IATU_NUM 6 | ||
| 37 | |||
| 38 | struct ls_pcie_drvdata { | ||
| 39 | u32 lut_offset; | ||
| 40 | u32 ltssm_shift; | ||
| 41 | u32 lut_dbg; | ||
| 42 | const struct dw_pcie_host_ops *ops; | ||
| 43 | const struct dw_pcie_ops *dw_pcie_ops; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct ls_pcie { | ||
| 47 | struct dw_pcie *pci; | ||
| 48 | void __iomem *lut; | ||
| 49 | struct regmap *scfg; | ||
| 50 | const struct ls_pcie_drvdata *drvdata; | ||
| 51 | int index; | ||
| 52 | }; | ||
| 53 | |||
| 54 | #define to_ls_pcie(x) dev_get_drvdata((x)->dev) | ||
| 55 | |||
| 56 | static bool ls_pcie_is_bridge(struct ls_pcie *pcie) | ||
| 57 | { | ||
| 58 | struct dw_pcie *pci = pcie->pci; | ||
| 59 | u32 header_type; | ||
| 60 | |||
| 61 | header_type = ioread8(pci->dbi_base + PCI_HEADER_TYPE); | ||
| 62 | header_type &= 0x7f; | ||
| 63 | |||
| 64 | return header_type == PCI_HEADER_TYPE_BRIDGE; | ||
| 65 | } | ||
| 66 | |||
| 67 | /* Clear multi-function bit */ | ||
| 68 | static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) | ||
| 69 | { | ||
| 70 | struct dw_pcie *pci = pcie->pci; | ||
| 71 | |||
| 72 | iowrite8(PCI_HEADER_TYPE_BRIDGE, pci->dbi_base + PCI_HEADER_TYPE); | ||
| 73 | } | ||
| 74 | |||
| 75 | /* Drop MSG TLP except for Vendor MSG */ | ||
| 76 | static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) | ||
| 77 | { | ||
| 78 | u32 val; | ||
| 79 | struct dw_pcie *pci = pcie->pci; | ||
| 80 | |||
| 81 | val = ioread32(pci->dbi_base + PCIE_STRFMR1); | ||
| 82 | 	val &= 0xDFFFFFFF; /* clear bit 29 to drop MSG TLPs except Vendor MSG */ | ||
| 83 | iowrite32(val, pci->dbi_base + PCIE_STRFMR1); | ||
| 84 | } | ||
| 85 | |||
| 86 | static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) | ||
| 87 | { | ||
| 88 | int i; | ||
| 89 | |||
| 90 | for (i = 0; i < PCIE_IATU_NUM; i++) | ||
| 91 | 		dw_pcie_disable_atu(pcie->pci, i, DW_PCIE_REGION_OUTBOUND); | ||
| 92 | } | ||
| 93 | |||
| 94 | static int ls1021_pcie_link_up(struct dw_pcie *pci) | ||
| 95 | { | ||
| 96 | u32 state; | ||
| 97 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
| 98 | |||
| 99 | if (!pcie->scfg) | ||
| 100 | return 0; | ||
| 101 | |||
| 102 | regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); | ||
| 103 | state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; | ||
| 104 | |||
| 105 | if (state < LTSSM_PCIE_L0) | ||
| 106 | return 0; | ||
| 107 | |||
| 108 | return 1; | ||
| 109 | } | ||
| 110 | |||
| 111 | static int ls_pcie_link_up(struct dw_pcie *pci) | ||
| 112 | { | ||
| 113 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
| 114 | u32 state; | ||
| 115 | |||
| 116 | state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >> | ||
| 117 | pcie->drvdata->ltssm_shift) & | ||
| 118 | LTSSM_STATE_MASK; | ||
| 119 | |||
| 120 | if (state < LTSSM_PCIE_L0) | ||
| 121 | return 0; | ||
| 122 | |||
| 123 | return 1; | ||
| 124 | } | ||
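Concretely, with the ls1043 drvdata below the LTSSM state sits in bits 29:24 of the LUT debug register, and any state at or past L0 (0x11) counts as link up. A standalone decode (plain C, with hypothetical register snapshots):

    #include <stdio.h>

    #define DEMO_LTSSM_STATE_MASK	0x3f
    #define DEMO_LTSSM_PCIE_L0	0x11

    static int demo_link_up(unsigned int lut_dbg, unsigned int shift)
    {
    	unsigned int state = (lut_dbg >> shift) & DEMO_LTSSM_STATE_MASK;

    	return state >= DEMO_LTSSM_PCIE_L0;
    }

    int main(void)
    {
    	printf("%d\n", demo_link_up(0x11000000, 24));	/* L0, shift 24 -> 1 */
    	printf("%d\n", demo_link_up(0x00000004, 0));	/* still polling -> 0 */
    	return 0;
    }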
| 125 | |||
| 126 | /* Forward error response of outbound non-posted requests */ | ||
| 127 | static void ls_pcie_fix_error_response(struct ls_pcie *pcie) | ||
| 128 | { | ||
| 129 | struct dw_pcie *pci = pcie->pci; | ||
| 130 | |||
| 131 | iowrite32(PCIE_ABSERR_SETTING, pci->dbi_base + PCIE_ABSERR); | ||
| 132 | } | ||
| 133 | |||
| 134 | static int ls_pcie_host_init(struct pcie_port *pp) | ||
| 135 | { | ||
| 136 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 137 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
| 138 | |||
| 139 | /* | ||
| 140 | * Disable outbound windows configured by the bootloader to avoid | ||
| 141 | * one transaction hitting multiple outbound windows. | ||
| 142 | * dw_pcie_setup_rc() will reconfigure the outbound windows. | ||
| 143 | */ | ||
| 144 | ls_pcie_disable_outbound_atus(pcie); | ||
| 145 | ls_pcie_fix_error_response(pcie); | ||
| 146 | |||
| 147 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 148 | ls_pcie_clear_multifunction(pcie); | ||
| 149 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 150 | |||
| 151 | ls_pcie_drop_msg_tlp(pcie); | ||
| 152 | |||
| 153 | dw_pcie_setup_rc(pp); | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
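ls_pcie_clear_multifunction() pokes a normally read-only config header byte, hence the dw_pcie_dbi_ro_wr_en()/dw_pcie_dbi_ro_wr_dis() bracket above. The same bracket applies to any read-only DBI field; a hedged sketch with a hypothetical fix-up (assuming the DWC helpers from pcie-designware.h):

    #include <linux/pci_regs.h>
    #include <linux/pci_ids.h>

    #include "pcie-designware.h"

    /* Hypothetical: force the bridge's class code to PCI-to-PCI. */
    static void demo_fix_class(struct dw_pcie *pci)
    {
    	dw_pcie_dbi_ro_wr_en(pci);	/* unlock read-only DBI fields */
    	dw_pcie_writel_dbi(pci, PCI_CLASS_REVISION,
    			   PCI_CLASS_BRIDGE_PCI << 16);
    	dw_pcie_dbi_ro_wr_dis(pci);	/* re-lock */
    }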
| 157 | |||
| 158 | static int ls1021_pcie_host_init(struct pcie_port *pp) | ||
| 159 | { | ||
| 160 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 161 | struct ls_pcie *pcie = to_ls_pcie(pci); | ||
| 162 | struct device *dev = pci->dev; | ||
| 163 | u32 index[2]; | ||
| 164 | int ret; | ||
| 165 | |||
| 166 | pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node, | ||
| 167 | "fsl,pcie-scfg"); | ||
| 168 | if (IS_ERR(pcie->scfg)) { | ||
| 169 | ret = PTR_ERR(pcie->scfg); | ||
| 170 | dev_err(dev, "No syscfg phandle specified\n"); | ||
| 171 | pcie->scfg = NULL; | ||
| 172 | return ret; | ||
| 173 | } | ||
| 174 | |||
| 175 | if (of_property_read_u32_array(dev->of_node, | ||
| 176 | "fsl,pcie-scfg", index, 2)) { | ||
| 177 | pcie->scfg = NULL; | ||
| 178 | return -EINVAL; | ||
| 179 | } | ||
| 180 | pcie->index = index[1]; | ||
| 181 | |||
| 182 | return ls_pcie_host_init(pp); | ||
| 183 | } | ||
| 184 | |||
| 185 | static int ls_pcie_msi_host_init(struct pcie_port *pp) | ||
| 186 | { | ||
| 187 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 188 | struct device *dev = pci->dev; | ||
| 189 | struct device_node *np = dev->of_node; | ||
| 190 | struct device_node *msi_node; | ||
| 191 | |||
| 192 | /* | ||
| 193 | * The MSI domain is set by the generic of_msi_configure(). This | ||
| 194 | * .msi_host_init() function keeps us from doing the default MSI | ||
| 195 | * domain setup in dw_pcie_host_init() and also enforces the | ||
| 196 | * requirement that "msi-parent" exists. | ||
| 197 | */ | ||
| 198 | msi_node = of_parse_phandle(np, "msi-parent", 0); | ||
| 199 | if (!msi_node) { | ||
| 200 | dev_err(dev, "failed to find msi-parent\n"); | ||
| 201 | return -EINVAL; | ||
| 202 | } | ||
| 203 | 	of_node_put(msi_node); | ||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 207 | static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { | ||
| 208 | .host_init = ls1021_pcie_host_init, | ||
| 209 | .msi_host_init = ls_pcie_msi_host_init, | ||
| 210 | }; | ||
| 211 | |||
| 212 | static const struct dw_pcie_host_ops ls_pcie_host_ops = { | ||
| 213 | .host_init = ls_pcie_host_init, | ||
| 214 | .msi_host_init = ls_pcie_msi_host_init, | ||
| 215 | }; | ||
| 216 | |||
| 217 | static const struct dw_pcie_ops dw_ls1021_pcie_ops = { | ||
| 218 | .link_up = ls1021_pcie_link_up, | ||
| 219 | }; | ||
| 220 | |||
| 221 | static const struct dw_pcie_ops dw_ls_pcie_ops = { | ||
| 222 | .link_up = ls_pcie_link_up, | ||
| 223 | }; | ||
| 224 | |||
| 225 | static struct ls_pcie_drvdata ls1021_drvdata = { | ||
| 226 | .ops = &ls1021_pcie_host_ops, | ||
| 227 | .dw_pcie_ops = &dw_ls1021_pcie_ops, | ||
| 228 | }; | ||
| 229 | |||
| 230 | static struct ls_pcie_drvdata ls1043_drvdata = { | ||
| 231 | .lut_offset = 0x10000, | ||
| 232 | .ltssm_shift = 24, | ||
| 233 | .lut_dbg = 0x7fc, | ||
| 234 | .ops = &ls_pcie_host_ops, | ||
| 235 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
| 236 | }; | ||
| 237 | |||
| 238 | static struct ls_pcie_drvdata ls1046_drvdata = { | ||
| 239 | .lut_offset = 0x80000, | ||
| 240 | .ltssm_shift = 24, | ||
| 241 | .lut_dbg = 0x407fc, | ||
| 242 | .ops = &ls_pcie_host_ops, | ||
| 243 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
| 244 | }; | ||
| 245 | |||
| 246 | static struct ls_pcie_drvdata ls2080_drvdata = { | ||
| 247 | .lut_offset = 0x80000, | ||
| 248 | .ltssm_shift = 0, | ||
| 249 | .lut_dbg = 0x7fc, | ||
| 250 | .ops = &ls_pcie_host_ops, | ||
| 251 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
| 252 | }; | ||
| 253 | |||
| 254 | static struct ls_pcie_drvdata ls2088_drvdata = { | ||
| 255 | .lut_offset = 0x80000, | ||
| 256 | .ltssm_shift = 0, | ||
| 257 | .lut_dbg = 0x407fc, | ||
| 258 | .ops = &ls_pcie_host_ops, | ||
| 259 | .dw_pcie_ops = &dw_ls_pcie_ops, | ||
| 260 | }; | ||
| 261 | |||
| 262 | static const struct of_device_id ls_pcie_of_match[] = { | ||
| 263 | { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata }, | ||
| 264 | { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, | ||
| 265 | { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, | ||
| 266 | { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata }, | ||
| 267 | { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, | ||
| 268 | { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, | ||
| 269 | { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata }, | ||
| 270 | { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata }, | ||
| 271 | { }, | ||
| 272 | }; | ||
| 273 | |||
| 274 | static int __init ls_add_pcie_port(struct ls_pcie *pcie) | ||
| 275 | { | ||
| 276 | struct dw_pcie *pci = pcie->pci; | ||
| 277 | struct pcie_port *pp = &pci->pp; | ||
| 278 | struct device *dev = pci->dev; | ||
| 279 | int ret; | ||
| 280 | |||
| 281 | pp->ops = pcie->drvdata->ops; | ||
| 282 | |||
| 283 | ret = dw_pcie_host_init(pp); | ||
| 284 | if (ret) { | ||
| 285 | dev_err(dev, "failed to initialize host\n"); | ||
| 286 | return ret; | ||
| 287 | } | ||
| 288 | |||
| 289 | return 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | static int __init ls_pcie_probe(struct platform_device *pdev) | ||
| 293 | { | ||
| 294 | struct device *dev = &pdev->dev; | ||
| 295 | struct dw_pcie *pci; | ||
| 296 | struct ls_pcie *pcie; | ||
| 297 | struct resource *dbi_base; | ||
| 298 | int ret; | ||
| 299 | |||
| 300 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 301 | if (!pcie) | ||
| 302 | return -ENOMEM; | ||
| 303 | |||
| 304 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 305 | if (!pci) | ||
| 306 | return -ENOMEM; | ||
| 307 | |||
| 308 | pcie->drvdata = of_device_get_match_data(dev); | ||
| 309 | |||
| 310 | pci->dev = dev; | ||
| 311 | pci->ops = pcie->drvdata->dw_pcie_ops; | ||
| 312 | |||
| 313 | pcie->pci = pci; | ||
| 314 | |||
| 315 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); | ||
| 316 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); | ||
| 317 | if (IS_ERR(pci->dbi_base)) | ||
| 318 | return PTR_ERR(pci->dbi_base); | ||
| 319 | |||
| 320 | pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset; | ||
| 321 | |||
| 322 | if (!ls_pcie_is_bridge(pcie)) | ||
| 323 | return -ENODEV; | ||
| 324 | |||
| 325 | platform_set_drvdata(pdev, pcie); | ||
| 326 | |||
| 327 | ret = ls_add_pcie_port(pcie); | ||
| 328 | if (ret < 0) | ||
| 329 | return ret; | ||
| 330 | |||
| 331 | return 0; | ||
| 332 | } | ||
| 333 | |||
| 334 | static struct platform_driver ls_pcie_driver = { | ||
| 335 | .driver = { | ||
| 336 | .name = "layerscape-pcie", | ||
| 337 | .of_match_table = ls_pcie_of_match, | ||
| 338 | .suppress_bind_attrs = true, | ||
| 339 | }, | ||
| 340 | }; | ||
| 341 | builtin_platform_driver_probe(ls_pcie_driver, ls_pcie_probe); | ||
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c new file mode 100644 index 000000000000..072fd7ecc29f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-armada8k.c | |||
| @@ -0,0 +1,282 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Marvell Armada-8K SoCs | ||
| 4 | * | ||
| 5 | * Armada-8K PCIe Glue Layer Source Code | ||
| 6 | * | ||
| 7 | * Copyright (C) 2016 Marvell Technology Group Ltd. | ||
| 8 | * | ||
| 9 | * Author: Yehuda Yitshak <yehuday@marvell.com> | ||
| 10 | * Author: Shadi Ammouri <shadi@marvell.com> | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/clk.h> | ||
| 14 | #include <linux/delay.h> | ||
| 15 | #include <linux/interrupt.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/of.h> | ||
| 19 | #include <linux/pci.h> | ||
| 20 | #include <linux/phy/phy.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | #include <linux/resource.h> | ||
| 23 | #include <linux/of_pci.h> | ||
| 24 | #include <linux/of_irq.h> | ||
| 25 | |||
| 26 | #include "pcie-designware.h" | ||
| 27 | |||
| 28 | struct armada8k_pcie { | ||
| 29 | struct dw_pcie *pci; | ||
| 30 | struct clk *clk; | ||
| 31 | struct clk *clk_reg; | ||
| 32 | }; | ||
| 33 | |||
| 34 | #define PCIE_VENDOR_REGS_OFFSET 0x8000 | ||
| 35 | |||
| 36 | #define PCIE_GLOBAL_CONTROL_REG (PCIE_VENDOR_REGS_OFFSET + 0x0) | ||
| 37 | #define PCIE_APP_LTSSM_EN BIT(2) | ||
| 38 | #define PCIE_DEVICE_TYPE_SHIFT 4 | ||
| 39 | #define PCIE_DEVICE_TYPE_MASK 0xF | ||
| 40 | #define PCIE_DEVICE_TYPE_RC 0x4 /* Root complex */ | ||
| 41 | |||
| 42 | #define PCIE_GLOBAL_STATUS_REG (PCIE_VENDOR_REGS_OFFSET + 0x8) | ||
| 43 | #define PCIE_GLB_STS_RDLH_LINK_UP BIT(1) | ||
| 44 | #define PCIE_GLB_STS_PHY_LINK_UP BIT(9) | ||
| 45 | |||
| 46 | #define PCIE_GLOBAL_INT_CAUSE1_REG (PCIE_VENDOR_REGS_OFFSET + 0x1C) | ||
| 47 | #define PCIE_GLOBAL_INT_MASK1_REG (PCIE_VENDOR_REGS_OFFSET + 0x20) | ||
| 48 | #define PCIE_INT_A_ASSERT_MASK BIT(9) | ||
| 49 | #define PCIE_INT_B_ASSERT_MASK BIT(10) | ||
| 50 | #define PCIE_INT_C_ASSERT_MASK BIT(11) | ||
| 51 | #define PCIE_INT_D_ASSERT_MASK BIT(12) | ||
| 52 | |||
| 53 | #define PCIE_ARCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x50) | ||
| 54 | #define PCIE_AWCACHE_TRC_REG (PCIE_VENDOR_REGS_OFFSET + 0x54) | ||
| 55 | #define PCIE_ARUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x5C) | ||
| 56 | #define PCIE_AWUSER_REG (PCIE_VENDOR_REGS_OFFSET + 0x60) | ||
| 57 | /* | ||
| 58 | * AR/AW cache defaults: normal memory, write-back, read/write | ||
| 59 | * allocate | ||
| 60 | */ | ||
| 61 | #define ARCACHE_DEFAULT_VALUE 0x3511 | ||
| 62 | #define AWCACHE_DEFAULT_VALUE 0x5311 | ||
| 63 | |||
| 64 | #define DOMAIN_OUTER_SHAREABLE 0x2 | ||
| 65 | #define AX_USER_DOMAIN_MASK 0x3 | ||
| 66 | #define AX_USER_DOMAIN_SHIFT 4 | ||
| 67 | |||
| 68 | #define to_armada8k_pcie(x) dev_get_drvdata((x)->dev) | ||
| 69 | |||
| 70 | static int armada8k_pcie_link_up(struct dw_pcie *pci) | ||
| 71 | { | ||
| 72 | u32 reg; | ||
| 73 | u32 mask = PCIE_GLB_STS_RDLH_LINK_UP | PCIE_GLB_STS_PHY_LINK_UP; | ||
| 74 | |||
| 75 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_STATUS_REG); | ||
| 76 | |||
| 77 | if ((reg & mask) == mask) | ||
| 78 | return 1; | ||
| 79 | |||
| 80 | dev_dbg(pci->dev, "No link detected (Global-Status: 0x%08x).\n", reg); | ||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | |||
| 84 | static void armada8k_pcie_establish_link(struct armada8k_pcie *pcie) | ||
| 85 | { | ||
| 86 | struct dw_pcie *pci = pcie->pci; | ||
| 87 | u32 reg; | ||
| 88 | |||
| 89 | if (!dw_pcie_link_up(pci)) { | ||
| 90 | /* Disable LTSSM state machine to enable configuration */ | ||
| 91 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); | ||
| 92 | reg &= ~(PCIE_APP_LTSSM_EN); | ||
| 93 | dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); | ||
| 94 | } | ||
| 95 | |||
| 96 | /* Set the device to root complex mode */ | ||
| 97 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); | ||
| 98 | reg &= ~(PCIE_DEVICE_TYPE_MASK << PCIE_DEVICE_TYPE_SHIFT); | ||
| 99 | reg |= PCIE_DEVICE_TYPE_RC << PCIE_DEVICE_TYPE_SHIFT; | ||
| 100 | dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); | ||
| 101 | |||
| 102 | /* Set the PCIe master AxCache attributes */ | ||
| 103 | dw_pcie_writel_dbi(pci, PCIE_ARCACHE_TRC_REG, ARCACHE_DEFAULT_VALUE); | ||
| 104 | dw_pcie_writel_dbi(pci, PCIE_AWCACHE_TRC_REG, AWCACHE_DEFAULT_VALUE); | ||
| 105 | |||
| 106 | /* Set the PCIe master AxDomain attributes */ | ||
| 107 | reg = dw_pcie_readl_dbi(pci, PCIE_ARUSER_REG); | ||
| 108 | reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); | ||
| 109 | reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; | ||
| 110 | dw_pcie_writel_dbi(pci, PCIE_ARUSER_REG, reg); | ||
| 111 | |||
| 112 | reg = dw_pcie_readl_dbi(pci, PCIE_AWUSER_REG); | ||
| 113 | reg &= ~(AX_USER_DOMAIN_MASK << AX_USER_DOMAIN_SHIFT); | ||
| 114 | reg |= DOMAIN_OUTER_SHAREABLE << AX_USER_DOMAIN_SHIFT; | ||
| 115 | dw_pcie_writel_dbi(pci, PCIE_AWUSER_REG, reg); | ||
| 116 | |||
| 117 | /* Enable INT A-D interrupts */ | ||
| 118 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG); | ||
| 119 | reg |= PCIE_INT_A_ASSERT_MASK | PCIE_INT_B_ASSERT_MASK | | ||
| 120 | PCIE_INT_C_ASSERT_MASK | PCIE_INT_D_ASSERT_MASK; | ||
| 121 | dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_MASK1_REG, reg); | ||
| 122 | |||
| 123 | if (!dw_pcie_link_up(pci)) { | ||
| 124 | /* Configuration done. Start LTSSM */ | ||
| 125 | reg = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_CONTROL_REG); | ||
| 126 | reg |= PCIE_APP_LTSSM_EN; | ||
| 127 | dw_pcie_writel_dbi(pci, PCIE_GLOBAL_CONTROL_REG, reg); | ||
| 128 | } | ||
| 129 | |||
| 130 | /* Wait until the link becomes active again */ | ||
| 131 | if (dw_pcie_wait_for_link(pci)) | ||
| 132 | dev_err(pci->dev, "Link not up after reconfiguration\n"); | ||
| 133 | } | ||
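The device-type update above is the classic clear-then-set field idiom. Newer kernel code often expresses it with FIELD_PREP() from <linux/bitfield.h>; a hedged sketch with demo macros mirroring the shift-4/mask-0xF layout (not the driver's actual names):

    #include <linux/bitfield.h>
    #include <linux/bits.h>
    #include <linux/types.h>

    #define DEMO_DEVICE_TYPE_MASK	GENMASK(7, 4)	/* 0xF << 4 */
    #define DEMO_DEVICE_TYPE_RC	0x4

    static u32 demo_set_rc_mode(u32 reg)
    {
    	reg &= ~DEMO_DEVICE_TYPE_MASK;
    	reg |= FIELD_PREP(DEMO_DEVICE_TYPE_MASK, DEMO_DEVICE_TYPE_RC);
    	return reg;
    }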
| 134 | |||
| 135 | static int armada8k_pcie_host_init(struct pcie_port *pp) | ||
| 136 | { | ||
| 137 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 138 | struct armada8k_pcie *pcie = to_armada8k_pcie(pci); | ||
| 139 | |||
| 140 | dw_pcie_setup_rc(pp); | ||
| 141 | armada8k_pcie_establish_link(pcie); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) | ||
| 147 | { | ||
| 148 | struct armada8k_pcie *pcie = arg; | ||
| 149 | struct dw_pcie *pci = pcie->pci; | ||
| 150 | u32 val; | ||
| 151 | |||
| 152 | 	/* | ||
| 153 | 	 * Interrupts are handled directly by the PCI device's own driver; | ||
| 154 | 	 * they are merely latched into the PCIe controller's cause | ||
| 155 | 	 * register as well, so simply clear them here. | ||
| 156 | 	 */ | ||
| 157 | val = dw_pcie_readl_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG); | ||
| 158 | dw_pcie_writel_dbi(pci, PCIE_GLOBAL_INT_CAUSE1_REG, val); | ||
| 159 | |||
| 160 | return IRQ_HANDLED; | ||
| 161 | } | ||
| 162 | |||
| 163 | static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { | ||
| 164 | .host_init = armada8k_pcie_host_init, | ||
| 165 | }; | ||
| 166 | |||
| 167 | static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, | ||
| 168 | struct platform_device *pdev) | ||
| 169 | { | ||
| 170 | struct dw_pcie *pci = pcie->pci; | ||
| 171 | struct pcie_port *pp = &pci->pp; | ||
| 172 | struct device *dev = &pdev->dev; | ||
| 173 | int ret; | ||
| 174 | |||
| 175 | pp->root_bus_nr = -1; | ||
| 176 | pp->ops = &armada8k_pcie_host_ops; | ||
| 177 | |||
| 178 | pp->irq = platform_get_irq(pdev, 0); | ||
| 179 | if (pp->irq < 0) { | ||
| 180 | dev_err(dev, "failed to get irq for port\n"); | ||
| 181 | return pp->irq; | ||
| 182 | } | ||
| 183 | |||
| 184 | ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, | ||
| 185 | IRQF_SHARED, "armada8k-pcie", pcie); | ||
| 186 | if (ret) { | ||
| 187 | dev_err(dev, "failed to request irq %d\n", pp->irq); | ||
| 188 | return ret; | ||
| 189 | } | ||
| 190 | |||
| 191 | ret = dw_pcie_host_init(pp); | ||
| 192 | if (ret) { | ||
| 193 | dev_err(dev, "failed to initialize host: %d\n", ret); | ||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | return 0; | ||
| 198 | } | ||
| 199 | |||
| 200 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 201 | .link_up = armada8k_pcie_link_up, | ||
| 202 | }; | ||
| 203 | |||
| 204 | static int armada8k_pcie_probe(struct platform_device *pdev) | ||
| 205 | { | ||
| 206 | struct dw_pcie *pci; | ||
| 207 | struct armada8k_pcie *pcie; | ||
| 208 | struct device *dev = &pdev->dev; | ||
| 209 | struct resource *base; | ||
| 210 | int ret; | ||
| 211 | |||
| 212 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 213 | if (!pcie) | ||
| 214 | return -ENOMEM; | ||
| 215 | |||
| 216 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 217 | if (!pci) | ||
| 218 | return -ENOMEM; | ||
| 219 | |||
| 220 | pci->dev = dev; | ||
| 221 | pci->ops = &dw_pcie_ops; | ||
| 222 | |||
| 223 | pcie->pci = pci; | ||
| 224 | |||
| 225 | pcie->clk = devm_clk_get(dev, NULL); | ||
| 226 | if (IS_ERR(pcie->clk)) | ||
| 227 | return PTR_ERR(pcie->clk); | ||
| 228 | |||
| 229 | ret = clk_prepare_enable(pcie->clk); | ||
| 230 | if (ret) | ||
| 231 | return ret; | ||
| 232 | |||
| 233 | pcie->clk_reg = devm_clk_get(dev, "reg"); | ||
| 234 | if (pcie->clk_reg == ERR_PTR(-EPROBE_DEFER)) { | ||
| 235 | ret = -EPROBE_DEFER; | ||
| 236 | goto fail; | ||
| 237 | } | ||
| 238 | if (!IS_ERR(pcie->clk_reg)) { | ||
| 239 | ret = clk_prepare_enable(pcie->clk_reg); | ||
| 240 | if (ret) | ||
| 241 | goto fail_clkreg; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* Get the dw-pcie unit configuration/control registers base. */ | ||
| 245 | base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); | ||
| 246 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); | ||
| 247 | if (IS_ERR(pci->dbi_base)) { | ||
| 248 | dev_err(dev, "couldn't remap ctrl registers\n"); | ||
| 249 | ret = PTR_ERR(pci->dbi_base); | ||
| 250 | goto fail_clkreg; | ||
| 251 | } | ||
| 252 | |||
| 253 | platform_set_drvdata(pdev, pcie); | ||
| 254 | |||
| 255 | ret = armada8k_add_pcie_port(pcie, pdev); | ||
| 256 | if (ret) | ||
| 257 | goto fail_clkreg; | ||
| 258 | |||
| 259 | return 0; | ||
| 260 | |||
| 261 | fail_clkreg: | ||
| 262 | clk_disable_unprepare(pcie->clk_reg); | ||
| 263 | fail: | ||
| 264 | clk_disable_unprepare(pcie->clk); | ||
| 265 | |||
| 266 | return ret; | ||
| 267 | } | ||
| 268 | |||
| 269 | static const struct of_device_id armada8k_pcie_of_match[] = { | ||
| 270 | { .compatible = "marvell,armada8k-pcie", }, | ||
| 271 | {}, | ||
| 272 | }; | ||
| 273 | |||
| 274 | static struct platform_driver armada8k_pcie_driver = { | ||
| 275 | .probe = armada8k_pcie_probe, | ||
| 276 | .driver = { | ||
| 277 | .name = "armada8k-pcie", | ||
| 278 | .of_match_table = of_match_ptr(armada8k_pcie_of_match), | ||
| 279 | .suppress_bind_attrs = true, | ||
| 280 | }, | ||
| 281 | }; | ||
| 282 | builtin_platform_driver(armada8k_pcie_driver); | ||
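| | |||
| | /* | ||
| | * Hypothetical devicetree fragment this driver could bind to. Names, | ||
| | * addresses and specifiers are illustrative only and abridged: they | ||
| | * cover just what probe() above asks for ("ctrl" reg, one unnamed | ||
| | * clock, irq 0); the shared DesignWare core expects more (config | ||
| | * space, ranges). | ||
| | * | ||
| | *     pcie@f2600000 { | ||
| | *         compatible = "marvell,armada8k-pcie"; | ||
| | *         reg = <0xf2600000 0x10000>; | ||
| | *         reg-names = "ctrl"; | ||
| | *         clocks = <&pcie_clk>; | ||
| | *         interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; | ||
| | *     }; | ||
| | */ | ||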
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c new file mode 100644 index 000000000000..321b56cfd5d0 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-artpec6.c | |||
| @@ -0,0 +1,618 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Axis ARTPEC-6 SoC | ||
| 4 | * | ||
| 5 | * Author: Niklas Cassel <niklas.cassel@axis.com> | ||
| 6 | * | ||
| 7 | * Based on work done by Phil Edworthy <phil@edworthys.org> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/of_device.h> | ||
| 14 | #include <linux/pci.h> | ||
| 15 | #include <linux/platform_device.h> | ||
| 16 | #include <linux/resource.h> | ||
| 17 | #include <linux/signal.h> | ||
| 18 | #include <linux/types.h> | ||
| 19 | #include <linux/interrupt.h> | ||
| 20 | #include <linux/mfd/syscon.h> | ||
| 21 | #include <linux/regmap.h> | ||
| 22 | |||
| 23 | #include "pcie-designware.h" | ||
| 24 | |||
| 25 | #define to_artpec6_pcie(x) dev_get_drvdata((x)->dev) | ||
| 26 | |||
| 27 | enum artpec_pcie_variants { | ||
| 28 | ARTPEC6, | ||
| 29 | ARTPEC7, | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct artpec6_pcie { | ||
| 33 | struct dw_pcie *pci; | ||
| 34 | struct regmap *regmap; /* DT axis,syscon-pcie */ | ||
| 35 | void __iomem *phy_base; /* DT phy */ | ||
| 36 | enum artpec_pcie_variants variant; | ||
| 37 | enum dw_pcie_device_mode mode; | ||
| 38 | }; | ||
| 39 | |||
| 40 | struct artpec_pcie_of_data { | ||
| 41 | enum artpec_pcie_variants variant; | ||
| 42 | enum dw_pcie_device_mode mode; | ||
| 43 | }; | ||
| 44 | |||
| 45 | static const struct of_device_id artpec6_pcie_of_match[]; | ||
| 46 | |||
| 47 | /* PCIe Port Logic registers (memory-mapped) */ | ||
| 48 | #define PL_OFFSET 0x700 | ||
| 49 | |||
| 50 | #define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc) | ||
| 51 | #define ACK_N_FTS_MASK GENMASK(15, 8) | ||
| 52 | #define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK) | ||
| 53 | |||
| 54 | #define FAST_TRAINING_SEQ_MASK GENMASK(7, 0) | ||
| 55 | #define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK) | ||
| 56 | |||
| 57 | /* ARTPEC-6 specific registers */ | ||
| 58 | #define PCIECFG 0x18 | ||
| 59 | #define PCIECFG_DBG_OEN BIT(24) | ||
| 60 | #define PCIECFG_CORE_RESET_REQ BIT(21) | ||
| 61 | #define PCIECFG_LTSSM_ENABLE BIT(20) | ||
| 62 | #define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16) | ||
| 63 | #define PCIECFG_CLKREQ_B BIT(11) | ||
| 64 | #define PCIECFG_REFCLK_ENABLE BIT(10) | ||
| 65 | #define PCIECFG_PLL_ENABLE BIT(9) | ||
| 66 | #define PCIECFG_PCLK_ENABLE BIT(8) | ||
| 67 | #define PCIECFG_RISRCREN BIT(4) | ||
| 68 | #define PCIECFG_MODE_TX_DRV_EN BIT(3) | ||
| 69 | #define PCIECFG_CISRREN BIT(2) | ||
| 70 | #define PCIECFG_MACRO_ENABLE BIT(0) | ||
| 71 | /* ARTPEC-7 specific fields */ | ||
| 72 | #define PCIECFG_REFCLKSEL BIT(23) | ||
| 73 | #define PCIECFG_NOC_RESET BIT(3) | ||
| 74 | |||
| 75 | #define PCIESTAT 0x1c | ||
| 76 | /* ARTPEC-7 specific fields */ | ||
| 77 | #define PCIESTAT_EXTREFCLK BIT(3) | ||
| 78 | |||
| 79 | #define NOCCFG 0x40 | ||
| 80 | #define NOCCFG_ENABLE_CLK_PCIE BIT(4) | ||
| 81 | #define NOCCFG_POWER_PCIE_IDLEACK BIT(3) | ||
| 82 | #define NOCCFG_POWER_PCIE_IDLE BIT(2) | ||
| 83 | #define NOCCFG_POWER_PCIE_IDLEREQ BIT(1) | ||
| 84 | |||
| 85 | #define PHY_STATUS 0x118 | ||
| 86 | #define PHY_COSPLLLOCK BIT(0) | ||
| 87 | |||
| 88 | #define PHY_TX_ASIC_OUT 0x4040 | ||
| 89 | #define PHY_TX_ASIC_OUT_TX_ACK BIT(0) | ||
| 90 | |||
| 91 | #define PHY_RX_ASIC_OUT 0x405c | ||
| 92 | #define PHY_RX_ASIC_OUT_ACK BIT(0) | ||
| 93 | |||
| 94 | static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset) | ||
| 95 | { | ||
| 96 | u32 val; | ||
| 97 | |||
| 98 | regmap_read(artpec6_pcie->regmap, offset, &val); | ||
| 99 | return val; | ||
| 100 | } | ||
| 101 | |||
| 102 | static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u32 val) | ||
| 103 | { | ||
| 104 | regmap_write(artpec6_pcie->regmap, offset, val); | ||
| 105 | } | ||
| 106 | |||
| 107 | static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr) | ||
| 108 | { | ||
| 109 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
| 110 | struct pcie_port *pp = &pci->pp; | ||
| 111 | struct dw_pcie_ep *ep = &pci->ep; | ||
| 112 | |||
| 113 | switch (artpec6_pcie->mode) { | ||
| 114 | case DW_PCIE_RC_TYPE: | ||
| 115 | return pci_addr - pp->cfg0_base; | ||
| 116 | case DW_PCIE_EP_TYPE: | ||
| 117 | return pci_addr - ep->phys_base; | ||
| 118 | default: | ||
| 119 | dev_err(pci->dev, "UNKNOWN device type\n"); | ||
| 120 | } | ||
| 121 | return pci_addr; | ||
| 122 | } | ||
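| | |||
| | /* | ||
| | * Worked example with made-up addresses: in RC mode with | ||
| | * pp->cfg0_base = 0xf8000000, a CPU-side address of 0xf8001000 comes | ||
| | * out as 0x1000 on the PCI side; the fixup simply strips the constant | ||
| | * offset the SoC interconnect puts in front of the controller windows. | ||
| | */ | ||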
| 123 | |||
| 124 | static int artpec6_pcie_establish_link(struct dw_pcie *pci) | ||
| 125 | { | ||
| 126 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
| 127 | u32 val; | ||
| 128 | |||
| 129 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 130 | val |= PCIECFG_LTSSM_ENABLE; | ||
| 131 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 132 | |||
| 133 | return 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | static void artpec6_pcie_stop_link(struct dw_pcie *pci) | ||
| 137 | { | ||
| 138 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
| 139 | u32 val; | ||
| 140 | |||
| 141 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 142 | val &= ~PCIECFG_LTSSM_ENABLE; | ||
| 143 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 144 | } | ||
| 145 | |||
| 146 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 147 | .cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup, | ||
| 148 | .start_link = artpec6_pcie_establish_link, | ||
| 149 | .stop_link = artpec6_pcie_stop_link, | ||
| 150 | }; | ||
| 151 | |||
| 152 | static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie) | ||
| 153 | { | ||
| 154 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 155 | struct device *dev = pci->dev; | ||
| 156 | u32 val; | ||
| 157 | unsigned int retries; | ||
| 158 | |||
| 159 | retries = 50; | ||
| 160 | do { | ||
| 161 | usleep_range(1000, 2000); | ||
| 162 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 163 | retries--; | ||
| 164 | } while (retries && | ||
| 165 | (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); | ||
| 166 | if (!retries) | ||
| 167 | dev_err(dev, "PCIe clock manager did not leave idle state\n"); | ||
| 168 | |||
| 169 | retries = 50; | ||
| 170 | do { | ||
| 171 | usleep_range(1000, 2000); | ||
| 172 | val = readl(artpec6_pcie->phy_base + PHY_STATUS); | ||
| 173 | retries--; | ||
| 174 | } while (retries && !(val & PHY_COSPLLLOCK)); | ||
| 175 | if (!retries) | ||
| 176 | dev_err(dev, "PHY PLL did not lock\n"); | ||
| 177 | } | ||
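| | |||
| | /* | ||
| | * The bounded poll above waits at most about 50 * 2 ms = 100 ms (and | ||
| | * at least 50 ms) before complaining. Helpers such as | ||
| | * readl_poll_timeout() express the same pattern, but the first loop | ||
| | * goes through regmap rather than a plain MMIO read. | ||
| | */ | ||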
| 178 | |||
| 179 | static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie) | ||
| 180 | { | ||
| 181 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 182 | struct device *dev = pci->dev; | ||
| 183 | u32 val; | ||
| 184 | u16 phy_status_tx, phy_status_rx; | ||
| 185 | unsigned int retries; | ||
| 186 | |||
| 187 | retries = 50; | ||
| 188 | do { | ||
| 189 | usleep_range(1000, 2000); | ||
| 190 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 191 | retries--; | ||
| 192 | } while (retries && | ||
| 193 | (val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE))); | ||
| 194 | if (!retries) | ||
| 195 | dev_err(dev, "PCIe clock manager did not leave idle state\n"); | ||
| 196 | |||
| 197 | retries = 50; | ||
| 198 | do { | ||
| 199 | usleep_range(1000, 2000); | ||
| 200 | phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT); | ||
| 201 | phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT); | ||
| 202 | retries--; | ||
| 203 | } while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) || | ||
| 204 | (phy_status_rx & PHY_RX_ASIC_OUT_ACK))); | ||
| 205 | if (!retries) | ||
| 206 | dev_err(dev, "PHY did not enter Pn state\n"); | ||
| 207 | } | ||
| 208 | |||
| 209 | static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie) | ||
| 210 | { | ||
| 211 | switch (artpec6_pcie->variant) { | ||
| 212 | case ARTPEC6: | ||
| 213 | artpec6_pcie_wait_for_phy_a6(artpec6_pcie); | ||
| 214 | break; | ||
| 215 | case ARTPEC7: | ||
| 216 | artpec6_pcie_wait_for_phy_a7(artpec6_pcie); | ||
| 217 | break; | ||
| 218 | } | ||
| 219 | } | ||
| 220 | |||
| 221 | static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie) | ||
| 222 | { | ||
| 223 | u32 val; | ||
| 224 | |||
| 225 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 226 | val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ | ||
| 227 | PCIECFG_MODE_TX_DRV_EN | | ||
| 228 | PCIECFG_CISRREN | /* Reference clock term. 100 Ohm */ | ||
| 229 | PCIECFG_MACRO_ENABLE; | ||
| 230 | val |= PCIECFG_REFCLK_ENABLE; | ||
| 231 | val &= ~PCIECFG_DBG_OEN; | ||
| 232 | val &= ~PCIECFG_CLKREQ_B; | ||
| 233 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 234 | usleep_range(5000, 6000); | ||
| 235 | |||
| 236 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 237 | val |= NOCCFG_ENABLE_CLK_PCIE; | ||
| 238 | artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); | ||
| 239 | usleep_range(20, 30); | ||
| 240 | |||
| 241 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 242 | val |= PCIECFG_PCLK_ENABLE | PCIECFG_PLL_ENABLE; | ||
| 243 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 244 | usleep_range(6000, 7000); | ||
| 245 | |||
| 246 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 247 | val &= ~NOCCFG_POWER_PCIE_IDLEREQ; | ||
| 248 | artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); | ||
| 249 | } | ||
| 250 | |||
| 251 | static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie) | ||
| 252 | { | ||
| 253 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 254 | u32 val; | ||
| 255 | bool extrefclk; | ||
| 256 | |||
| 257 | /* Check if external reference clock is connected */ | ||
| 258 | val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT); | ||
| 259 | extrefclk = !!(val & PCIESTAT_EXTREFCLK); | ||
| 260 | dev_dbg(pci->dev, "Using reference clock: %s\n", | ||
| 261 | extrefclk ? "external" : "internal"); | ||
| 262 | |||
| 263 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 264 | val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */ | ||
| 265 | PCIECFG_PCLK_ENABLE; | ||
| 266 | if (extrefclk) | ||
| 267 | val |= PCIECFG_REFCLKSEL; | ||
| 268 | else | ||
| 269 | val &= ~PCIECFG_REFCLKSEL; | ||
| 270 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 271 | usleep_range(10, 20); | ||
| 272 | |||
| 273 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 274 | val |= NOCCFG_ENABLE_CLK_PCIE; | ||
| 275 | artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); | ||
| 276 | usleep_range(20, 30); | ||
| 277 | |||
| 278 | val = artpec6_pcie_readl(artpec6_pcie, NOCCFG); | ||
| 279 | val &= ~NOCCFG_POWER_PCIE_IDLEREQ; | ||
| 280 | artpec6_pcie_writel(artpec6_pcie, NOCCFG, val); | ||
| 281 | } | ||
| 282 | |||
| 283 | static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie) | ||
| 284 | { | ||
| 285 | switch (artpec6_pcie->variant) { | ||
| 286 | case ARTPEC6: | ||
| 287 | artpec6_pcie_init_phy_a6(artpec6_pcie); | ||
| 288 | break; | ||
| 289 | case ARTPEC7: | ||
| 290 | artpec6_pcie_init_phy_a7(artpec6_pcie); | ||
| 291 | break; | ||
| 292 | } | ||
| 293 | } | ||
| 294 | |||
| 295 | static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie) | ||
| 296 | { | ||
| 297 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 298 | u32 val; | ||
| 299 | |||
| 300 | if (artpec6_pcie->variant != ARTPEC7) | ||
| 301 | return; | ||
| 302 | |||
| 303 | /* | ||
| 304 | * Increase the N_FTS (Number of Fast Training Sequences) | ||
| 305 | * to be transmitted when transitioning from L0s to L0. | ||
| 306 | */ | ||
| 307 | val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF); | ||
| 308 | val &= ~ACK_N_FTS_MASK; | ||
| 309 | val |= ACK_N_FTS(180); | ||
| 310 | dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val); | ||
| 311 | |||
| 312 | /* | ||
| 313 | * Set the Number of Fast Training Sequences that the core | ||
| 314 | * advertises as its N_FTS during Gen2 or Gen3 link training. | ||
| 315 | */ | ||
| 316 | val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
| 317 | val &= ~FAST_TRAINING_SEQ_MASK; | ||
| 318 | val |= FAST_TRAINING_SEQ(180); | ||
| 319 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); | ||
| 320 | } | ||
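| | |||
| | /* | ||
| | * Both writes use the usual read-modify-write shape: clear the field | ||
| | * with its GENMASK() and OR in the encoded value. As a concrete | ||
| | * example, ACK_N_FTS(180) is (180 << 8) & GENMASK(15, 8) = 0xb400, so | ||
| | * a register reading 0x0000ff34 becomes 0x0000b434 after the update. | ||
| | */ | ||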
| 321 | |||
| 322 | static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie) | ||
| 323 | { | ||
| 324 | u32 val; | ||
| 325 | |||
| 326 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 327 | switch (artpec6_pcie->variant) { | ||
| 328 | case ARTPEC6: | ||
| 329 | val |= PCIECFG_CORE_RESET_REQ; | ||
| 330 | break; | ||
| 331 | case ARTPEC7: | ||
| 332 | val &= ~PCIECFG_NOC_RESET; | ||
| 333 | break; | ||
| 334 | } | ||
| 335 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 336 | } | ||
| 337 | |||
| 338 | static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie) | ||
| 339 | { | ||
| 340 | u32 val; | ||
| 341 | |||
| 342 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 343 | switch (artpec6_pcie->variant) { | ||
| 344 | case ARTPEC6: | ||
| 345 | val &= ~PCIECFG_CORE_RESET_REQ; | ||
| 346 | break; | ||
| 347 | case ARTPEC7: | ||
| 348 | val |= PCIECFG_NOC_RESET; | ||
| 349 | break; | ||
| 350 | } | ||
| 351 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 352 | usleep_range(100, 200); | ||
| 353 | } | ||
| 354 | |||
| 355 | static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie) | ||
| 356 | { | ||
| 357 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 358 | struct pcie_port *pp = &pci->pp; | ||
| 359 | |||
| 360 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 361 | dw_pcie_msi_init(pp); | ||
| 362 | } | ||
| 363 | |||
| 364 | static int artpec6_pcie_host_init(struct pcie_port *pp) | ||
| 365 | { | ||
| 366 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 367 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
| 368 | |||
| 369 | artpec6_pcie_assert_core_reset(artpec6_pcie); | ||
| 370 | artpec6_pcie_init_phy(artpec6_pcie); | ||
| 371 | artpec6_pcie_deassert_core_reset(artpec6_pcie); | ||
| 372 | artpec6_pcie_wait_for_phy(artpec6_pcie); | ||
| 373 | artpec6_pcie_set_nfts(artpec6_pcie); | ||
| 374 | dw_pcie_setup_rc(pp); | ||
| 375 | artpec6_pcie_establish_link(pci); | ||
| 376 | dw_pcie_wait_for_link(pci); | ||
| 377 | artpec6_pcie_enable_interrupts(artpec6_pcie); | ||
| 378 | |||
| 379 | return 0; | ||
| 380 | } | ||
| 381 | |||
| 382 | static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { | ||
| 383 | .host_init = artpec6_pcie_host_init, | ||
| 384 | }; | ||
| 385 | |||
| 386 | static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, | ||
| 387 | struct platform_device *pdev) | ||
| 388 | { | ||
| 389 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 390 | struct pcie_port *pp = &pci->pp; | ||
| 391 | struct device *dev = pci->dev; | ||
| 392 | int ret; | ||
| 393 | |||
| 394 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 395 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
| 396 | if (pp->msi_irq < 0) { | ||
| 397 | dev_err(dev, "failed to get MSI irq\n"); | ||
| 398 | return pp->msi_irq; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | |||
| 402 | pp->root_bus_nr = -1; | ||
| 403 | pp->ops = &artpec6_pcie_host_ops; | ||
| 404 | |||
| 405 | ret = dw_pcie_host_init(pp); | ||
| 406 | if (ret) { | ||
| 407 | dev_err(dev, "failed to initialize host\n"); | ||
| 408 | return ret; | ||
| 409 | } | ||
| 410 | |||
| 411 | return 0; | ||
| 412 | } | ||
| 413 | |||
| 414 | static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep) | ||
| 415 | { | ||
| 416 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 417 | struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci); | ||
| 418 | enum pci_barno bar; | ||
| 419 | |||
| 420 | artpec6_pcie_assert_core_reset(artpec6_pcie); | ||
| 421 | artpec6_pcie_init_phy(artpec6_pcie); | ||
| 422 | artpec6_pcie_deassert_core_reset(artpec6_pcie); | ||
| 423 | artpec6_pcie_wait_for_phy(artpec6_pcie); | ||
| 424 | artpec6_pcie_set_nfts(artpec6_pcie); | ||
| 425 | |||
| 426 | for (bar = BAR_0; bar <= BAR_5; bar++) | ||
| 427 | dw_pcie_ep_reset_bar(pci, bar); | ||
| 428 | } | ||
| 429 | |||
| 430 | static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 431 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
| 432 | { | ||
| 433 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 434 | |||
| 435 | switch (type) { | ||
| 436 | case PCI_EPC_IRQ_LEGACY: | ||
| 437 | dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); | ||
| 438 | return -EINVAL; | ||
| 439 | case PCI_EPC_IRQ_MSI: | ||
| 440 | return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); | ||
| 441 | default: | ||
| 442 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
| 443 | } | ||
| 444 | |||
| 445 | return 0; | ||
| 446 | } | ||
| 447 | |||
| 448 | static struct dw_pcie_ep_ops pcie_ep_ops = { | ||
| 449 | .ep_init = artpec6_pcie_ep_init, | ||
| 450 | .raise_irq = artpec6_pcie_raise_irq, | ||
| 451 | }; | ||
| 452 | |||
| 453 | static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, | ||
| 454 | struct platform_device *pdev) | ||
| 455 | { | ||
| 456 | int ret; | ||
| 457 | struct dw_pcie_ep *ep; | ||
| 458 | struct resource *res; | ||
| 459 | struct device *dev = &pdev->dev; | ||
| 460 | struct dw_pcie *pci = artpec6_pcie->pci; | ||
| 461 | |||
| 462 | ep = &pci->ep; | ||
| 463 | ep->ops = &pcie_ep_ops; | ||
| 464 | |||
| 465 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); | ||
| 466 | pci->dbi_base2 = devm_ioremap_resource(dev, res); | ||
| 467 | if (IS_ERR(pci->dbi_base2)) | ||
| 468 | return PTR_ERR(pci->dbi_base2); | ||
| 469 | |||
| 470 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
| 471 | if (!res) | ||
| 472 | return -EINVAL; | ||
| 473 | |||
| 474 | ep->phys_base = res->start; | ||
| 475 | ep->addr_size = resource_size(res); | ||
| 476 | |||
| 477 | ret = dw_pcie_ep_init(ep); | ||
| 478 | if (ret) { | ||
| 479 | dev_err(dev, "failed to initialize endpoint\n"); | ||
| 480 | return ret; | ||
| 481 | } | ||
| 482 | |||
| 483 | return 0; | ||
| 484 | } | ||
| 485 | |||
| 486 | static int artpec6_pcie_probe(struct platform_device *pdev) | ||
| 487 | { | ||
| 488 | struct device *dev = &pdev->dev; | ||
| 489 | struct dw_pcie *pci; | ||
| 490 | struct artpec6_pcie *artpec6_pcie; | ||
| 491 | struct resource *dbi_base; | ||
| 492 | struct resource *phy_base; | ||
| 493 | int ret; | ||
| 494 | const struct of_device_id *match; | ||
| 495 | const struct artpec_pcie_of_data *data; | ||
| 496 | enum artpec_pcie_variants variant; | ||
| 497 | enum dw_pcie_device_mode mode; | ||
| 498 | |||
| 499 | match = of_match_device(artpec6_pcie_of_match, dev); | ||
| 500 | if (!match) | ||
| 501 | return -EINVAL; | ||
| 502 | |||
| 503 | data = (struct artpec_pcie_of_data *)match->data; | ||
| 504 | variant = (enum artpec_pcie_variants)data->variant; | ||
| 505 | mode = (enum dw_pcie_device_mode)data->mode; | ||
| 506 | |||
| 507 | artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL); | ||
| 508 | if (!artpec6_pcie) | ||
| 509 | return -ENOMEM; | ||
| 510 | |||
| 511 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 512 | if (!pci) | ||
| 513 | return -ENOMEM; | ||
| 514 | |||
| 515 | pci->dev = dev; | ||
| 516 | pci->ops = &dw_pcie_ops; | ||
| 517 | |||
| 518 | artpec6_pcie->pci = pci; | ||
| 519 | artpec6_pcie->variant = variant; | ||
| 520 | artpec6_pcie->mode = mode; | ||
| 521 | |||
| 522 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 523 | pci->dbi_base = devm_ioremap_resource(dev, dbi_base); | ||
| 524 | if (IS_ERR(pci->dbi_base)) | ||
| 525 | return PTR_ERR(pci->dbi_base); | ||
| 526 | |||
| 527 | phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); | ||
| 528 | artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); | ||
| 529 | if (IS_ERR(artpec6_pcie->phy_base)) | ||
| 530 | return PTR_ERR(artpec6_pcie->phy_base); | ||
| 531 | |||
| 532 | artpec6_pcie->regmap = | ||
| 533 | syscon_regmap_lookup_by_phandle(dev->of_node, | ||
| 534 | "axis,syscon-pcie"); | ||
| 535 | if (IS_ERR(artpec6_pcie->regmap)) | ||
| 536 | return PTR_ERR(artpec6_pcie->regmap); | ||
| 537 | |||
| 538 | platform_set_drvdata(pdev, artpec6_pcie); | ||
| 539 | |||
| 540 | switch (artpec6_pcie->mode) { | ||
| 541 | case DW_PCIE_RC_TYPE: | ||
| 542 | if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST)) | ||
| 543 | return -ENODEV; | ||
| 544 | |||
| 545 | ret = artpec6_add_pcie_port(artpec6_pcie, pdev); | ||
| 546 | if (ret < 0) | ||
| 547 | return ret; | ||
| 548 | break; | ||
| 549 | case DW_PCIE_EP_TYPE: { | ||
| 550 | u32 val; | ||
| 551 | |||
| 552 | if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP)) | ||
| 553 | return -ENODEV; | ||
| 554 | |||
| 555 | val = artpec6_pcie_readl(artpec6_pcie, PCIECFG); | ||
| 556 | val &= ~PCIECFG_DEVICE_TYPE_MASK; | ||
| 557 | artpec6_pcie_writel(artpec6_pcie, PCIECFG, val); | ||
| 558 | ret = artpec6_add_pcie_ep(artpec6_pcie, pdev); | ||
| 559 | if (ret < 0) | ||
| 560 | return ret; | ||
| 561 | break; | ||
| 562 | } | ||
| 563 | default: | ||
| 564 | dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode); | ||
| 565 | } | ||
| 566 | |||
| 567 | return 0; | ||
| 568 | } | ||
| 569 | |||
| 570 | static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = { | ||
| 571 | .variant = ARTPEC6, | ||
| 572 | .mode = DW_PCIE_RC_TYPE, | ||
| 573 | }; | ||
| 574 | |||
| 575 | static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = { | ||
| 576 | .variant = ARTPEC6, | ||
| 577 | .mode = DW_PCIE_EP_TYPE, | ||
| 578 | }; | ||
| 579 | |||
| 580 | static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = { | ||
| 581 | .variant = ARTPEC7, | ||
| 582 | .mode = DW_PCIE_RC_TYPE, | ||
| 583 | }; | ||
| 584 | |||
| 585 | static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = { | ||
| 586 | .variant = ARTPEC7, | ||
| 587 | .mode = DW_PCIE_EP_TYPE, | ||
| 588 | }; | ||
| 589 | |||
| 590 | static const struct of_device_id artpec6_pcie_of_match[] = { | ||
| 591 | { | ||
| 592 | .compatible = "axis,artpec6-pcie", | ||
| 593 | .data = &artpec6_pcie_rc_of_data, | ||
| 594 | }, | ||
| 595 | { | ||
| 596 | .compatible = "axis,artpec6-pcie-ep", | ||
| 597 | .data = &artpec6_pcie_ep_of_data, | ||
| 598 | }, | ||
| 599 | { | ||
| 600 | .compatible = "axis,artpec7-pcie", | ||
| 601 | .data = &artpec7_pcie_rc_of_data, | ||
| 602 | }, | ||
| 603 | { | ||
| 604 | .compatible = "axis,artpec7-pcie-ep", | ||
| 605 | .data = &artpec7_pcie_ep_of_data, | ||
| 606 | }, | ||
| 607 | {}, | ||
| 608 | }; | ||
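| | |||
| | /* | ||
| | * One driver, four compatibles: each entry's .data carries a | ||
| | * struct artpec_pcie_of_data, so probe() recovers both the silicon | ||
| | * variant and the RC/EP role from a single of_match_device() lookup | ||
| | * instead of comparing compatible strings by hand. | ||
| | */ | ||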
| 609 | |||
| 610 | static struct platform_driver artpec6_pcie_driver = { | ||
| 611 | .probe = artpec6_pcie_probe, | ||
| 612 | .driver = { | ||
| 613 | .name = "artpec6-pcie", | ||
| 614 | .of_match_table = artpec6_pcie_of_match, | ||
| 615 | .suppress_bind_attrs = true, | ||
| 616 | }, | ||
| 617 | }; | ||
| 618 | builtin_platform_driver(artpec6_pcie_driver); | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c new file mode 100644 index 000000000000..1eec4415a77f --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c | |||
| @@ -0,0 +1,422 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Synopsys DesignWare PCIe Endpoint controller driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2017 Texas Instruments | ||
| 6 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/of.h> | ||
| 10 | |||
| 11 | #include "pcie-designware.h" | ||
| 12 | #include <linux/pci-epc.h> | ||
| 13 | #include <linux/pci-epf.h> | ||
| 14 | |||
| 15 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | ||
| 16 | { | ||
| 17 | struct pci_epc *epc = ep->epc; | ||
| 18 | |||
| 19 | pci_epc_linkup(epc); | ||
| 20 | } | ||
| 21 | |||
| 22 | static void __dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar, | ||
| 23 | int flags) | ||
| 24 | { | ||
| 25 | u32 reg; | ||
| 26 | |||
| 27 | reg = PCI_BASE_ADDRESS_0 + (4 * bar); | ||
| 28 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 29 | dw_pcie_writel_dbi2(pci, reg, 0x0); | ||
| 30 | dw_pcie_writel_dbi(pci, reg, 0x0); | ||
| 31 | if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { | ||
| 32 | dw_pcie_writel_dbi2(pci, reg + 4, 0x0); | ||
| 33 | dw_pcie_writel_dbi(pci, reg + 4, 0x0); | ||
| 34 | } | ||
| 35 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 36 | } | ||
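| | |||
| | /* | ||
| | * The dbi2 space shadows the BAR registers: writes through dbi2 set | ||
| | * the BAR mask (which low address bits are hardwired to zero, i.e. | ||
| | * the BAR size), while the plain dbi write sets the BAR value the | ||
| | * host sees. Zeroing both therefore disables the BAR completely. | ||
| | */ | ||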
| 37 | |||
| 38 | void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) | ||
| 39 | { | ||
| 40 | __dw_pcie_ep_reset_bar(pci, bar, 0); | ||
| 41 | } | ||
| 42 | |||
| 43 | static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no, | ||
| 44 | struct pci_epf_header *hdr) | ||
| 45 | { | ||
| 46 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 47 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 48 | |||
| 49 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 50 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid); | ||
| 51 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid); | ||
| 52 | dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid); | ||
| 53 | dw_pcie_writeb_dbi(pci, PCI_CLASS_PROG, hdr->progif_code); | ||
| 54 | dw_pcie_writew_dbi(pci, PCI_CLASS_DEVICE, | ||
| 55 | hdr->subclass_code | hdr->baseclass_code << 8); | ||
| 56 | dw_pcie_writeb_dbi(pci, PCI_CACHE_LINE_SIZE, | ||
| 57 | hdr->cache_line_size); | ||
| 58 | dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_VENDOR_ID, | ||
| 59 | hdr->subsys_vendor_id); | ||
| 60 | dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id); | ||
| 61 | dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN, | ||
| 62 | hdr->interrupt_pin); | ||
| 63 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | } | ||
| 67 | |||
| 68 | static int dw_pcie_ep_inbound_atu(struct dw_pcie_ep *ep, enum pci_barno bar, | ||
| 69 | dma_addr_t cpu_addr, | ||
| 70 | enum dw_pcie_as_type as_type) | ||
| 71 | { | ||
| 72 | int ret; | ||
| 73 | u32 free_win; | ||
| 74 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 75 | |||
| 76 | free_win = find_first_zero_bit(ep->ib_window_map, ep->num_ib_windows); | ||
| 77 | if (free_win >= ep->num_ib_windows) { | ||
| 78 | dev_err(pci->dev, "No free inbound window\n"); | ||
| 79 | return -EINVAL; | ||
| 80 | } | ||
| 81 | |||
| 82 | ret = dw_pcie_prog_inbound_atu(pci, free_win, bar, cpu_addr, | ||
| 83 | as_type); | ||
| 84 | if (ret < 0) { | ||
| 85 | dev_err(pci->dev, "Failed to program IB window\n"); | ||
| 86 | return ret; | ||
| 87 | } | ||
| 88 | |||
| 89 | ep->bar_to_atu[bar] = free_win; | ||
| 90 | set_bit(free_win, ep->ib_window_map); | ||
| 91 | |||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | static int dw_pcie_ep_outbound_atu(struct dw_pcie_ep *ep, phys_addr_t phys_addr, | ||
| 96 | u64 pci_addr, size_t size) | ||
| 97 | { | ||
| 98 | u32 free_win; | ||
| 99 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 100 | |||
| 101 | free_win = find_first_zero_bit(ep->ob_window_map, ep->num_ob_windows); | ||
| 102 | if (free_win >= ep->num_ob_windows) { | ||
| 103 | dev_err(pci->dev, "No free outbound window\n"); | ||
| 104 | return -EINVAL; | ||
| 105 | } | ||
| 106 | |||
| 107 | dw_pcie_prog_outbound_atu(pci, free_win, PCIE_ATU_TYPE_MEM, | ||
| 108 | phys_addr, pci_addr, size); | ||
| 109 | |||
| 110 | set_bit(free_win, ep->ob_window_map); | ||
| 111 | ep->outbound_addr[free_win] = phys_addr; | ||
| 112 | |||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static void dw_pcie_ep_clear_bar(struct pci_epc *epc, u8 func_no, | ||
| 117 | struct pci_epf_bar *epf_bar) | ||
| 118 | { | ||
| 119 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 120 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 121 | enum pci_barno bar = epf_bar->barno; | ||
| 122 | u32 atu_index = ep->bar_to_atu[bar]; | ||
| 123 | |||
| 124 | __dw_pcie_ep_reset_bar(pci, bar, epf_bar->flags); | ||
| 125 | |||
| 126 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_INBOUND); | ||
| 127 | clear_bit(atu_index, ep->ib_window_map); | ||
| 128 | } | ||
| 129 | |||
| 130 | static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no, | ||
| 131 | struct pci_epf_bar *epf_bar) | ||
| 132 | { | ||
| 133 | int ret; | ||
| 134 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 135 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 136 | enum pci_barno bar = epf_bar->barno; | ||
| 137 | size_t size = epf_bar->size; | ||
| 138 | int flags = epf_bar->flags; | ||
| 139 | enum dw_pcie_as_type as_type; | ||
| 140 | u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar); | ||
| 141 | |||
| 142 | if (!(flags & PCI_BASE_ADDRESS_SPACE)) | ||
| 143 | as_type = DW_PCIE_AS_MEM; | ||
| 144 | else | ||
| 145 | as_type = DW_PCIE_AS_IO; | ||
| 146 | |||
| 147 | ret = dw_pcie_ep_inbound_atu(ep, bar, epf_bar->phys_addr, as_type); | ||
| 148 | if (ret) | ||
| 149 | return ret; | ||
| 150 | |||
| 151 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 152 | |||
| 153 | dw_pcie_writel_dbi2(pci, reg, lower_32_bits(size - 1)); | ||
| 154 | dw_pcie_writel_dbi(pci, reg, flags); | ||
| 155 | |||
| 156 | if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) { | ||
| 157 | dw_pcie_writel_dbi2(pci, reg + 4, upper_32_bits(size - 1)); | ||
| 158 | dw_pcie_writel_dbi(pci, reg + 4, 0); | ||
| 159 | } | ||
| 160 | |||
| 161 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 162 | |||
| 163 | return 0; | ||
| 164 | } | ||
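| | |||
| | /* | ||
| | * Worked example with an illustrative size: for a 1 MiB 32-bit memory | ||
| | * BAR, lower_32_bits(size - 1) = 0x000fffff goes into the dbi2 shadow | ||
| | * register, making bits 19:0 of the BAR read back as zero; a host that | ||
| | * sizes the BAR (write all-ones, read back) therefore sees 1 MiB, | ||
| | * exactly as the standard PCI sizing algorithm expects. | ||
| | */ | ||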
| 165 | |||
| 166 | static int dw_pcie_find_index(struct dw_pcie_ep *ep, phys_addr_t addr, | ||
| 167 | u32 *atu_index) | ||
| 168 | { | ||
| 169 | u32 index; | ||
| 170 | |||
| 171 | for (index = 0; index < ep->num_ob_windows; index++) { | ||
| 172 | if (ep->outbound_addr[index] != addr) | ||
| 173 | continue; | ||
| 174 | *atu_index = index; | ||
| 175 | return 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | return -EINVAL; | ||
| 179 | } | ||
| 180 | |||
| 181 | static void dw_pcie_ep_unmap_addr(struct pci_epc *epc, u8 func_no, | ||
| 182 | phys_addr_t addr) | ||
| 183 | { | ||
| 184 | int ret; | ||
| 185 | u32 atu_index; | ||
| 186 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 187 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 188 | |||
| 189 | ret = dw_pcie_find_index(ep, addr, &atu_index); | ||
| 190 | if (ret < 0) | ||
| 191 | return; | ||
| 192 | |||
| 193 | dw_pcie_disable_atu(pci, atu_index, DW_PCIE_REGION_OUTBOUND); | ||
| 194 | clear_bit(atu_index, ep->ob_window_map); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int dw_pcie_ep_map_addr(struct pci_epc *epc, u8 func_no, | ||
| 198 | phys_addr_t addr, | ||
| 199 | u64 pci_addr, size_t size) | ||
| 200 | { | ||
| 201 | int ret; | ||
| 202 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 203 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 204 | |||
| 205 | ret = dw_pcie_ep_outbound_atu(ep, addr, pci_addr, size); | ||
| 206 | if (ret) { | ||
| 207 | dev_err(pci->dev, "Failed to enable address\n"); | ||
| 208 | return ret; | ||
| 209 | } | ||
| 210 | |||
| 211 | return 0; | ||
| 212 | } | ||
| 213 | |||
| 214 | static int dw_pcie_ep_get_msi(struct pci_epc *epc, u8 func_no) | ||
| 215 | { | ||
| 216 | int val; | ||
| 217 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 218 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 219 | |||
| 220 | val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); | ||
| 221 | if (!(val & MSI_CAP_MSI_EN_MASK)) | ||
| 222 | return -EINVAL; | ||
| 223 | |||
| 224 | val = (val & MSI_CAP_MME_MASK) >> MSI_CAP_MME_SHIFT; | ||
| 225 | return val; | ||
| 226 | } | ||
| 227 | |||
| 228 | static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int) | ||
| 229 | { | ||
| 230 | int val; | ||
| 231 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 232 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 233 | |||
| 234 | val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); | ||
| 235 | val &= ~MSI_CAP_MMC_MASK; | ||
| 236 | val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK; | ||
| 237 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 238 | dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val); | ||
| 239 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 240 | |||
| 241 | return 0; | ||
| 242 | } | ||
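| | |||
| | /* | ||
| | * MSI stores vector counts as log2 values: Multiple Message Capable | ||
| | * (written above) and Multiple Message Enable (read back in | ||
| | * dw_pcie_ep_get_msi()) are both power-of-two encodings, so | ||
| | * encode_int = 3 advertises 2^3 = 8 vectors, and get_msi() returns 3 | ||
| | * once the host has enabled all of them. | ||
| | */ | ||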
| 243 | |||
| 244 | static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no, | ||
| 245 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
| 246 | { | ||
| 247 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 248 | |||
| 249 | if (!ep->ops->raise_irq) | ||
| 250 | return -EINVAL; | ||
| 251 | |||
| 252 | return ep->ops->raise_irq(ep, func_no, type, interrupt_num); | ||
| 253 | } | ||
| 254 | |||
| 255 | static void dw_pcie_ep_stop(struct pci_epc *epc) | ||
| 256 | { | ||
| 257 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 258 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 259 | |||
| 260 | if (!pci->ops->stop_link) | ||
| 261 | return; | ||
| 262 | |||
| 263 | pci->ops->stop_link(pci); | ||
| 264 | } | ||
| 265 | |||
| 266 | static int dw_pcie_ep_start(struct pci_epc *epc) | ||
| 267 | { | ||
| 268 | struct dw_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 269 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 270 | |||
| 271 | if (!pci->ops->start_link) | ||
| 272 | return -EINVAL; | ||
| 273 | |||
| 274 | return pci->ops->start_link(pci); | ||
| 275 | } | ||
| 276 | |||
| 277 | static const struct pci_epc_ops epc_ops = { | ||
| 278 | .write_header = dw_pcie_ep_write_header, | ||
| 279 | .set_bar = dw_pcie_ep_set_bar, | ||
| 280 | .clear_bar = dw_pcie_ep_clear_bar, | ||
| 281 | .map_addr = dw_pcie_ep_map_addr, | ||
| 282 | .unmap_addr = dw_pcie_ep_unmap_addr, | ||
| 283 | .set_msi = dw_pcie_ep_set_msi, | ||
| 284 | .get_msi = dw_pcie_ep_get_msi, | ||
| 285 | .raise_irq = dw_pcie_ep_raise_irq, | ||
| 286 | .start = dw_pcie_ep_start, | ||
| 287 | .stop = dw_pcie_ep_stop, | ||
| 288 | }; | ||
| 289 | |||
| 290 | int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 291 | u8 interrupt_num) | ||
| 292 | { | ||
| 293 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 294 | struct pci_epc *epc = ep->epc; | ||
| 295 | u16 msg_ctrl, msg_data; | ||
| 296 | u32 msg_addr_lower, msg_addr_upper; | ||
| 297 | u64 msg_addr; | ||
| 298 | bool has_upper; | ||
| 299 | int ret; | ||
| 300 | |||
| 301 | /* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */ | ||
| 302 | msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL); | ||
| 303 | has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT); | ||
| 304 | msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32); | ||
| 305 | if (has_upper) { | ||
| 306 | msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32); | ||
| 307 | msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64); | ||
| 308 | } else { | ||
| 309 | msg_addr_upper = 0; | ||
| 310 | msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32); | ||
| 311 | } | ||
| 312 | msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; | ||
| 313 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, | ||
| 314 | epc->mem->page_size); | ||
| 315 | if (ret) | ||
| 316 | return ret; | ||
| 317 | |||
| 318 | writel(msg_data | (interrupt_num - 1), ep->msi_mem); | ||
| 319 | |||
| 320 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); | ||
| 321 | |||
| 322 | return 0; | ||
| 323 | } | ||
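| | |||
| | /* | ||
| | * Worked example with made-up values: if the host programmed | ||
| | * msg_addr = 0xfee00000 and msg_data = 0x0040 with eight vectors | ||
| | * enabled, raising interrupt_num = 4 maps a page of endpoint address | ||
| | * space onto 0xfee00000 and writes 0x0040 | (4 - 1) = 0x0043 there, | ||
| | * the low bits selecting the vector as the MSI capability defines. | ||
| | */ | ||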
| 324 | |||
| 325 | void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | ||
| 326 | { | ||
| 327 | struct pci_epc *epc = ep->epc; | ||
| 328 | |||
| 329 | pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem, | ||
| 330 | epc->mem->page_size); | ||
| 331 | |||
| 332 | pci_epc_mem_exit(epc); | ||
| 333 | } | ||
| 334 | |||
| 335 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | ||
| 336 | { | ||
| 337 | int ret; | ||
| 338 | void *addr; | ||
| 339 | struct pci_epc *epc; | ||
| 340 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 341 | struct device *dev = pci->dev; | ||
| 342 | struct device_node *np = dev->of_node; | ||
| 343 | |||
| 344 | if (!pci->dbi_base || !pci->dbi_base2) { | ||
| 345 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); | ||
| 346 | return -EINVAL; | ||
| 347 | } | ||
| 348 | |||
| 349 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | ||
| 350 | if (ret < 0) { | ||
| 351 | dev_err(dev, "Unable to read *num-ib-windows* property\n"); | ||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | if (ep->num_ib_windows > MAX_IATU_IN) { | ||
| 355 | dev_err(dev, "Invalid *num-ib-windows*\n"); | ||
| 356 | return -EINVAL; | ||
| 357 | } | ||
| 358 | |||
| 359 | ret = of_property_read_u32(np, "num-ob-windows", &ep->num_ob_windows); | ||
| 360 | if (ret < 0) { | ||
| 361 | dev_err(dev, "Unable to read *num-ob-windows* property\n"); | ||
| 362 | return ret; | ||
| 363 | } | ||
| 364 | if (ep->num_ob_windows > MAX_IATU_OUT) { | ||
| 365 | dev_err(dev, "Invalid *num-ob-windows*\n"); | ||
| 366 | return -EINVAL; | ||
| 367 | } | ||
| 368 | |||
| 369 | ep->ib_window_map = devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ib_windows), | ||
| 370 | sizeof(long), GFP_KERNEL); | ||
| 372 | if (!ep->ib_window_map) | ||
| 373 | return -ENOMEM; | ||
| 374 | |||
| 375 | ep->ob_window_map = devm_kcalloc(dev, BITS_TO_LONGS(ep->num_ob_windows), | ||
| 376 | sizeof(long), GFP_KERNEL); | ||
| 378 | if (!ep->ob_window_map) | ||
| 379 | return -ENOMEM; | ||
| 380 | |||
| 381 | addr = devm_kcalloc(dev, ep->num_ob_windows, sizeof(phys_addr_t), | ||
| 382 | GFP_KERNEL); | ||
| 383 | if (!addr) | ||
| 384 | return -ENOMEM; | ||
| 385 | ep->outbound_addr = addr; | ||
| 386 | |||
| 387 | if (ep->ops->ep_init) | ||
| 388 | ep->ops->ep_init(ep); | ||
| 389 | |||
| 390 | epc = devm_pci_epc_create(dev, &epc_ops); | ||
| 391 | if (IS_ERR(epc)) { | ||
| 392 | dev_err(dev, "Failed to create epc device\n"); | ||
| 393 | return PTR_ERR(epc); | ||
| 394 | } | ||
| 395 | |||
| 396 | ret = of_property_read_u8(np, "max-functions", &epc->max_functions); | ||
| 397 | if (ret < 0) | ||
| 398 | epc->max_functions = 1; | ||
| 399 | |||
| 400 | ret = __pci_epc_mem_init(epc, ep->phys_base, ep->addr_size, | ||
| 401 | ep->page_size); | ||
| 402 | if (ret < 0) { | ||
| 403 | dev_err(dev, "Failed to initialize address space\n"); | ||
| 404 | return ret; | ||
| 405 | } | ||
| 406 | |||
| 407 | ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys, | ||
| 408 | epc->mem->page_size); | ||
| 409 | if (!ep->msi_mem) { | ||
| 410 | dev_err(dev, "Failed to reserve memory for MSI\n"); | ||
| 411 | return -ENOMEM; | ||
| 412 | } | ||
| 413 | |||
| 414 | epc->features = EPC_FEATURE_NO_LINKUP_NOTIFIER; | ||
| 415 | EPC_FEATURE_SET_BAR(epc->features, BAR_0); | ||
| 416 | |||
| 417 | ep->epc = epc; | ||
| 418 | epc_set_drvdata(epc, ep); | ||
| 419 | dw_pcie_setup(pci); | ||
| 420 | |||
| 421 | return 0; | ||
| 422 | } | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c new file mode 100644 index 000000000000..781aa03aeede --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-host.c | |||
| @@ -0,0 +1,722 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Synopsys DesignWare PCIe host controller driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
| 6 | * http://www.samsung.com | ||
| 7 | * | ||
| 8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/irqchip/chained_irq.h> | ||
| 12 | #include <linux/irqdomain.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/of_pci.h> | ||
| 15 | #include <linux/pci_regs.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | |||
| 18 | #include "../../pci.h" | ||
| 19 | #include "pcie-designware.h" | ||
| 20 | |||
| 21 | static struct pci_ops dw_pcie_ops; | ||
| 22 | |||
| 23 | static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
| 24 | u32 *val) | ||
| 25 | { | ||
| 26 | struct dw_pcie *pci; | ||
| 27 | |||
| 28 | if (pp->ops->rd_own_conf) | ||
| 29 | return pp->ops->rd_own_conf(pp, where, size, val); | ||
| 30 | |||
| 31 | pci = to_dw_pcie_from_pp(pp); | ||
| 32 | return dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 33 | } | ||
| 34 | |||
| 35 | static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, | ||
| 36 | u32 val) | ||
| 37 | { | ||
| 38 | struct dw_pcie *pci; | ||
| 39 | |||
| 40 | if (pp->ops->wr_own_conf) | ||
| 41 | return pp->ops->wr_own_conf(pp, where, size, val); | ||
| 42 | |||
| 43 | pci = to_dw_pcie_from_pp(pp); | ||
| 44 | return dw_pcie_write(pci->dbi_base + where, size, val); | ||
| 45 | } | ||
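| | |||
| | /* | ||
| | * Glue drivers that cannot read or write their own config space as a | ||
| | * plain dbi window (for instance because some registers need fixups) | ||
| | * override rd_own_conf/wr_own_conf; everyone else falls through to | ||
| | * the direct dbi access above. | ||
| | */ | ||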
| 46 | |||
| 47 | static void dw_msi_ack_irq(struct irq_data *d) | ||
| 48 | { | ||
| 49 | irq_chip_ack_parent(d); | ||
| 50 | } | ||
| 51 | |||
| 52 | static void dw_msi_mask_irq(struct irq_data *d) | ||
| 53 | { | ||
| 54 | pci_msi_mask_irq(d); | ||
| 55 | irq_chip_mask_parent(d); | ||
| 56 | } | ||
| 57 | |||
| 58 | static void dw_msi_unmask_irq(struct irq_data *d) | ||
| 59 | { | ||
| 60 | pci_msi_unmask_irq(d); | ||
| 61 | irq_chip_unmask_parent(d); | ||
| 62 | } | ||
| 63 | |||
| 64 | static struct irq_chip dw_pcie_msi_irq_chip = { | ||
| 65 | .name = "PCI-MSI", | ||
| 66 | .irq_ack = dw_msi_ack_irq, | ||
| 67 | .irq_mask = dw_msi_mask_irq, | ||
| 68 | .irq_unmask = dw_msi_unmask_irq, | ||
| 69 | }; | ||
| 70 | |||
| 71 | static struct msi_domain_info dw_pcie_msi_domain_info = { | ||
| 72 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 73 | MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI), | ||
| 74 | .chip = &dw_pcie_msi_irq_chip, | ||
| 75 | }; | ||
| 76 | |||
| 77 | /* MSI int handler */ | ||
| 78 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) | ||
| 79 | { | ||
| 80 | int i, pos, irq; | ||
| 81 | u32 val, num_ctrls; | ||
| 82 | irqreturn_t ret = IRQ_NONE; | ||
| 83 | |||
| 84 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | ||
| 85 | |||
| 86 | for (i = 0; i < num_ctrls; i++) { | ||
| 87 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + | ||
| 88 | (i * MSI_REG_CTRL_BLOCK_SIZE), | ||
| 89 | 4, &val); | ||
| 90 | if (!val) | ||
| 91 | continue; | ||
| 92 | |||
| 93 | ret = IRQ_HANDLED; | ||
| 94 | pos = 0; | ||
| 95 | while ((pos = find_next_bit((unsigned long *) &val, | ||
| 96 | MAX_MSI_IRQS_PER_CTRL, | ||
| 97 | pos)) != MAX_MSI_IRQS_PER_CTRL) { | ||
| 98 | irq = irq_find_mapping(pp->irq_domain, | ||
| 99 | (i * MAX_MSI_IRQS_PER_CTRL) + | ||
| 100 | pos); | ||
| 101 | generic_handle_irq(irq); | ||
| 102 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + | ||
| 103 | (i * MSI_REG_CTRL_BLOCK_SIZE), | ||
| 104 | 4, 1 << pos); | ||
| 105 | pos++; | ||
| 106 | } | ||
| 107 | } | ||
| 108 | |||
| 109 | return ret; | ||
| 110 | } | ||
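| | |||
| | /* | ||
| | * Worked example: with pp->num_vectors = 64 and 32 IRQs per control | ||
| | * block, the loop scans two status registers. If the first one reads | ||
| | * 0x00000022, find_next_bit() visits bits 1 and 5; each is dispatched | ||
| | * through generic_handle_irq() and then acked individually by writing | ||
| | * 1 << pos back to the write-one-to-clear status register. | ||
| | */ | ||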
| 111 | |||
| 112 | /* Chained MSI interrupt service routine */ | ||
| 113 | static void dw_chained_msi_isr(struct irq_desc *desc) | ||
| 114 | { | ||
| 115 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 116 | struct pcie_port *pp; | ||
| 117 | |||
| 118 | chained_irq_enter(chip, desc); | ||
| 119 | |||
| 120 | pp = irq_desc_get_handler_data(desc); | ||
| 121 | dw_handle_msi_irq(pp); | ||
| 122 | |||
| 123 | chained_irq_exit(chip, desc); | ||
| 124 | } | ||
| 125 | |||
| 126 | static void dw_pci_setup_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 127 | { | ||
| 128 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
| 129 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 130 | u64 msi_target; | ||
| 131 | |||
| 132 | if (pp->ops->get_msi_addr) | ||
| 133 | msi_target = pp->ops->get_msi_addr(pp); | ||
| 134 | else | ||
| 135 | msi_target = (u64)pp->msi_data; | ||
| 136 | |||
| 137 | msg->address_lo = lower_32_bits(msi_target); | ||
| 138 | msg->address_hi = upper_32_bits(msi_target); | ||
| 139 | |||
| 140 | if (pp->ops->get_msi_data) | ||
| 141 | msg->data = pp->ops->get_msi_data(pp, data->hwirq); | ||
| 142 | else | ||
| 143 | msg->data = data->hwirq; | ||
| 144 | |||
| 145 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
| 146 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
| 147 | } | ||
| 148 | |||
| 149 | static int dw_pci_msi_set_affinity(struct irq_data *irq_data, | ||
| 150 | const struct cpumask *mask, bool force) | ||
| 151 | { | ||
| 152 | return -EINVAL; | ||
| 153 | } | ||
| 154 | |||
| 155 | static void dw_pci_bottom_mask(struct irq_data *data) | ||
| 156 | { | ||
| 157 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
| 158 | unsigned int res, bit, ctrl; | ||
| 159 | unsigned long flags; | ||
| 160 | |||
| 161 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 162 | |||
| 163 | if (pp->ops->msi_clear_irq) { | ||
| 164 | pp->ops->msi_clear_irq(pp, data->hwirq); | ||
| 165 | } else { | ||
| 166 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
| 167 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
| 168 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
| 169 | |||
| 170 | pp->irq_status[ctrl] &= ~(1 << bit); | ||
| 171 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, | ||
| 172 | pp->irq_status[ctrl]); | ||
| 173 | } | ||
| 174 | |||
| 175 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 176 | } | ||
| 177 | |||
| 178 | static void dw_pci_bottom_unmask(struct irq_data *data) | ||
| 179 | { | ||
| 180 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
| 181 | unsigned int res, bit, ctrl; | ||
| 182 | unsigned long flags; | ||
| 183 | |||
| 184 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 185 | |||
| 186 | if (pp->ops->msi_set_irq) { | ||
| 187 | pp->ops->msi_set_irq(pp, data->hwirq); | ||
| 188 | } else { | ||
| 189 | ctrl = data->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
| 190 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
| 191 | bit = data->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
| 192 | |||
| 193 | pp->irq_status[ctrl] |= 1 << bit; | ||
| 194 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, | ||
| 195 | pp->irq_status[ctrl]); | ||
| 196 | } | ||
| 197 | |||
| 198 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 199 | } | ||
| 200 | |||
| 201 | static void dw_pci_bottom_ack(struct irq_data *d) | ||
| 202 | { | ||
| 203 | struct msi_desc *msi = irq_data_get_msi_desc(d); | ||
| 204 | struct pcie_port *pp; | ||
| 205 | |||
| 206 | pp = msi_desc_to_pci_sysdata(msi); | ||
| 207 | |||
| 208 | if (pp->ops->msi_irq_ack) | ||
| 209 | pp->ops->msi_irq_ack(d->hwirq, pp); | ||
| 210 | } | ||
| 211 | |||
| 212 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { | ||
| 213 | .name = "DWPCI-MSI", | ||
| 214 | .irq_ack = dw_pci_bottom_ack, | ||
| 215 | .irq_compose_msi_msg = dw_pci_setup_msi_msg, | ||
| 216 | .irq_set_affinity = dw_pci_msi_set_affinity, | ||
| 217 | .irq_mask = dw_pci_bottom_mask, | ||
| 218 | .irq_unmask = dw_pci_bottom_unmask, | ||
| 219 | }; | ||
| 220 | |||
| 221 | static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, | ||
| 222 | unsigned int virq, unsigned int nr_irqs, | ||
| 223 | void *args) | ||
| 224 | { | ||
| 225 | struct pcie_port *pp = domain->host_data; | ||
| 226 | unsigned long flags; | ||
| 227 | u32 i; | ||
| 228 | int bit; | ||
| 229 | |||
| 230 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 231 | |||
| 232 | bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors, | ||
| 233 | order_base_2(nr_irqs)); | ||
| 234 | |||
| 235 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 236 | |||
| 237 | if (bit < 0) | ||
| 238 | return -ENOSPC; | ||
| 239 | |||
| 240 | for (i = 0; i < nr_irqs; i++) | ||
| 241 | irq_domain_set_info(domain, virq + i, bit + i, | ||
| 242 | &dw_pci_msi_bottom_irq_chip, | ||
| 243 | pp, handle_edge_irq, | ||
| 244 | NULL, NULL); | ||
| 245 | |||
| 246 | return 0; | ||
| 247 | } | ||
| 248 | |||
| 249 | static void dw_pcie_irq_domain_free(struct irq_domain *domain, | ||
| 250 | unsigned int virq, unsigned int nr_irqs) | ||
| 251 | { | ||
| 252 | struct irq_data *data = irq_domain_get_irq_data(domain, virq); | ||
| 253 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
| 254 | unsigned long flags; | ||
| 255 | |||
| 256 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 257 | |||
| 258 | bitmap_release_region(pp->msi_irq_in_use, data->hwirq, | ||
| 259 | order_base_2(nr_irqs)); | ||
| 260 | |||
| 261 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 262 | } | ||
| 263 | |||
| 264 | static const struct irq_domain_ops dw_pcie_msi_domain_ops = { | ||
| 265 | .alloc = dw_pcie_irq_domain_alloc, | ||
| 266 | .free = dw_pcie_irq_domain_free, | ||
| 267 | }; | ||
| 268 | |||
| 269 | int dw_pcie_allocate_domains(struct pcie_port *pp) | ||
| 270 | { | ||
| 271 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 272 | struct fwnode_handle *fwnode = of_node_to_fwnode(pci->dev->of_node); | ||
| 273 | |||
| 274 | pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors, | ||
| 275 | &dw_pcie_msi_domain_ops, pp); | ||
| 276 | if (!pp->irq_domain) { | ||
| 277 | dev_err(pci->dev, "Failed to create IRQ domain\n"); | ||
| 278 | return -ENOMEM; | ||
| 279 | } | ||
| 280 | |||
| 281 | pp->msi_domain = pci_msi_create_irq_domain(fwnode, | ||
| 282 | &dw_pcie_msi_domain_info, | ||
| 283 | pp->irq_domain); | ||
| 284 | if (!pp->msi_domain) { | ||
| 285 | dev_err(pci->dev, "Failed to create MSI domain\n"); | ||
| 286 | irq_domain_remove(pp->irq_domain); | ||
| 287 | return -ENOMEM; | ||
| 288 | } | ||
| 289 | |||
| 290 | return 0; | ||
| 291 | } | ||
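| | |||
| | /* | ||
| | * The result is a two-level hierarchy: pp->irq_domain hands out raw | ||
| | * hwirqs from the bitmap in dw_pcie_irq_domain_alloc(), and the MSI | ||
| | * domain stacked on top exposes the standard PCI-MSI irq_chip | ||
| | * (dw_pcie_msi_irq_chip) to device drivers. | ||
| | */ | ||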
| 292 | |||
| 293 | void dw_pcie_free_msi(struct pcie_port *pp) | ||
| 294 | { | ||
| 295 | irq_set_chained_handler(pp->msi_irq, NULL); | ||
| 296 | irq_set_handler_data(pp->msi_irq, NULL); | ||
| 297 | |||
| 298 | irq_domain_remove(pp->msi_domain); | ||
| 299 | irq_domain_remove(pp->irq_domain); | ||
| 300 | } | ||
| 301 | |||
| 302 | void dw_pcie_msi_init(struct pcie_port *pp) | ||
| 303 | { | ||
| 304 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 305 | struct device *dev = pci->dev; | ||
| 306 | struct page *page; | ||
| 307 | u64 msi_target; | ||
| 308 | |||
| 309 | page = alloc_page(GFP_KERNEL); | ||
| | if (!page) | ||
| | return; | ||
| 310 | pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | ||
| 311 | if (dma_mapping_error(dev, pp->msi_data)) { | ||
| 312 | dev_err(dev, "Failed to map MSI data\n"); | ||
| 313 | __free_page(page); | ||
| 314 | return; | ||
| 315 | } | ||
| 316 | msi_target = (u64)pp->msi_data; | ||
| 317 | |||
| 318 | /* Program the msi_data */ | ||
| 319 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, | ||
| 320 | lower_32_bits(msi_target)); | ||
| 321 | dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, | ||
| 322 | upper_32_bits(msi_target)); | ||
| 323 | } | ||
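| | |||
| | /* | ||
| | * The page itself is never touched by the CPU; its bus address only | ||
| | * gives endpoints a safe MSI target. Writes aimed at this address are | ||
| | * intercepted by the controller and latched into PCIE_MSI_INTR0_STATUS | ||
| | * rather than reaching memory, which is what dw_handle_msi_irq() polls. | ||
| | */ | ||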
| 324 | |||
| 325 | int dw_pcie_host_init(struct pcie_port *pp) | ||
| 326 | { | ||
| 327 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 328 | struct device *dev = pci->dev; | ||
| 329 | struct device_node *np = dev->of_node; | ||
| 330 | struct platform_device *pdev = to_platform_device(dev); | ||
| 331 | struct resource_entry *win, *tmp; | ||
| 332 | struct pci_bus *bus, *child; | ||
| 333 | struct pci_host_bridge *bridge; | ||
| 334 | struct resource *cfg_res; | ||
| 335 | int ret; | ||
| 336 | |||
| 337 | raw_spin_lock_init(&pci->pp.lock); | ||
| 338 | |||
| 339 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); | ||
| 340 | if (cfg_res) { | ||
| 341 | pp->cfg0_size = resource_size(cfg_res) >> 1; | ||
| 342 | pp->cfg1_size = resource_size(cfg_res) >> 1; | ||
| 343 | pp->cfg0_base = cfg_res->start; | ||
| 344 | pp->cfg1_base = cfg_res->start + pp->cfg0_size; | ||
| 345 | } else if (!pp->va_cfg0_base) { | ||
| | /* Not fatal yet: the config window may still come from DT ranges */ | ||
| 346 | dev_err(dev, "Missing *config* reg space\n"); | ||
| 347 | } | ||
| 348 | |||
| 349 | bridge = pci_alloc_host_bridge(0); | ||
| 350 | if (!bridge) | ||
| 351 | return -ENOMEM; | ||
| 352 | |||
| 353 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 354 | &bridge->windows, &pp->io_base); | ||
| 355 | if (ret) | ||
| 356 | goto error; | ||
| 357 | |||
| 358 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); | ||
| 359 | if (ret) | ||
| 360 | goto error; | ||
| 361 | |||
| 362 | /* Get the I/O and memory ranges from DT */ | ||
| 363 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { | ||
| 364 | switch (resource_type(win->res)) { | ||
| 365 | case IORESOURCE_IO: | ||
| 366 | ret = pci_remap_iospace(win->res, pp->io_base); | ||
| 367 | if (ret) { | ||
| 368 | dev_warn(dev, "Error %d: failed to map resource %pR\n", | ||
| 369 | ret, win->res); | ||
| 370 | resource_list_destroy_entry(win); | ||
| 371 | } else { | ||
| 372 | pp->io = win->res; | ||
| 373 | pp->io->name = "I/O"; | ||
| 374 | pp->io_size = resource_size(pp->io); | ||
| 375 | pp->io_bus_addr = pp->io->start - win->offset; | ||
| 376 | } | ||
| 377 | break; | ||
| 378 | case IORESOURCE_MEM: | ||
| 379 | pp->mem = win->res; | ||
| 380 | pp->mem->name = "MEM"; | ||
| 381 | pp->mem_size = resource_size(pp->mem); | ||
| 382 | pp->mem_bus_addr = pp->mem->start - win->offset; | ||
| 383 | break; | ||
| 384 | case 0: | ||
| 385 | pp->cfg = win->res; | ||
| 386 | pp->cfg0_size = resource_size(pp->cfg) >> 1; | ||
| 387 | pp->cfg1_size = resource_size(pp->cfg) >> 1; | ||
| 388 | pp->cfg0_base = pp->cfg->start; | ||
| 389 | pp->cfg1_base = pp->cfg->start + pp->cfg0_size; | ||
| 390 | break; | ||
| 391 | case IORESOURCE_BUS: | ||
| 392 | pp->busn = win->res; | ||
| 393 | break; | ||
| 394 | } | ||
| 395 | } | ||
| 396 | |||
| 397 | if (!pci->dbi_base) { | ||
| 398 | pci->dbi_base = devm_pci_remap_cfgspace(dev, | ||
| 399 | pp->cfg->start, | ||
| 400 | resource_size(pp->cfg)); | ||
| 401 | if (!pci->dbi_base) { | ||
| 402 | dev_err(dev, "Error with ioremap\n"); | ||
| 403 | ret = -ENOMEM; | ||
| 404 | goto error; | ||
| 405 | } | ||
| 406 | } | ||
| 407 | |||
| 408 | pp->mem_base = pp->mem->start; | ||
| 409 | |||
| 410 | if (!pp->va_cfg0_base) { | ||
| 411 | pp->va_cfg0_base = devm_pci_remap_cfgspace(dev, | ||
| 412 | pp->cfg0_base, pp->cfg0_size); | ||
| 413 | if (!pp->va_cfg0_base) { | ||
| 414 | dev_err(dev, "Error with ioremap of cfg0 space\n"); | ||
| 415 | ret = -ENOMEM; | ||
| 416 | goto error; | ||
| 417 | } | ||
| 418 | } | ||
| 419 | |||
| 420 | if (!pp->va_cfg1_base) { | ||
| 421 | pp->va_cfg1_base = devm_pci_remap_cfgspace(dev, | ||
| 422 | pp->cfg1_base, | ||
| 423 | pp->cfg1_size); | ||
| 424 | if (!pp->va_cfg1_base) { | ||
| 425 | dev_err(dev, "Error with ioremap\n"); | ||
| 426 | ret = -ENOMEM; | ||
| 427 | goto error; | ||
| 428 | } | ||
| 429 | } | ||
| 430 | |||
| 431 | ret = of_property_read_u32(np, "num-viewport", &pci->num_viewport); | ||
| 432 | if (ret) | ||
| 433 | pci->num_viewport = 2; | ||
| 434 | |||
| 435 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 436 | /* | ||
| 437 | * If a specific SoC driver needs to change the | ||
| 438 | * default number of vectors, it needs to implement | ||
| 439 | * the set_num_vectors callback. | ||
| 440 | */ | ||
| 441 | if (!pp->ops->set_num_vectors) { | ||
| 442 | pp->num_vectors = MSI_DEF_NUM_VECTORS; | ||
| 443 | } else { | ||
| 444 | pp->ops->set_num_vectors(pp); | ||
| 445 | |||
| 446 | if (pp->num_vectors > MAX_MSI_IRQS || | ||
| 447 | pp->num_vectors == 0) { | ||
| 448 | dev_err(dev, | ||
| 449 | "Invalid number of vectors\n"); | ||
| | ret = -EINVAL; | ||
| 450 | goto error; | ||
| 451 | } | ||
| 452 | } | ||
| 453 | |||
| 454 | if (!pp->ops->msi_host_init) { | ||
| 455 | ret = dw_pcie_allocate_domains(pp); | ||
| 456 | if (ret) | ||
| 457 | goto error; | ||
| 458 | |||
| 459 | if (pp->msi_irq) | ||
| 460 | irq_set_chained_handler_and_data(pp->msi_irq, | ||
| 461 | dw_chained_msi_isr, | ||
| 462 | pp); | ||
| 463 | } else { | ||
| 464 | ret = pp->ops->msi_host_init(pp); | ||
| 465 | if (ret < 0) | ||
| 466 | goto error; | ||
| 467 | } | ||
| 468 | } | ||
| 469 | |||
| 470 | if (pp->ops->host_init) { | ||
| 471 | ret = pp->ops->host_init(pp); | ||
| 472 | if (ret) | ||
| 473 | goto error; | ||
| 474 | } | ||
| 475 | |||
| 476 | pp->root_bus_nr = pp->busn->start; | ||
| 477 | |||
| 478 | bridge->dev.parent = dev; | ||
| 479 | bridge->sysdata = pp; | ||
| 480 | bridge->busnr = pp->root_bus_nr; | ||
| 481 | bridge->ops = &dw_pcie_ops; | ||
| 482 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 483 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 484 | |||
| 485 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 486 | if (ret) | ||
| 487 | goto error; | ||
| 488 | |||
| 489 | bus = bridge->bus; | ||
| 490 | |||
| 491 | if (pp->ops->scan_bus) | ||
| 492 | pp->ops->scan_bus(pp); | ||
| 493 | |||
| 494 | pci_bus_size_bridges(bus); | ||
| 495 | pci_bus_assign_resources(bus); | ||
| 496 | |||
| 497 | list_for_each_entry(child, &bus->children, node) | ||
| 498 | pcie_bus_configure_settings(child); | ||
| 499 | |||
| 500 | pci_bus_add_devices(bus); | ||
| 501 | return 0; | ||
| 502 | |||
| 503 | error: | ||
| 504 | pci_free_host_bridge(bridge); | ||
| 505 | return ret; | ||
| 506 | } | ||
| 507 | |||
| 508 | static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 509 | u32 devfn, int where, int size, u32 *val) | ||
| 510 | { | ||
| 511 | int ret, type; | ||
| 512 | u32 busdev, cfg_size; | ||
| 513 | u64 cpu_addr; | ||
| 514 | void __iomem *va_cfg_base; | ||
| 515 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 516 | |||
| 517 | if (pp->ops->rd_other_conf) | ||
| 518 | return pp->ops->rd_other_conf(pp, bus, devfn, where, size, val); | ||
| 519 | |||
| 520 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | | ||
| 521 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); | ||
| 522 | |||
| 523 | if (bus->parent->number == pp->root_bus_nr) { | ||
| 524 | type = PCIE_ATU_TYPE_CFG0; | ||
| 525 | cpu_addr = pp->cfg0_base; | ||
| 526 | cfg_size = pp->cfg0_size; | ||
| 527 | va_cfg_base = pp->va_cfg0_base; | ||
| 528 | } else { | ||
| 529 | type = PCIE_ATU_TYPE_CFG1; | ||
| 530 | cpu_addr = pp->cfg1_base; | ||
| 531 | cfg_size = pp->cfg1_size; | ||
| 532 | va_cfg_base = pp->va_cfg1_base; | ||
| 533 | } | ||
| 534 | |||
| 535 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 536 | type, cpu_addr, | ||
| 537 | busdev, cfg_size); | ||
| 538 | ret = dw_pcie_read(va_cfg_base + where, size, val); | ||
| 539 | if (pci->num_viewport <= 2) | ||
| 540 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 541 | PCIE_ATU_TYPE_IO, pp->io_base, | ||
| 542 | pp->io_bus_addr, pp->io_size); | ||
| 543 | |||
| 544 | return ret; | ||
| 545 | } | ||
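
With only two viewports, region index 1 is shared between config and I/O cycles, so it must be handed back to the I/O window after every config access; that is what the trailing reprogramming above does. The target address written into the ATU encodes the peer's bus/device/function using the PCIE_ATU_BUS/DEV/FUNC field layout defined later in this patch. A short sketch of that encoding (local macro copies, values illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

/* Local copies of PCIE_ATU_BUS/DEV/FUNC from pcie-designware.h below */
#define ATU_BUS(x)	(((uint32_t)(x) & 0xff) << 24)
#define ATU_DEV(x)	(((uint32_t)(x) & 0x1f) << 19)
#define ATU_FUNC(x)	(((uint32_t)(x) & 0x7) << 16)

int main(void)
{
	/* Config access to 01:00.0 -> ATU target address 0x01000000 */
	uint32_t busdev = ATU_BUS(1) | ATU_DEV(0) | ATU_FUNC(0);

	printf("ATU target for 01:00.0 = 0x%08x\n", busdev);
	return 0;
}
```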
| 546 | |||
| 547 | static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, | ||
| 548 | u32 devfn, int where, int size, u32 val) | ||
| 549 | { | ||
| 550 | int ret, type; | ||
| 551 | u32 busdev, cfg_size; | ||
| 552 | u64 cpu_addr; | ||
| 553 | void __iomem *va_cfg_base; | ||
| 554 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 555 | |||
| 556 | if (pp->ops->wr_other_conf) | ||
| 557 | return pp->ops->wr_other_conf(pp, bus, devfn, where, size, val); | ||
| 558 | |||
| 559 | busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | | ||
| 560 | PCIE_ATU_FUNC(PCI_FUNC(devfn)); | ||
| 561 | |||
| 562 | if (bus->parent->number == pp->root_bus_nr) { | ||
| 563 | type = PCIE_ATU_TYPE_CFG0; | ||
| 564 | cpu_addr = pp->cfg0_base; | ||
| 565 | cfg_size = pp->cfg0_size; | ||
| 566 | va_cfg_base = pp->va_cfg0_base; | ||
| 567 | } else { | ||
| 568 | type = PCIE_ATU_TYPE_CFG1; | ||
| 569 | cpu_addr = pp->cfg1_base; | ||
| 570 | cfg_size = pp->cfg1_size; | ||
| 571 | va_cfg_base = pp->va_cfg1_base; | ||
| 572 | } | ||
| 573 | |||
| 574 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 575 | type, cpu_addr, | ||
| 576 | busdev, cfg_size); | ||
| 577 | ret = dw_pcie_write(va_cfg_base + where, size, val); | ||
| 578 | if (pci->num_viewport <= 2) | ||
| 579 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX1, | ||
| 580 | PCIE_ATU_TYPE_IO, pp->io_base, | ||
| 581 | pp->io_bus_addr, pp->io_size); | ||
| 582 | |||
| 583 | return ret; | ||
| 584 | } | ||
| 585 | |||
| 586 | static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, | ||
| 587 | int dev) | ||
| 588 | { | ||
| 589 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 590 | |||
| 591 | /* If there is no link, then there is no device */ | ||
| 592 | if (bus->number != pp->root_bus_nr) { | ||
| 593 | if (!dw_pcie_link_up(pci)) | ||
| 594 | return 0; | ||
| 595 | } | ||
| 596 | |||
| 597 | /* Access only one slot on each root port */ | ||
| 598 | if (bus->number == pp->root_bus_nr && dev > 0) | ||
| 599 | return 0; | ||
| 600 | |||
| 601 | return 1; | ||
| 602 | } | ||
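
A root port exposes exactly one device, so any devfn whose slot field is non-zero on the root bus is filtered out before a config cycle is issued (reads then return 0xffffffff). For reference, a sketch of the standard devfn decoding used here, equivalent to PCI_SLOT()/PCI_FUNC():

```c
#include <stdio.h>

/* Standard devfn field encodings, as in PCI_SLOT()/PCI_FUNC() */
#define SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define FUNC(devfn)	((devfn) & 0x07)

int main(void)
{
	/* devfn 0x08 is slot 1, function 0: rejected on the root bus above */
	unsigned int devfn = 0x08;

	printf("devfn 0x%02x -> slot %u, func %u\n", devfn, SLOT(devfn), FUNC(devfn));
	return 0;
}
```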
| 603 | |||
| 604 | static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
| 605 | int size, u32 *val) | ||
| 606 | { | ||
| 607 | struct pcie_port *pp = bus->sysdata; | ||
| 608 | |||
| 609 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) { | ||
| 610 | *val = 0xffffffff; | ||
| 611 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 612 | } | ||
| 613 | |||
| 614 | if (bus->number == pp->root_bus_nr) | ||
| 615 | return dw_pcie_rd_own_conf(pp, where, size, val); | ||
| 616 | |||
| 617 | return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); | ||
| 618 | } | ||
| 619 | |||
| 620 | static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
| 621 | int where, int size, u32 val) | ||
| 622 | { | ||
| 623 | struct pcie_port *pp = bus->sysdata; | ||
| 624 | |||
| 625 | if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) | ||
| 626 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 627 | |||
| 628 | if (bus->number == pp->root_bus_nr) | ||
| 629 | return dw_pcie_wr_own_conf(pp, where, size, val); | ||
| 630 | |||
| 631 | return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); | ||
| 632 | } | ||
| 633 | |||
| 634 | static struct pci_ops dw_pcie_ops = { | ||
| 635 | .read = dw_pcie_rd_conf, | ||
| 636 | .write = dw_pcie_wr_conf, | ||
| 637 | }; | ||
| 638 | |||
| 639 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
| 640 | { | ||
| 641 | u32 val; | ||
| 642 | |||
| 643 | /* The viewport register is absent on unrolled-iATU cores and reads as all-ones */ | ||
| 644 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
| 644 | if (val == 0xffffffff) | ||
| 645 | return 1; | ||
| 646 | |||
| 647 | return 0; | ||
| 648 | } | ||
| 649 | |||
| 650 | void dw_pcie_setup_rc(struct pcie_port *pp) | ||
| 651 | { | ||
| 652 | u32 val, ctrl, num_ctrls; | ||
| 653 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 654 | |||
| 655 | dw_pcie_setup(pci); | ||
| 656 | |||
| 657 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | ||
| 658 | |||
| 659 | /* Initialize IRQ Status array */ | ||
| 660 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) | ||
| 661 | dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + | ||
| 662 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | ||
| 663 | 4, &pp->irq_status[ctrl]); | ||
| 664 | |||
| 665 | /* Setup RC BARs */ | ||
| 666 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, 0x00000004); | ||
| 667 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0x00000000); | ||
| 668 | |||
| 669 | /* Setup interrupt pins */ | ||
| 670 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 671 | val = dw_pcie_readl_dbi(pci, PCI_INTERRUPT_LINE); | ||
| 672 | val &= 0xffff00ff; | ||
| 673 | val |= 0x00000100; | ||
| 674 | dw_pcie_writel_dbi(pci, PCI_INTERRUPT_LINE, val); | ||
| 675 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 676 | |||
| 677 | /* Setup bus numbers */ | ||
| 678 | val = dw_pcie_readl_dbi(pci, PCI_PRIMARY_BUS); | ||
| 679 | val &= 0xff000000; | ||
| 680 | val |= 0x00ff0100; | ||
| 681 | dw_pcie_writel_dbi(pci, PCI_PRIMARY_BUS, val); | ||
| 682 | |||
| 683 | /* Setup command register */ | ||
| 684 | val = dw_pcie_readl_dbi(pci, PCI_COMMAND); | ||
| 685 | val &= 0xffff0000; | ||
| 686 | val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | ||
| 687 | PCI_COMMAND_MASTER | PCI_COMMAND_SERR; | ||
| 688 | dw_pcie_writel_dbi(pci, PCI_COMMAND, val); | ||
| 689 | |||
| 690 | /* | ||
| 691 | * If the platform provides ->rd_other_conf, it means the platform | ||
| 692 | * uses its own address translation component rather than ATU, so | ||
| 693 | * we should not program the ATU here. | ||
| 694 | */ | ||
| 695 | if (!pp->ops->rd_other_conf) { | ||
| 696 | /* Get iATU unroll support */ | ||
| 697 | pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); | ||
| 698 | dev_dbg(pci->dev, "iATU unroll: %s\n", | ||
| 699 | pci->iatu_unroll_enabled ? "enabled" : "disabled"); | ||
| 700 | |||
| 701 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, | ||
| 702 | PCIE_ATU_TYPE_MEM, pp->mem_base, | ||
| 703 | pp->mem_bus_addr, pp->mem_size); | ||
| 704 | if (pci->num_viewport > 2) | ||
| 705 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX2, | ||
| 706 | PCIE_ATU_TYPE_IO, pp->io_base, | ||
| 707 | pp->io_bus_addr, pp->io_size); | ||
| 708 | } | ||
| 709 | |||
| 710 | dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); | ||
| 711 | |||
| 712 | /* Enable write permission for the DBI read-only register */ | ||
| 713 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 714 | /* Program correct class for RC */ | ||
| 715 | dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI); | ||
| 716 | /* Better disable write permission right after the update */ | ||
| 717 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 718 | |||
| 719 | dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val); | ||
| 720 | val |= PORT_LOGIC_SPEED_CHANGE; | ||
| 721 | dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); | ||
| 722 | } | ||
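
The bus-number write in dw_pcie_setup_rc() is a masked read-modify-write: byte 3 (the secondary latency timer) is preserved, while primary/secondary/subordinate are set to 0x00/0x01/0xff so the whole downstream bus range sits behind the root port until enumeration trims it. A sketch decoding that constant:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The value ORed into PCI_PRIMARY_BUS (offset 0x18) above */
	uint32_t val = 0x00ff0100;

	printf("primary     = 0x%02x\n", val & 0xff);		/* 0x00 */
	printf("secondary   = 0x%02x\n", (val >> 8) & 0xff);	/* 0x01 */
	printf("subordinate = 0x%02x\n", (val >> 16) & 0xff);	/* 0xff */
	return 0;
}
```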
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c new file mode 100644 index 000000000000..5937fed4c938 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c | |||
| @@ -0,0 +1,259 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe RC driver for Synopsys DesignWare Core | ||
| 4 | * | ||
| 5 | * Copyright (C) 2015-2016 Synopsys, Inc. (www.synopsys.com) | ||
| 6 | * | ||
| 7 | * Authors: Joao Pinto <Joao.Pinto@synopsys.com> | ||
| 8 | */ | ||
| 9 | #include <linux/clk.h> | ||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/gpio.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/of_device.h> | ||
| 16 | #include <linux/of_gpio.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/resource.h> | ||
| 20 | #include <linux/signal.h> | ||
| 21 | #include <linux/types.h> | ||
| 22 | #include <linux/regmap.h> | ||
| 23 | |||
| 24 | #include "pcie-designware.h" | ||
| 25 | |||
| 26 | struct dw_plat_pcie { | ||
| 27 | struct dw_pcie *pci; | ||
| 28 | struct regmap *regmap; | ||
| 29 | enum dw_pcie_device_mode mode; | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct dw_plat_pcie_of_data { | ||
| 33 | enum dw_pcie_device_mode mode; | ||
| 34 | }; | ||
| 35 | |||
| 36 | static const struct of_device_id dw_plat_pcie_of_match[]; | ||
| 37 | |||
| 38 | static int dw_plat_pcie_host_init(struct pcie_port *pp) | ||
| 39 | { | ||
| 40 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 41 | |||
| 42 | dw_pcie_setup_rc(pp); | ||
| 43 | dw_pcie_wait_for_link(pci); | ||
| 44 | |||
| 45 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 46 | dw_pcie_msi_init(pp); | ||
| 47 | |||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | static void dw_plat_set_num_vectors(struct pcie_port *pp) | ||
| 52 | { | ||
| 53 | pp->num_vectors = MAX_MSI_IRQS; | ||
| 54 | } | ||
| 55 | |||
| 56 | static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { | ||
| 57 | .host_init = dw_plat_pcie_host_init, | ||
| 58 | .set_num_vectors = dw_plat_set_num_vectors, | ||
| 59 | }; | ||
| 60 | |||
| 61 | static int dw_plat_pcie_establish_link(struct dw_pcie *pci) | ||
| 62 | { | ||
| 63 | return 0; | ||
| 64 | } | ||
| 65 | |||
| 66 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 67 | .start_link = dw_plat_pcie_establish_link, | ||
| 68 | }; | ||
| 69 | |||
| 70 | static void dw_plat_pcie_ep_init(struct dw_pcie_ep *ep) | ||
| 71 | { | ||
| 72 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 73 | enum pci_barno bar; | ||
| 74 | |||
| 75 | for (bar = BAR_0; bar <= BAR_5; bar++) | ||
| 76 | dw_pcie_ep_reset_bar(pci, bar); | ||
| 77 | } | ||
| 78 | |||
| 79 | static int dw_plat_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 80 | enum pci_epc_irq_type type, | ||
| 81 | u8 interrupt_num) | ||
| 82 | { | ||
| 83 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 84 | |||
| 85 | switch (type) { | ||
| 86 | case PCI_EPC_IRQ_LEGACY: | ||
| 87 | dev_err(pci->dev, "EP cannot trigger legacy IRQs\n"); | ||
| 88 | return -EINVAL; | ||
| 89 | case PCI_EPC_IRQ_MSI: | ||
| 90 | return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); | ||
| 91 | default: | ||
| 92 | dev_err(pci->dev, "Unknown IRQ type\n"); | ||
| 93 | return -EINVAL; | ||
| 94 | } | ||
| 94 | |||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | |||
| 98 | static struct dw_pcie_ep_ops pcie_ep_ops = { | ||
| 99 | .ep_init = dw_plat_pcie_ep_init, | ||
| 100 | .raise_irq = dw_plat_pcie_ep_raise_irq, | ||
| 101 | }; | ||
| 102 | |||
| 103 | static int dw_plat_add_pcie_port(struct dw_plat_pcie *dw_plat_pcie, | ||
| 104 | struct platform_device *pdev) | ||
| 105 | { | ||
| 106 | struct dw_pcie *pci = dw_plat_pcie->pci; | ||
| 107 | struct pcie_port *pp = &pci->pp; | ||
| 108 | struct device *dev = &pdev->dev; | ||
| 109 | int ret; | ||
| 110 | |||
| 111 | pp->irq = platform_get_irq(pdev, 1); | ||
| 112 | if (pp->irq < 0) | ||
| 113 | return pp->irq; | ||
| 114 | |||
| 115 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 116 | pp->msi_irq = platform_get_irq(pdev, 0); | ||
| 117 | if (pp->msi_irq < 0) | ||
| 118 | return pp->msi_irq; | ||
| 119 | } | ||
| 120 | |||
| 121 | pp->root_bus_nr = -1; | ||
| 122 | pp->ops = &dw_plat_pcie_host_ops; | ||
| 123 | |||
| 124 | ret = dw_pcie_host_init(pp); | ||
| 125 | if (ret) { | ||
| 126 | dev_err(dev, "Failed to initialize host\n"); | ||
| 127 | return ret; | ||
| 128 | } | ||
| 129 | |||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, | ||
| 134 | struct platform_device *pdev) | ||
| 135 | { | ||
| 136 | int ret; | ||
| 137 | struct dw_pcie_ep *ep; | ||
| 138 | struct resource *res; | ||
| 139 | struct device *dev = &pdev->dev; | ||
| 140 | struct dw_pcie *pci = dw_plat_pcie->pci; | ||
| 141 | |||
| 142 | ep = &pci->ep; | ||
| 143 | ep->ops = &pcie_ep_ops; | ||
| 144 | |||
| 145 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); | ||
| 146 | pci->dbi_base2 = devm_ioremap_resource(dev, res); | ||
| 147 | if (IS_ERR(pci->dbi_base2)) | ||
| 148 | return PTR_ERR(pci->dbi_base2); | ||
| 149 | |||
| 150 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
| 151 | if (!res) | ||
| 152 | return -EINVAL; | ||
| 153 | |||
| 154 | ep->phys_base = res->start; | ||
| 155 | ep->addr_size = resource_size(res); | ||
| 156 | |||
| 157 | ret = dw_pcie_ep_init(ep); | ||
| 158 | if (ret) { | ||
| 159 | dev_err(dev, "Failed to initialize endpoint\n"); | ||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | static int dw_plat_pcie_probe(struct platform_device *pdev) | ||
| 166 | { | ||
| 167 | struct device *dev = &pdev->dev; | ||
| 168 | struct dw_plat_pcie *dw_plat_pcie; | ||
| 169 | struct dw_pcie *pci; | ||
| 170 | struct resource *res; /* Resource from DT */ | ||
| 171 | int ret; | ||
| 172 | const struct of_device_id *match; | ||
| 173 | const struct dw_plat_pcie_of_data *data; | ||
| 174 | enum dw_pcie_device_mode mode; | ||
| 175 | |||
| 176 | match = of_match_device(dw_plat_pcie_of_match, dev); | ||
| 177 | if (!match) | ||
| 178 | return -EINVAL; | ||
| 179 | |||
| 180 | data = (struct dw_plat_pcie_of_data *)match->data; | ||
| 181 | mode = (enum dw_pcie_device_mode)data->mode; | ||
| 182 | |||
| 183 | dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL); | ||
| 184 | if (!dw_plat_pcie) | ||
| 185 | return -ENOMEM; | ||
| 186 | |||
| 187 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 188 | if (!pci) | ||
| 189 | return -ENOMEM; | ||
| 190 | |||
| 191 | pci->dev = dev; | ||
| 192 | pci->ops = &dw_pcie_ops; | ||
| 193 | |||
| 194 | dw_plat_pcie->pci = pci; | ||
| 195 | dw_plat_pcie->mode = mode; | ||
| 196 | |||
| 197 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 198 | if (!res) | ||
| 199 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 200 | |||
| 201 | pci->dbi_base = devm_ioremap_resource(dev, res); | ||
| 202 | if (IS_ERR(pci->dbi_base)) | ||
| 203 | return PTR_ERR(pci->dbi_base); | ||
| 204 | |||
| 205 | platform_set_drvdata(pdev, dw_plat_pcie); | ||
| 206 | |||
| 207 | switch (dw_plat_pcie->mode) { | ||
| 208 | case DW_PCIE_RC_TYPE: | ||
| 209 | if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_HOST)) | ||
| 210 | return -ENODEV; | ||
| 211 | |||
| 212 | ret = dw_plat_add_pcie_port(dw_plat_pcie, pdev); | ||
| 213 | if (ret < 0) | ||
| 214 | return ret; | ||
| 215 | break; | ||
| 216 | case DW_PCIE_EP_TYPE: | ||
| 217 | if (!IS_ENABLED(CONFIG_PCIE_DW_PLAT_EP)) | ||
| 218 | return -ENODEV; | ||
| 219 | |||
| 220 | ret = dw_plat_add_pcie_ep(dw_plat_pcie, pdev); | ||
| 221 | if (ret < 0) | ||
| 222 | return ret; | ||
| 223 | break; | ||
| 224 | default: | ||
| 225 | dev_err(dev, "Invalid device type %d\n", dw_plat_pcie->mode); | ||
| 226 | } | ||
| 227 | |||
| 228 | return 0; | ||
| 229 | } | ||
| 230 | |||
| 231 | static const struct dw_plat_pcie_of_data dw_plat_pcie_rc_of_data = { | ||
| 232 | .mode = DW_PCIE_RC_TYPE, | ||
| 233 | }; | ||
| 234 | |||
| 235 | static const struct dw_plat_pcie_of_data dw_plat_pcie_ep_of_data = { | ||
| 236 | .mode = DW_PCIE_EP_TYPE, | ||
| 237 | }; | ||
| 238 | |||
| 239 | static const struct of_device_id dw_plat_pcie_of_match[] = { | ||
| 240 | { | ||
| 241 | .compatible = "snps,dw-pcie", | ||
| 242 | .data = &dw_plat_pcie_rc_of_data, | ||
| 243 | }, | ||
| 244 | { | ||
| 245 | .compatible = "snps,dw-pcie-ep", | ||
| 246 | .data = &dw_plat_pcie_ep_of_data, | ||
| 247 | }, | ||
| 248 | {}, | ||
| 249 | }; | ||
| 250 | |||
| 251 | static struct platform_driver dw_plat_pcie_driver = { | ||
| 252 | .driver = { | ||
| 253 | .name = "dw-pcie", | ||
| 254 | .of_match_table = dw_plat_pcie_of_match, | ||
| 255 | .suppress_bind_attrs = true, | ||
| 256 | }, | ||
| 257 | .probe = dw_plat_pcie_probe, | ||
| 258 | }; | ||
| 259 | builtin_platform_driver(dw_plat_pcie_driver); | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c new file mode 100644 index 000000000000..778c4f76a884 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
| @@ -0,0 +1,394 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Synopsys DesignWare PCIe host controller driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
| 6 | * http://www.samsung.com | ||
| 7 | * | ||
| 8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/of.h> | ||
| 13 | #include <linux/types.h> | ||
| 14 | |||
| 15 | #include "pcie-designware.h" | ||
| 16 | |||
| 17 | /* PCIe Port Logic registers */ | ||
| 18 | #define PLR_OFFSET 0x700 | ||
| 19 | #define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) | ||
| 20 | #define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) | ||
| 21 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) | ||
| 22 | |||
| 23 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) | ||
| 24 | { | ||
| 25 | if ((uintptr_t)addr & (size - 1)) { | ||
| 26 | *val = 0; | ||
| 27 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 28 | } | ||
| 29 | |||
| 30 | if (size == 4) { | ||
| 31 | *val = readl(addr); | ||
| 32 | } else if (size == 2) { | ||
| 33 | *val = readw(addr); | ||
| 34 | } else if (size == 1) { | ||
| 35 | *val = readb(addr); | ||
| 36 | } else { | ||
| 37 | *val = 0; | ||
| 38 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 39 | } | ||
| 40 | |||
| 41 | return PCIBIOS_SUCCESSFUL; | ||
| 42 | } | ||
| 43 | |||
| 44 | int dw_pcie_write(void __iomem *addr, int size, u32 val) | ||
| 45 | { | ||
| 46 | if ((uintptr_t)addr & (size - 1)) | ||
| 47 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 48 | |||
| 49 | if (size == 4) | ||
| 50 | writel(val, addr); | ||
| 51 | else if (size == 2) | ||
| 52 | writew(val, addr); | ||
| 53 | else if (size == 1) | ||
| 54 | writeb(val, addr); | ||
| 55 | else | ||
| 56 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 57 | |||
| 58 | return PCIBIOS_SUCCESSFUL; | ||
| 59 | } | ||
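
Both accessors reject addresses that are not naturally aligned to the access size: since size is 1, 2, or 4, `size - 1` is a mask of exactly the low address bits that must be clear. A sketch of the test with illustrative addresses:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the alignment check in dw_pcie_read()/dw_pcie_write() */
static int misaligned(uintptr_t addr, int size)
{
	return (addr & (size - 1)) != 0;
}

int main(void)
{
	printf("2-byte access at 0x1001: %s\n", misaligned(0x1001, 2) ? "rejected" : "ok");
	printf("4-byte access at 0x1002: %s\n", misaligned(0x1002, 4) ? "rejected" : "ok");
	printf("4-byte access at 0x1004: %s\n", misaligned(0x1004, 4) ? "rejected" : "ok");
	return 0;
}
```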
| 60 | |||
| 61 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 62 | size_t size) | ||
| 63 | { | ||
| 64 | int ret; | ||
| 65 | u32 val; | ||
| 66 | |||
| 67 | if (pci->ops->read_dbi) | ||
| 68 | return pci->ops->read_dbi(pci, base, reg, size); | ||
| 69 | |||
| 70 | ret = dw_pcie_read(base + reg, size, &val); | ||
| 71 | if (ret) | ||
| 72 | dev_err(pci->dev, "Read DBI address failed\n"); | ||
| 73 | |||
| 74 | return val; | ||
| 75 | } | ||
| 76 | |||
| 77 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 78 | size_t size, u32 val) | ||
| 79 | { | ||
| 80 | int ret; | ||
| 81 | |||
| 82 | if (pci->ops->write_dbi) { | ||
| 83 | pci->ops->write_dbi(pci, base, reg, size, val); | ||
| 84 | return; | ||
| 85 | } | ||
| 86 | |||
| 87 | ret = dw_pcie_write(base + reg, size, val); | ||
| 88 | if (ret) | ||
| 89 | dev_err(pci->dev, "Write DBI address failed\n"); | ||
| 90 | } | ||
| 91 | |||
| 92 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) | ||
| 93 | { | ||
| 94 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | ||
| 95 | |||
| 96 | return dw_pcie_readl_dbi(pci, offset + reg); | ||
| 97 | } | ||
| 98 | |||
| 99 | static void dw_pcie_writel_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg, | ||
| 100 | u32 val) | ||
| 101 | { | ||
| 102 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | ||
| 103 | |||
| 104 | dw_pcie_writel_dbi(pci, offset + reg, val); | ||
| 105 | } | ||
| 106 | |||
| 107 | static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index, | ||
| 108 | int type, u64 cpu_addr, | ||
| 109 | u64 pci_addr, u32 size) | ||
| 110 | { | ||
| 111 | u32 retries, val; | ||
| 112 | |||
| 113 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_BASE, | ||
| 114 | lower_32_bits(cpu_addr)); | ||
| 115 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_BASE, | ||
| 116 | upper_32_bits(cpu_addr)); | ||
| 117 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LIMIT, | ||
| 118 | lower_32_bits(cpu_addr + size - 1)); | ||
| 119 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, | ||
| 120 | lower_32_bits(pci_addr)); | ||
| 121 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, | ||
| 122 | upper_32_bits(pci_addr)); | ||
| 123 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, | ||
| 124 | type); | ||
| 125 | dw_pcie_writel_ob_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, | ||
| 126 | PCIE_ATU_ENABLE); | ||
| 127 | |||
| 128 | /* | ||
| 129 | * Make sure ATU enable takes effect before any subsequent config | ||
| 130 | * and I/O accesses. | ||
| 131 | */ | ||
| 132 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
| 133 | val = dw_pcie_readl_ob_unroll(pci, index, | ||
| 134 | PCIE_ATU_UNR_REGION_CTRL2); | ||
| 135 | if (val & PCIE_ATU_ENABLE) | ||
| 136 | return; | ||
| 137 | |||
| 138 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
| 139 | } | ||
| 140 | dev_err(pci->dev, "Outbound iATU could not be enabled\n"); | ||
| 141 | } | ||
| 142 | |||
| 143 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type, | ||
| 144 | u64 cpu_addr, u64 pci_addr, u32 size) | ||
| 145 | { | ||
| 146 | u32 retries, val; | ||
| 147 | |||
| 148 | if (pci->ops->cpu_addr_fixup) | ||
| 149 | cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr); | ||
| 150 | |||
| 151 | if (pci->iatu_unroll_enabled) { | ||
| 152 | dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr, | ||
| 153 | pci_addr, size); | ||
| 154 | return; | ||
| 155 | } | ||
| 156 | |||
| 157 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, | ||
| 158 | PCIE_ATU_REGION_OUTBOUND | index); | ||
| 159 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_BASE, | ||
| 160 | lower_32_bits(cpu_addr)); | ||
| 161 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_BASE, | ||
| 162 | upper_32_bits(cpu_addr)); | ||
| 163 | dw_pcie_writel_dbi(pci, PCIE_ATU_LIMIT, | ||
| 164 | lower_32_bits(cpu_addr + size - 1)); | ||
| 165 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, | ||
| 166 | lower_32_bits(pci_addr)); | ||
| 167 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, | ||
| 168 | upper_32_bits(pci_addr)); | ||
| 169 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); | ||
| 170 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE); | ||
| 171 | |||
| 172 | /* | ||
| 173 | * Make sure ATU enable takes effect before any subsequent config | ||
| 174 | * and I/O accesses. | ||
| 175 | */ | ||
| 176 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
| 177 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); | ||
| 178 | if (val & PCIE_ATU_ENABLE) | ||
| 179 | return; | ||
| 180 | |||
| 181 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
| 182 | } | ||
| 183 | dev_err(pci->dev, "Outbound iATU could not be enabled\n"); | ||
| 184 | } | ||
| 185 | |||
| 186 | static u32 dw_pcie_readl_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg) | ||
| 187 | { | ||
| 188 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
| 189 | |||
| 190 | return dw_pcie_readl_dbi(pci, offset + reg); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void dw_pcie_writel_ib_unroll(struct dw_pcie *pci, u32 index, u32 reg, | ||
| 194 | u32 val) | ||
| 195 | { | ||
| 196 | u32 offset = PCIE_GET_ATU_INB_UNR_REG_OFFSET(index); | ||
| 197 | |||
| 198 | dw_pcie_writel_dbi(pci, offset + reg, val); | ||
| 199 | } | ||
| 200 | |||
| 201 | static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index, | ||
| 202 | int bar, u64 cpu_addr, | ||
| 203 | enum dw_pcie_as_type as_type) | ||
| 204 | { | ||
| 205 | int type; | ||
| 206 | u32 retries, val; | ||
| 207 | |||
| 208 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_LOWER_TARGET, | ||
| 209 | lower_32_bits(cpu_addr)); | ||
| 210 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_UPPER_TARGET, | ||
| 211 | upper_32_bits(cpu_addr)); | ||
| 212 | |||
| 213 | switch (as_type) { | ||
| 214 | case DW_PCIE_AS_MEM: | ||
| 215 | type = PCIE_ATU_TYPE_MEM; | ||
| 216 | break; | ||
| 217 | case DW_PCIE_AS_IO: | ||
| 218 | type = PCIE_ATU_TYPE_IO; | ||
| 219 | break; | ||
| 220 | default: | ||
| 221 | return -EINVAL; | ||
| 222 | } | ||
| 223 | |||
| 224 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL1, type); | ||
| 225 | dw_pcie_writel_ib_unroll(pci, index, PCIE_ATU_UNR_REGION_CTRL2, | ||
| 226 | PCIE_ATU_ENABLE | | ||
| 227 | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); | ||
| 228 | |||
| 229 | /* | ||
| 230 | * Make sure ATU enable takes effect before any subsequent config | ||
| 231 | * and I/O accesses. | ||
| 232 | */ | ||
| 233 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
| 234 | val = dw_pcie_readl_ib_unroll(pci, index, | ||
| 235 | PCIE_ATU_UNR_REGION_CTRL2); | ||
| 236 | if (val & PCIE_ATU_ENABLE) | ||
| 237 | return 0; | ||
| 238 | |||
| 239 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
| 240 | } | ||
| 241 | dev_err(pci->dev, "Inbound iATU could not be enabled\n"); | ||
| 242 | |||
| 243 | return -EBUSY; | ||
| 244 | } | ||
| 245 | |||
| 246 | int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | ||
| 247 | u64 cpu_addr, enum dw_pcie_as_type as_type) | ||
| 248 | { | ||
| 249 | int type; | ||
| 250 | u32 retries, val; | ||
| 251 | |||
| 252 | if (pci->iatu_unroll_enabled) | ||
| 253 | return dw_pcie_prog_inbound_atu_unroll(pci, index, bar, | ||
| 254 | cpu_addr, as_type); | ||
| 255 | |||
| 256 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, PCIE_ATU_REGION_INBOUND | | ||
| 257 | index); | ||
| 258 | dw_pcie_writel_dbi(pci, PCIE_ATU_LOWER_TARGET, lower_32_bits(cpu_addr)); | ||
| 259 | dw_pcie_writel_dbi(pci, PCIE_ATU_UPPER_TARGET, upper_32_bits(cpu_addr)); | ||
| 260 | |||
| 261 | switch (as_type) { | ||
| 262 | case DW_PCIE_AS_MEM: | ||
| 263 | type = PCIE_ATU_TYPE_MEM; | ||
| 264 | break; | ||
| 265 | case DW_PCIE_AS_IO: | ||
| 266 | type = PCIE_ATU_TYPE_IO; | ||
| 267 | break; | ||
| 268 | default: | ||
| 269 | return -EINVAL; | ||
| 270 | } | ||
| 271 | |||
| 272 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR1, type); | ||
| 273 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, PCIE_ATU_ENABLE | ||
| 274 | | PCIE_ATU_BAR_MODE_ENABLE | (bar << 8)); | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Make sure ATU enable takes effect before any subsequent config | ||
| 278 | * and I/O accesses. | ||
| 279 | */ | ||
| 280 | for (retries = 0; retries < LINK_WAIT_MAX_IATU_RETRIES; retries++) { | ||
| 281 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_CR2); | ||
| 282 | if (val & PCIE_ATU_ENABLE) | ||
| 283 | return 0; | ||
| 284 | |||
| 285 | usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX); | ||
| 286 | } | ||
| 287 | dev_err(pci->dev, "Inbound iATU could not be enabled\n"); | ||
| 288 | |||
| 289 | return -EBUSY; | ||
| 290 | } | ||
| 291 | |||
| 292 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
| 293 | enum dw_pcie_region_type type) | ||
| 294 | { | ||
| 295 | int region; | ||
| 296 | |||
| 297 | switch (type) { | ||
| 298 | case DW_PCIE_REGION_INBOUND: | ||
| 299 | region = PCIE_ATU_REGION_INBOUND; | ||
| 300 | break; | ||
| 301 | case DW_PCIE_REGION_OUTBOUND: | ||
| 302 | region = PCIE_ATU_REGION_OUTBOUND; | ||
| 303 | break; | ||
| 304 | default: | ||
| 305 | return; | ||
| 306 | } | ||
| 307 | |||
| 308 | dw_pcie_writel_dbi(pci, PCIE_ATU_VIEWPORT, region | index); | ||
| 309 | dw_pcie_writel_dbi(pci, PCIE_ATU_CR2, ~PCIE_ATU_ENABLE); | ||
| 310 | } | ||
| 311 | |||
| 312 | int dw_pcie_wait_for_link(struct dw_pcie *pci) | ||
| 313 | { | ||
| 314 | int retries; | ||
| 315 | |||
| 316 | /* Check if the link is up or not */ | ||
| 317 | for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { | ||
| 318 | if (dw_pcie_link_up(pci)) { | ||
| 319 | dev_info(pci->dev, "Link up\n"); | ||
| 320 | return 0; | ||
| 321 | } | ||
| 322 | usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); | ||
| 323 | } | ||
| 324 | |||
| 325 | dev_err(pci->dev, "PHY link never came up\n"); | ||
| 326 | |||
| 327 | return -ETIMEDOUT; | ||
| 328 | } | ||
| 329 | |||
| 330 | int dw_pcie_link_up(struct dw_pcie *pci) | ||
| 331 | { | ||
| 332 | u32 val; | ||
| 333 | |||
| 334 | if (pci->ops->link_up) | ||
| 335 | return pci->ops->link_up(pci); | ||
| 336 | |||
| 337 | val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); | ||
| 338 | return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && | ||
| 339 | (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); | ||
| 340 | } | ||
| 341 | |||
| 342 | void dw_pcie_setup(struct dw_pcie *pci) | ||
| 343 | { | ||
| 344 | int ret; | ||
| 345 | u32 val; | ||
| 346 | u32 lanes; | ||
| 347 | struct device *dev = pci->dev; | ||
| 348 | struct device_node *np = dev->of_node; | ||
| 349 | |||
| 350 | ret = of_property_read_u32(np, "num-lanes", &lanes); | ||
| 351 | if (ret) | ||
| 352 | lanes = 0; | ||
| 353 | |||
| 354 | /* Set the number of lanes */ | ||
| 355 | val = dw_pcie_readl_dbi(pci, PCIE_PORT_LINK_CONTROL); | ||
| 356 | val &= ~PORT_LINK_MODE_MASK; | ||
| 357 | switch (lanes) { | ||
| 358 | case 1: | ||
| 359 | val |= PORT_LINK_MODE_1_LANES; | ||
| 360 | break; | ||
| 361 | case 2: | ||
| 362 | val |= PORT_LINK_MODE_2_LANES; | ||
| 363 | break; | ||
| 364 | case 4: | ||
| 365 | val |= PORT_LINK_MODE_4_LANES; | ||
| 366 | break; | ||
| 367 | case 8: | ||
| 368 | val |= PORT_LINK_MODE_8_LANES; | ||
| 369 | break; | ||
| 370 | default: | ||
| 371 | dev_err(pci->dev, "num-lanes %u: invalid value\n", lanes); | ||
| 372 | return; | ||
| 373 | } | ||
| 374 | dw_pcie_writel_dbi(pci, PCIE_PORT_LINK_CONTROL, val); | ||
| 375 | |||
| 376 | /* Set link width speed control register */ | ||
| 377 | val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL); | ||
| 378 | val &= ~PORT_LOGIC_LINK_WIDTH_MASK; | ||
| 379 | switch (lanes) { | ||
| 380 | case 1: | ||
| 381 | val |= PORT_LOGIC_LINK_WIDTH_1_LANES; | ||
| 382 | break; | ||
| 383 | case 2: | ||
| 384 | val |= PORT_LOGIC_LINK_WIDTH_2_LANES; | ||
| 385 | break; | ||
| 386 | case 4: | ||
| 387 | val |= PORT_LOGIC_LINK_WIDTH_4_LANES; | ||
| 388 | break; | ||
| 389 | case 8: | ||
| 390 | val |= PORT_LOGIC_LINK_WIDTH_8_LANES; | ||
| 391 | break; | ||
| 392 | } | ||
| 393 | dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val); | ||
| 394 | } | ||
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h new file mode 100644 index 000000000000..bee4e2535a61 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
| @@ -0,0 +1,387 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Synopsys DesignWare PCIe host controller driver | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
| 6 | * http://www.samsung.com | ||
| 7 | * | ||
| 8 | * Author: Jingoo Han <jg1.han@samsung.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _PCIE_DESIGNWARE_H | ||
| 12 | #define _PCIE_DESIGNWARE_H | ||
| 13 | |||
| 14 | #include <linux/dma-mapping.h> | ||
| 15 | #include <linux/irq.h> | ||
| 16 | #include <linux/msi.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | |||
| 19 | #include <linux/pci-epc.h> | ||
| 20 | #include <linux/pci-epf.h> | ||
| 21 | |||
| 22 | /* Parameters for the waiting for link up routine */ | ||
| 23 | #define LINK_WAIT_MAX_RETRIES 10 | ||
| 24 | #define LINK_WAIT_USLEEP_MIN 90000 | ||
| 25 | #define LINK_WAIT_USLEEP_MAX 100000 | ||
| 26 | |||
| 27 | /* Parameters for the waiting for iATU enabled routine */ | ||
| 28 | #define LINK_WAIT_MAX_IATU_RETRIES 5 | ||
| 29 | #define LINK_WAIT_IATU_MIN 9000 | ||
| 30 | #define LINK_WAIT_IATU_MAX 10000 | ||
| 31 | |||
| 32 | /* Synopsys-specific PCIe configuration registers */ | ||
| 33 | #define PCIE_PORT_LINK_CONTROL 0x710 | ||
| 34 | #define PORT_LINK_MODE_MASK (0x3f << 16) | ||
| 35 | #define PORT_LINK_MODE_1_LANES (0x1 << 16) | ||
| 36 | #define PORT_LINK_MODE_2_LANES (0x3 << 16) | ||
| 37 | #define PORT_LINK_MODE_4_LANES (0x7 << 16) | ||
| 38 | #define PORT_LINK_MODE_8_LANES (0xf << 16) | ||
| 39 | |||
| 40 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | ||
| 41 | #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) | ||
| 42 | #define PORT_LOGIC_LINK_WIDTH_MASK (0x1f << 8) | ||
| 43 | #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) | ||
| 44 | #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) | ||
| 45 | #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) | ||
| 46 | #define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) | ||
| 47 | |||
| 48 | #define PCIE_MSI_ADDR_LO 0x820 | ||
| 49 | #define PCIE_MSI_ADDR_HI 0x824 | ||
| 50 | #define PCIE_MSI_INTR0_ENABLE 0x828 | ||
| 51 | #define PCIE_MSI_INTR0_MASK 0x82C | ||
| 52 | #define PCIE_MSI_INTR0_STATUS 0x830 | ||
| 53 | |||
| 54 | #define PCIE_ATU_VIEWPORT 0x900 | ||
| 55 | #define PCIE_ATU_REGION_INBOUND (0x1 << 31) | ||
| 56 | #define PCIE_ATU_REGION_OUTBOUND (0x0 << 31) | ||
| 57 | #define PCIE_ATU_REGION_INDEX2 (0x2 << 0) | ||
| 58 | #define PCIE_ATU_REGION_INDEX1 (0x1 << 0) | ||
| 59 | #define PCIE_ATU_REGION_INDEX0 (0x0 << 0) | ||
| 60 | #define PCIE_ATU_CR1 0x904 | ||
| 61 | #define PCIE_ATU_TYPE_MEM (0x0 << 0) | ||
| 62 | #define PCIE_ATU_TYPE_IO (0x2 << 0) | ||
| 63 | #define PCIE_ATU_TYPE_CFG0 (0x4 << 0) | ||
| 64 | #define PCIE_ATU_TYPE_CFG1 (0x5 << 0) | ||
| 65 | #define PCIE_ATU_CR2 0x908 | ||
| 66 | #define PCIE_ATU_ENABLE (0x1 << 31) | ||
| 67 | #define PCIE_ATU_BAR_MODE_ENABLE (0x1 << 30) | ||
| 68 | #define PCIE_ATU_LOWER_BASE 0x90C | ||
| 69 | #define PCIE_ATU_UPPER_BASE 0x910 | ||
| 70 | #define PCIE_ATU_LIMIT 0x914 | ||
| 71 | #define PCIE_ATU_LOWER_TARGET 0x918 | ||
| 72 | #define PCIE_ATU_BUS(x) (((x) & 0xff) << 24) | ||
| 73 | #define PCIE_ATU_DEV(x) (((x) & 0x1f) << 19) | ||
| 74 | #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) | ||
| 75 | #define PCIE_ATU_UPPER_TARGET 0x91C | ||
| 76 | |||
| 77 | #define PCIE_MISC_CONTROL_1_OFF 0x8BC | ||
| 78 | #define PCIE_DBI_RO_WR_EN (0x1 << 0) | ||
| 79 | |||
| 80 | /* | ||
| 81 | * iATU Unroll-specific register definitions | ||
| 82 | * From core version 4.80 onward, address translation is done via the unrolled iATU registers | ||
| 83 | */ | ||
| 84 | #define PCIE_ATU_UNR_REGION_CTRL1 0x00 | ||
| 85 | #define PCIE_ATU_UNR_REGION_CTRL2 0x04 | ||
| 86 | #define PCIE_ATU_UNR_LOWER_BASE 0x08 | ||
| 87 | #define PCIE_ATU_UNR_UPPER_BASE 0x0C | ||
| 88 | #define PCIE_ATU_UNR_LIMIT 0x10 | ||
| 89 | #define PCIE_ATU_UNR_LOWER_TARGET 0x14 | ||
| 90 | #define PCIE_ATU_UNR_UPPER_TARGET 0x18 | ||
| 91 | |||
| 92 | /* Register address builder */ | ||
| 93 | #define PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(region) \ | ||
| 94 | ((0x3 << 20) | ((region) << 9)) | ||
| 95 | |||
| 96 | #define PCIE_GET_ATU_INB_UNR_REG_OFFSET(region) \ | ||
| 97 | ((0x3 << 20) | ((region) << 9) | (0x1 << 8)) | ||
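
These builders place each unrolled region's registers in a 0x200-byte block starting at DBI offset 0x300000, with the inbound register set 0x100 into the block. A sketch of the resulting offsets (local macro copies of the two builders above):

```c
#include <stdio.h>

/* Local copies of the outbound/inbound offset builders above */
#define OUTB(region)	((0x3u << 20) | ((unsigned)(region) << 9))
#define INB(region)	((0x3u << 20) | ((unsigned)(region) << 9) | (0x1u << 8))

int main(void)
{
	printf("outbound region 0 regs at DBI + 0x%x\n", OUTB(0));	/* 0x300000 */
	printf("outbound region 1 regs at DBI + 0x%x\n", OUTB(1));	/* 0x300200 */
	printf("inbound  region 1 regs at DBI + 0x%x\n", INB(1));	/* 0x300300 */
	return 0;
}
```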
| 98 | |||
| 99 | #define MSI_MESSAGE_CONTROL 0x52 | ||
| 100 | #define MSI_CAP_MMC_SHIFT 1 | ||
| 101 | #define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT) | ||
| 102 | #define MSI_CAP_MME_SHIFT 4 | ||
| 103 | #define MSI_CAP_MSI_EN_MASK 0x1 | ||
| 104 | #define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT) | ||
| 105 | #define MSI_MESSAGE_ADDR_L32 0x54 | ||
| 106 | #define MSI_MESSAGE_ADDR_U32 0x58 | ||
| 107 | #define MSI_MESSAGE_DATA_32 0x58 | ||
| 108 | #define MSI_MESSAGE_DATA_64 0x5C | ||
| 109 | |||
| 110 | #define MAX_MSI_IRQS 256 | ||
| 111 | #define MAX_MSI_IRQS_PER_CTRL 32 | ||
| 112 | #define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL) | ||
| 113 | #define MSI_REG_CTRL_BLOCK_SIZE 12 | ||
| 114 | #define MSI_DEF_NUM_VECTORS 32 | ||
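
With 256 vectors at 32 per controller there are 8 controllers (MAX_MSI_CTRLS), and each controller's ENABLE/MASK/STATUS registers form a 12-byte block (MSI_REG_CTRL_BLOCK_SIZE) after the previous one's, which is exactly how dw_pcie_setup_rc() indexes them. A sketch of the address math:

```c
#include <stdio.h>

#define MSI_INTR0_ENABLE	0x828	/* PCIE_MSI_INTR0_ENABLE above */
#define IRQS_PER_CTRL		32	/* MAX_MSI_IRQS_PER_CTRL */
#define CTRL_BLOCK_SIZE		12	/* MSI_REG_CTRL_BLOCK_SIZE */

int main(void)
{
	int ctrl;

	/* 256 / 32 = 8 controllers, each 12 bytes after the previous one */
	for (ctrl = 0; ctrl < 256 / IRQS_PER_CTRL; ctrl++)
		printf("ctrl %d: ENABLE at 0x%x\n",
		       ctrl, MSI_INTR0_ENABLE + ctrl * CTRL_BLOCK_SIZE);
	return 0;
}
```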
| 115 | |||
| 116 | /* Maximum number of inbound/outbound iATUs */ | ||
| 117 | #define MAX_IATU_IN 256 | ||
| 118 | #define MAX_IATU_OUT 256 | ||
| 119 | |||
| 120 | struct pcie_port; | ||
| 121 | struct dw_pcie; | ||
| 122 | struct dw_pcie_ep; | ||
| 123 | |||
| 124 | enum dw_pcie_region_type { | ||
| 125 | DW_PCIE_REGION_UNKNOWN, | ||
| 126 | DW_PCIE_REGION_INBOUND, | ||
| 127 | DW_PCIE_REGION_OUTBOUND, | ||
| 128 | }; | ||
| 129 | |||
| 130 | enum dw_pcie_device_mode { | ||
| 131 | DW_PCIE_UNKNOWN_TYPE, | ||
| 132 | DW_PCIE_EP_TYPE, | ||
| 133 | DW_PCIE_LEG_EP_TYPE, | ||
| 134 | DW_PCIE_RC_TYPE, | ||
| 135 | }; | ||
| 136 | |||
| 137 | struct dw_pcie_host_ops { | ||
| 138 | int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val); | ||
| 139 | int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val); | ||
| 140 | int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus, | ||
| 141 | unsigned int devfn, int where, int size, u32 *val); | ||
| 142 | int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, | ||
| 143 | unsigned int devfn, int where, int size, u32 val); | ||
| 144 | int (*host_init)(struct pcie_port *pp); | ||
| 145 | void (*msi_set_irq)(struct pcie_port *pp, int irq); | ||
| 146 | void (*msi_clear_irq)(struct pcie_port *pp, int irq); | ||
| 147 | phys_addr_t (*get_msi_addr)(struct pcie_port *pp); | ||
| 148 | u32 (*get_msi_data)(struct pcie_port *pp, int pos); | ||
| 149 | void (*scan_bus)(struct pcie_port *pp); | ||
| 150 | void (*set_num_vectors)(struct pcie_port *pp); | ||
| 151 | int (*msi_host_init)(struct pcie_port *pp); | ||
| 152 | void (*msi_irq_ack)(int irq, struct pcie_port *pp); | ||
| 153 | }; | ||
| 154 | |||
| 155 | struct pcie_port { | ||
| 156 | u8 root_bus_nr; | ||
| 157 | u64 cfg0_base; | ||
| 158 | void __iomem *va_cfg0_base; | ||
| 159 | u32 cfg0_size; | ||
| 160 | u64 cfg1_base; | ||
| 161 | void __iomem *va_cfg1_base; | ||
| 162 | u32 cfg1_size; | ||
| 163 | resource_size_t io_base; | ||
| 164 | phys_addr_t io_bus_addr; | ||
| 165 | u32 io_size; | ||
| 166 | u64 mem_base; | ||
| 167 | phys_addr_t mem_bus_addr; | ||
| 168 | u32 mem_size; | ||
| 169 | struct resource *cfg; | ||
| 170 | struct resource *io; | ||
| 171 | struct resource *mem; | ||
| 172 | struct resource *busn; | ||
| 173 | int irq; | ||
| 174 | const struct dw_pcie_host_ops *ops; | ||
| 175 | int msi_irq; | ||
| 176 | struct irq_domain *irq_domain; | ||
| 177 | struct irq_domain *msi_domain; | ||
| 178 | dma_addr_t msi_data; | ||
| 179 | u32 num_vectors; | ||
| 180 | u32 irq_status[MAX_MSI_CTRLS]; | ||
| 181 | raw_spinlock_t lock; | ||
| 182 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); | ||
| 183 | }; | ||
| 184 | |||
| 185 | enum dw_pcie_as_type { | ||
| 186 | DW_PCIE_AS_UNKNOWN, | ||
| 187 | DW_PCIE_AS_MEM, | ||
| 188 | DW_PCIE_AS_IO, | ||
| 189 | }; | ||
| 190 | |||
| 191 | struct dw_pcie_ep_ops { | ||
| 192 | void (*ep_init)(struct dw_pcie_ep *ep); | ||
| 193 | int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no, | ||
| 194 | enum pci_epc_irq_type type, u8 interrupt_num); | ||
| 195 | }; | ||
| 196 | |||
| 197 | struct dw_pcie_ep { | ||
| 198 | struct pci_epc *epc; | ||
| 199 | struct dw_pcie_ep_ops *ops; | ||
| 200 | phys_addr_t phys_base; | ||
| 201 | size_t addr_size; | ||
| 202 | size_t page_size; | ||
| 203 | u8 bar_to_atu[6]; | ||
| 204 | phys_addr_t *outbound_addr; | ||
| 205 | unsigned long *ib_window_map; | ||
| 206 | unsigned long *ob_window_map; | ||
| 207 | u32 num_ib_windows; | ||
| 208 | u32 num_ob_windows; | ||
| 209 | void __iomem *msi_mem; | ||
| 210 | phys_addr_t msi_mem_phys; | ||
| 211 | }; | ||
| 212 | |||
| 213 | struct dw_pcie_ops { | ||
| 214 | u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr); | ||
| 215 | u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
| 216 | size_t size); | ||
| 217 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
| 218 | size_t size, u32 val); | ||
| 219 | int (*link_up)(struct dw_pcie *pcie); | ||
| 220 | int (*start_link)(struct dw_pcie *pcie); | ||
| 221 | void (*stop_link)(struct dw_pcie *pcie); | ||
| 222 | }; | ||
| 223 | |||
| 224 | struct dw_pcie { | ||
| 225 | struct device *dev; | ||
| 226 | void __iomem *dbi_base; | ||
| 227 | void __iomem *dbi_base2; | ||
| 228 | u32 num_viewport; | ||
| 229 | u8 iatu_unroll_enabled; | ||
| 230 | struct pcie_port pp; | ||
| 231 | struct dw_pcie_ep ep; | ||
| 232 | const struct dw_pcie_ops *ops; | ||
| 233 | }; | ||
| 234 | |||
| 235 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) | ||
| 236 | |||
| 237 | #define to_dw_pcie_from_ep(endpoint) \ | ||
| 238 | container_of((endpoint), struct dw_pcie, ep) | ||
| 239 | |||
| 240 | int dw_pcie_read(void __iomem *addr, int size, u32 *val); | ||
| 241 | int dw_pcie_write(void __iomem *addr, int size, u32 val); | ||
| 242 | |||
| 243 | u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 244 | size_t size); | ||
| 245 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 246 | size_t size, u32 val); | ||
| 247 | int dw_pcie_link_up(struct dw_pcie *pci); | ||
| 248 | int dw_pcie_wait_for_link(struct dw_pcie *pci); | ||
| 249 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, | ||
| 250 | int type, u64 cpu_addr, u64 pci_addr, | ||
| 251 | u32 size); | ||
| 252 | int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar, | ||
| 253 | u64 cpu_addr, enum dw_pcie_as_type as_type); | ||
| 254 | void dw_pcie_disable_atu(struct dw_pcie *pci, int index, | ||
| 255 | enum dw_pcie_region_type type); | ||
| 256 | void dw_pcie_setup(struct dw_pcie *pci); | ||
| 257 | |||
| 258 | static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val) | ||
| 259 | { | ||
| 260 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x4, val); | ||
| 261 | } | ||
| 262 | |||
| 263 | static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg) | ||
| 264 | { | ||
| 265 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x4); | ||
| 266 | } | ||
| 267 | |||
| 268 | static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val) | ||
| 269 | { | ||
| 270 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x2, val); | ||
| 271 | } | ||
| 272 | |||
| 273 | static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg) | ||
| 274 | { | ||
| 275 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x2); | ||
| 276 | } | ||
| 277 | |||
| 278 | static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val) | ||
| 279 | { | ||
| 280 | __dw_pcie_write_dbi(pci, pci->dbi_base, reg, 0x1, val); | ||
| 281 | } | ||
| 282 | |||
| 283 | static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) | ||
| 284 | { | ||
| 285 | return __dw_pcie_read_dbi(pci, pci->dbi_base, reg, 0x1); | ||
| 286 | } | ||
| 287 | |||
| 288 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) | ||
| 289 | { | ||
| 290 | __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); | ||
| 291 | } | ||
| 292 | |||
| 293 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) | ||
| 294 | { | ||
| 295 | return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); | ||
| 296 | } | ||
| 297 | |||
| 298 | static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci) | ||
| 299 | { | ||
| 300 | u32 reg; | ||
| 301 | u32 val; | ||
| 302 | |||
| 303 | reg = PCIE_MISC_CONTROL_1_OFF; | ||
| 304 | val = dw_pcie_readl_dbi(pci, reg); | ||
| 305 | val |= PCIE_DBI_RO_WR_EN; | ||
| 306 | dw_pcie_writel_dbi(pci, reg, val); | ||
| 307 | } | ||
| 308 | |||
| 309 | static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci) | ||
| 310 | { | ||
| 311 | u32 reg; | ||
| 312 | u32 val; | ||
| 313 | |||
| 314 | reg = PCIE_MISC_CONTROL_1_OFF; | ||
| 315 | val = dw_pcie_readl_dbi(pci, reg); | ||
| 316 | val &= ~PCIE_DBI_RO_WR_EN; | ||
| 317 | dw_pcie_writel_dbi(pci, reg, val); | ||
| 318 | } | ||
| 319 | |||
| 320 | #ifdef CONFIG_PCIE_DW_HOST | ||
| 321 | irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); | ||
| 322 | void dw_pcie_msi_init(struct pcie_port *pp); | ||
| 323 | void dw_pcie_free_msi(struct pcie_port *pp); | ||
| 324 | void dw_pcie_setup_rc(struct pcie_port *pp); | ||
| 325 | int dw_pcie_host_init(struct pcie_port *pp); | ||
| 326 | int dw_pcie_allocate_domains(struct pcie_port *pp); | ||
| 327 | #else | ||
| 328 | static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) | ||
| 329 | { | ||
| 330 | return IRQ_NONE; | ||
| 331 | } | ||
| 332 | |||
| 333 | static inline void dw_pcie_msi_init(struct pcie_port *pp) | ||
| 334 | { | ||
| 335 | } | ||
| 336 | |||
| 337 | static inline void dw_pcie_free_msi(struct pcie_port *pp) | ||
| 338 | { | ||
| 339 | } | ||
| 340 | |||
| 341 | static inline void dw_pcie_setup_rc(struct pcie_port *pp) | ||
| 342 | { | ||
| 343 | } | ||
| 344 | |||
| 345 | static inline int dw_pcie_host_init(struct pcie_port *pp) | ||
| 346 | { | ||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | |||
| 350 | static inline int dw_pcie_allocate_domains(struct pcie_port *pp) | ||
| 351 | { | ||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | #endif | ||
| 355 | |||
| 356 | #ifdef CONFIG_PCIE_DW_EP | ||
| 357 | void dw_pcie_ep_linkup(struct dw_pcie_ep *ep); | ||
| 358 | int dw_pcie_ep_init(struct dw_pcie_ep *ep); | ||
| 359 | void dw_pcie_ep_exit(struct dw_pcie_ep *ep); | ||
| 360 | int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 361 | u8 interrupt_num); | ||
| 362 | void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar); | ||
| 363 | #else | ||
| 364 | static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep) | ||
| 365 | { | ||
| 366 | } | ||
| 367 | |||
| 368 | static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep) | ||
| 369 | { | ||
| 370 | return 0; | ||
| 371 | } | ||
| 372 | |||
| 373 | static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | ||
| 374 | { | ||
| 375 | } | ||
| 376 | |||
| 377 | static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 378 | u8 interrupt_num) | ||
| 379 | { | ||
| 380 | return 0; | ||
| 381 | } | ||
| 382 | |||
| 383 | static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar) | ||
| 384 | { | ||
| 385 | } | ||
| 386 | #endif | ||
| 387 | #endif /* _PCIE_DESIGNWARE_H */ | ||
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c new file mode 100644 index 000000000000..6d9e1b2b8f7b --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-hisi.c | |||
| @@ -0,0 +1,398 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for HiSilicon SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2015 HiSilicon Co., Ltd. http://www.hisilicon.com | ||
| 6 | * | ||
| 7 | * Authors: Zhou Wang <wangzhou1@hisilicon.com> | ||
| 8 | * Dacai Zhu <zhudacai@hisilicon.com> | ||
| 9 | * Gabriele Paoloni <gabriele.paoloni@huawei.com> | ||
| 10 | */ | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/mfd/syscon.h> | ||
| 14 | #include <linux/of_address.h> | ||
| 15 | #include <linux/of_pci.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/of_device.h> | ||
| 18 | #include <linux/pci.h> | ||
| 19 | #include <linux/pci-acpi.h> | ||
| 20 | #include <linux/pci-ecam.h> | ||
| 21 | #include <linux/regmap.h> | ||
| 22 | #include "../../pci.h" | ||
| 23 | |||
| 24 | #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) | ||
| 25 | |||
| 26 | static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
| 27 | int size, u32 *val) | ||
| 28 | { | ||
| 29 | struct pci_config_window *cfg = bus->sysdata; | ||
| 30 | int dev = PCI_SLOT(devfn); | ||
| 31 | |||
| 32 | if (bus->number == cfg->busr.start) { | ||
| 33 | /* access only one slot on each root port */ | ||
| 34 | if (dev > 0) | ||
| 35 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 36 | else | ||
| 37 | return pci_generic_config_read32(bus, devfn, where, | ||
| 38 | size, val); | ||
| 39 | } | ||
| 40 | |||
| 41 | return pci_generic_config_read(bus, devfn, where, size, val); | ||
| 42 | } | ||
| 43 | |||
| 44 | static int hisi_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
| 45 | int where, int size, u32 val) | ||
| 46 | { | ||
| 47 | struct pci_config_window *cfg = bus->sysdata; | ||
| 48 | int dev = PCI_SLOT(devfn); | ||
| 49 | |||
| 50 | if (bus->number == cfg->busr.start) { | ||
| 51 | /* access only one slot on each root port */ | ||
| 52 | if (dev > 0) | ||
| 53 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 54 | else | ||
| 55 | return pci_generic_config_write32(bus, devfn, where, | ||
| 56 | size, val); | ||
| 57 | } | ||
| 58 | |||
| 59 | return pci_generic_config_write(bus, devfn, where, size, val); | ||
| 60 | } | ||
| 61 | |||
| 62 | static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
| 63 | int where) | ||
| 64 | { | ||
| 65 | struct pci_config_window *cfg = bus->sysdata; | ||
| 66 | void __iomem *reg_base = cfg->priv; | ||
| 67 | |||
| 68 | if (bus->number == cfg->busr.start) | ||
| 69 | return reg_base + where; | ||
| 70 | else | ||
| 71 | return pci_ecam_map_bus(bus, devfn, where); | ||
| 72 | } | ||
| 73 | |||
| 74 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
| 75 | |||
| 76 | static int hisi_pcie_init(struct pci_config_window *cfg) | ||
| 77 | { | ||
| 78 | struct device *dev = cfg->parent; | ||
| 79 | struct acpi_device *adev = to_acpi_device(dev); | ||
| 80 | struct acpi_pci_root *root = acpi_driver_data(adev); | ||
| 81 | struct resource *res; | ||
| 82 | void __iomem *reg_base; | ||
| 83 | int ret; | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Retrieve RC base and size from a HISI0081 device with _UID | ||
| 87 | * matching our segment. | ||
| 88 | */ | ||
| 89 | res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); | ||
| 90 | if (!res) | ||
| 91 | return -ENOMEM; | ||
| 92 | |||
| 93 | ret = acpi_get_rc_resources(dev, "HISI0081", root->segment, res); | ||
| 94 | if (ret) { | ||
| 95 | dev_err(dev, "can't get rc base address\n"); | ||
| 96 | return -ENOMEM; | ||
| 97 | } | ||
| 98 | |||
| 99 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); | ||
| 100 | if (!reg_base) | ||
| 101 | return -ENOMEM; | ||
| 102 | |||
| 103 | cfg->priv = reg_base; | ||
| 104 | return 0; | ||
| 105 | } | ||
| 106 | |||
| 107 | struct pci_ecam_ops hisi_pcie_ops = { | ||
| 108 | .bus_shift = 20, | ||
| 109 | .init = hisi_pcie_init, | ||
| 110 | .pci_ops = { | ||
| 111 | .map_bus = hisi_pcie_map_bus, | ||
| 112 | .read = hisi_pcie_rd_conf, | ||
| 113 | .write = hisi_pcie_wr_conf, | ||
| 114 | } | ||
| 115 | }; | ||
| 116 | |||
| 117 | #endif | ||
| 118 | |||
| 119 | #ifdef CONFIG_PCI_HISI | ||
| 120 | |||
| 121 | #include "pcie-designware.h" | ||
| 122 | |||
| 123 | #define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 | ||
| 124 | #define PCIE_HIP06_CTRL_OFF 0x1000 | ||
| 125 | #define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) | ||
| 126 | #define PCIE_LTSSM_LINKUP_STATE 0x11 | ||
| 127 | #define PCIE_LTSSM_STATE_MASK 0x3F | ||
| 128 | |||
| 129 | #define to_hisi_pcie(x) dev_get_drvdata((x)->dev) | ||
| 130 | |||
| 131 | struct hisi_pcie; | ||
| 132 | |||
| 133 | struct pcie_soc_ops { | ||
| 134 | int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); | ||
| 135 | }; | ||
| 136 | |||
| 137 | struct hisi_pcie { | ||
| 138 | struct dw_pcie *pci; | ||
| 139 | struct regmap *subctrl; | ||
| 140 | u32 port_id; | ||
| 141 | const struct pcie_soc_ops *soc_ops; | ||
| 142 | }; | ||
| 143 | |||
| 144 | /* HipXX PCIe host only supports 32-bit config access */ | ||
| 145 | static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, | ||
| 146 | u32 *val) | ||
| 147 | { | ||
| 148 | u32 reg; | ||
| 149 | u32 reg_val; | ||
| 150 | void *walker = ®_val; | ||
| 151 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 152 | |||
| 153 | walker += (where & 0x3); | ||
| 154 | reg = where & ~0x3; | ||
| 155 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
| 156 | |||
| 157 | if (size == 1) | ||
| 158 | *val = *(u8 __force *) walker; | ||
| 159 | else if (size == 2) | ||
| 160 | *val = *(u16 __force *) walker; | ||
| 161 | else if (size == 4) | ||
| 162 | *val = reg_val; | ||
| 163 | else | ||
| 164 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 165 | |||
| 166 | return PCIBIOS_SUCCESSFUL; | ||
| 167 | } | ||
| 168 | |||
| 169 | /* HipXX PCIe host only supports 32-bit config access */ | ||
| 170 | static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, | ||
| 171 | u32 val) | ||
| 172 | { | ||
| 173 | u32 reg_val; | ||
| 174 | u32 reg; | ||
| 175 | void *walker = ®_val; | ||
| 176 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 177 | |||
| 178 | walker += (where & 0x3); | ||
| 179 | reg = where & ~0x3; | ||
| 180 | if (size == 4) | ||
| 181 | dw_pcie_writel_dbi(pci, reg, val); | ||
| 182 | else if (size == 2) { | ||
| 183 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
| 184 | *(u16 __force *) walker = val; | ||
| 185 | dw_pcie_writel_dbi(pci, reg, reg_val); | ||
| 186 | } else if (size == 1) { | ||
| 187 | reg_val = dw_pcie_readl_dbi(pci, reg); | ||
| 188 | *(u8 __force *) walker = val; | ||
| 189 | dw_pcie_writel_dbi(pci, reg, reg_val); | ||
| 190 | } else | ||
| 191 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 192 | |||
| 193 | return PCIBIOS_SUCCESSFUL; | ||
| 194 | } | ||
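
Sub-word writes are emulated by reading the aligned 32-bit register, patching the relevant byte lanes through the `walker` pointer, and writing the whole word back. A standalone sketch of the 16-bit case with a hypothetical register value, assuming a little-endian CPU (the DBI access is replaced by a local variable):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical current value of the aligned 32-bit register */
	uint32_t reg_val = 0xaabbccdd;
	int where = 0x52;		/* 16-bit write; where & 0x3 == 2 */
	uint16_t val = 0x1234;

	/* Same byte-lane walk as hisi_pcie_cfg_write() */
	uint8_t *walker = (uint8_t *)&reg_val;
	walker += where & 0x3;
	*(uint16_t *)walker = val;

	printf("merged value written back: 0x%08x\n", reg_val);	/* 0x1234ccdd */
	return 0;
}
```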
| 195 | |||
| 196 | static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) | ||
| 197 | { | ||
| 198 | u32 val; | ||
| 199 | |||
| 200 | regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + | ||
| 201 | 0x100 * hisi_pcie->port_id, &val); | ||
| 202 | |||
| 203 | return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); | ||
| 204 | } | ||
| 205 | |||
| 206 | static int hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) | ||
| 207 | { | ||
| 208 | struct dw_pcie *pci = hisi_pcie->pci; | ||
| 209 | u32 val; | ||
| 210 | |||
| 211 | val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); | ||
| 212 | |||
| 213 | return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); | ||
| 214 | } | ||
| 215 | |||
| 216 | static int hisi_pcie_link_up(struct dw_pcie *pci) | ||
| 217 | { | ||
| 218 | struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); | ||
| 219 | |||
| 220 | return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); | ||
| 221 | } | ||
| 222 | |||
| 223 | static const struct dw_pcie_host_ops hisi_pcie_host_ops = { | ||
| 224 | .rd_own_conf = hisi_pcie_cfg_read, | ||
| 225 | .wr_own_conf = hisi_pcie_cfg_write, | ||
| 226 | }; | ||
| 227 | |||
| 228 | static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, | ||
| 229 | struct platform_device *pdev) | ||
| 230 | { | ||
| 231 | struct dw_pcie *pci = hisi_pcie->pci; | ||
| 232 | struct pcie_port *pp = &pci->pp; | ||
| 233 | struct device *dev = &pdev->dev; | ||
| 234 | int ret; | ||
| 235 | u32 port_id; | ||
| 236 | |||
| 237 | if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { | ||
| 238 | dev_err(dev, "failed to read port-id\n"); | ||
| 239 | return -EINVAL; | ||
| 240 | } | ||
| 241 | if (port_id > 3) { | ||
| 242 | dev_err(dev, "Invalid port-id: %d\n", port_id); | ||
| 243 | return -EINVAL; | ||
| 244 | } | ||
| 245 | hisi_pcie->port_id = port_id; | ||
| 246 | |||
| 247 | pp->ops = &hisi_pcie_host_ops; | ||
| 248 | |||
| 249 | ret = dw_pcie_host_init(pp); | ||
| 250 | if (ret) { | ||
| 251 | dev_err(dev, "failed to initialize host\n"); | ||
| 252 | return ret; | ||
| 253 | } | ||
| 254 | |||
| 255 | return 0; | ||
| 256 | } | ||
| 257 | |||
| 258 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 259 | .link_up = hisi_pcie_link_up, | ||
| 260 | }; | ||
| 261 | |||
| 262 | static int hisi_pcie_probe(struct platform_device *pdev) | ||
| 263 | { | ||
| 264 | struct device *dev = &pdev->dev; | ||
| 265 | struct dw_pcie *pci; | ||
| 266 | struct hisi_pcie *hisi_pcie; | ||
| 267 | struct resource *reg; | ||
| 268 | int ret; | ||
| 269 | |||
| 270 | hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); | ||
| 271 | if (!hisi_pcie) | ||
| 272 | return -ENOMEM; | ||
| 273 | |||
| 274 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 275 | if (!pci) | ||
| 276 | return -ENOMEM; | ||
| 277 | |||
| 278 | pci->dev = dev; | ||
| 279 | pci->ops = &dw_pcie_ops; | ||
| 280 | |||
| 281 | hisi_pcie->pci = pci; | ||
| 282 | |||
| 283 | hisi_pcie->soc_ops = of_device_get_match_data(dev); | ||
| 284 | |||
| 285 | hisi_pcie->subctrl = | ||
| 286 | syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); | ||
| 287 | if (IS_ERR(hisi_pcie->subctrl)) { | ||
| 288 | dev_err(dev, "cannot get subctrl base\n"); | ||
| 289 | return PTR_ERR(hisi_pcie->subctrl); | ||
| 290 | } | ||
| 291 | |||
| 292 | reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); | ||
| 293 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); | ||
| 294 | if (IS_ERR(pci->dbi_base)) | ||
| 295 | return PTR_ERR(pci->dbi_base); | ||
| 296 | platform_set_drvdata(pdev, hisi_pcie); | ||
| 297 | |||
| 298 | ret = hisi_add_pcie_port(hisi_pcie, pdev); | ||
| 299 | if (ret) | ||
| 300 | return ret; | ||
| 301 | |||
| 302 | return 0; | ||
| 303 | } | ||
| 304 | |||
| 305 | static struct pcie_soc_ops hip05_ops = { | ||
| 306 | .hisi_pcie_link_up = hisi_pcie_link_up_hip05, | ||
| 307 | }; | ||
| 308 | |||
| 309 | static struct pcie_soc_ops hip06_ops = { | ||
| 310 | .hisi_pcie_link_up = hisi_pcie_link_up_hip06, | ||
| 311 | }; | ||
| 312 | |||
| 313 | static const struct of_device_id hisi_pcie_of_match[] = { | ||
| 314 | { | ||
| 315 | .compatible = "hisilicon,hip05-pcie", | ||
| 316 | .data = (void *) &hip05_ops, | ||
| 317 | }, | ||
| 318 | { | ||
| 319 | .compatible = "hisilicon,hip06-pcie", | ||
| 320 | .data = (void *) &hip06_ops, | ||
| 321 | }, | ||
| 322 | {}, | ||
| 323 | }; | ||
| 324 | |||
| 325 | static struct platform_driver hisi_pcie_driver = { | ||
| 326 | .probe = hisi_pcie_probe, | ||
| 327 | .driver = { | ||
| 328 | .name = "hisi-pcie", | ||
| 329 | .of_match_table = hisi_pcie_of_match, | ||
| 330 | .suppress_bind_attrs = true, | ||
| 331 | }, | ||
| 332 | }; | ||
| 333 | builtin_platform_driver(hisi_pcie_driver); | ||
| 334 | |||
| 335 | static int hisi_pcie_almost_ecam_probe(struct platform_device *pdev) | ||
| 336 | { | ||
| 337 | struct device *dev = &pdev->dev; | ||
| 338 | struct pci_ecam_ops *ops; | ||
| 339 | |||
| 340 | ops = (struct pci_ecam_ops *)of_device_get_match_data(dev); | ||
| 341 | return pci_host_common_probe(pdev, ops); | ||
| 342 | } | ||
| 343 | |||
| 344 | static int hisi_pcie_platform_init(struct pci_config_window *cfg) | ||
| 345 | { | ||
| 346 | struct device *dev = cfg->parent; | ||
| 347 | struct platform_device *pdev = to_platform_device(dev); | ||
| 348 | struct resource *res; | ||
| 349 | void __iomem *reg_base; | ||
| 350 | |||
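| | /* reg[1] appears to hold the RC's own registers; it is remapped and stashed in cfg->priv for hisi_pcie_map_bus */ | ||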
| 351 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 352 | if (!res) { | ||
| 353 | dev_err(dev, "missing \"reg[1]\" property\n"); | ||
| 354 | return -EINVAL; | ||
| 355 | } | ||
| 356 | |||
| 357 | reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res)); | ||
| 358 | if (!reg_base) | ||
| 359 | return -ENOMEM; | ||
| 360 | |||
| 361 | cfg->priv = reg_base; | ||
| 362 | return 0; | ||
| 363 | } | ||
| 364 | |||
| 365 | struct pci_ecam_ops hisi_pcie_platform_ops = { | ||
| 366 | .bus_shift = 20, | ||
| 367 | .init = hisi_pcie_platform_init, | ||
| 368 | .pci_ops = { | ||
| 369 | .map_bus = hisi_pcie_map_bus, | ||
| 370 | .read = hisi_pcie_rd_conf, | ||
| 371 | .write = hisi_pcie_wr_conf, | ||
| 372 | } | ||
| 373 | }; | ||
| 374 | |||
| 375 | static const struct of_device_id hisi_pcie_almost_ecam_of_match[] = { | ||
| 376 | { | ||
| 377 | .compatible = "hisilicon,hip06-pcie-ecam", | ||
| 378 | .data = (void *) &hisi_pcie_platform_ops, | ||
| 379 | }, | ||
| 380 | { | ||
| 381 | .compatible = "hisilicon,hip07-pcie-ecam", | ||
| 382 | .data = (void *) &hisi_pcie_platform_ops, | ||
| 383 | }, | ||
| 384 | {}, | ||
| 385 | }; | ||
| 386 | |||
| 387 | static struct platform_driver hisi_pcie_almost_ecam_driver = { | ||
| 388 | .probe = hisi_pcie_almost_ecam_probe, | ||
| 389 | .driver = { | ||
| 390 | .name = "hisi-pcie-almost-ecam", | ||
| 391 | .of_match_table = hisi_pcie_almost_ecam_of_match, | ||
| 392 | .suppress_bind_attrs = true, | ||
| 393 | }, | ||
| 394 | }; | ||
| 395 | builtin_platform_driver(hisi_pcie_almost_ecam_driver); | ||
| 396 | |||
| 397 | #endif | ||
| 398 | #endif | ||
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c new file mode 100644 index 000000000000..3611d6ce9a92 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-histb.c | |||
| @@ -0,0 +1,472 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for HiSilicon STB SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2016-2017 HiSilicon Co., Ltd. http://www.hisilicon.com | ||
| 6 | * | ||
| 7 | * Authors: Ruqiang Ju <juruqiang@hisilicon.com> | ||
| 8 | * Jianguo Sun <sunjianguo1@huawei.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/clk.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/module.h> | ||
| 16 | #include <linux/of.h> | ||
| 17 | #include <linux/of_gpio.h> | ||
| 18 | #include <linux/pci.h> | ||
| 19 | #include <linux/phy/phy.h> | ||
| 20 | #include <linux/platform_device.h> | ||
| 21 | #include <linux/resource.h> | ||
| 22 | #include <linux/reset.h> | ||
| 23 | |||
| 24 | #include "pcie-designware.h" | ||
| 25 | |||
| 26 | #define to_histb_pcie(x) dev_get_drvdata((x)->dev) | ||
| 27 | |||
| 28 | #define PCIE_SYS_CTRL0 0x0000 | ||
| 29 | #define PCIE_SYS_CTRL1 0x0004 | ||
| 30 | #define PCIE_SYS_CTRL7 0x001C | ||
| 31 | #define PCIE_SYS_CTRL13 0x0034 | ||
| 32 | #define PCIE_SYS_CTRL15 0x003C | ||
| 33 | #define PCIE_SYS_CTRL16 0x0040 | ||
| 34 | #define PCIE_SYS_CTRL17 0x0044 | ||
| 35 | |||
| 36 | #define PCIE_SYS_STAT0 0x0100 | ||
| 37 | #define PCIE_SYS_STAT4 0x0110 | ||
| 38 | |||
| 39 | #define PCIE_RDLH_LINK_UP BIT(5) | ||
| 40 | #define PCIE_XMLH_LINK_UP BIT(15) | ||
| 41 | #define PCIE_ELBI_SLV_DBI_ENABLE BIT(21) | ||
| 42 | #define PCIE_APP_LTSSM_ENABLE BIT(11) | ||
| 43 | |||
| 44 | #define PCIE_DEVICE_TYPE_MASK GENMASK(31, 28) | ||
| 45 | #define PCIE_WM_EP 0 | ||
| 46 | #define PCIE_WM_LEGACY BIT(1) | ||
| 47 | #define PCIE_WM_RC BIT(30) | ||
| 48 | |||
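| | /* LTSSM state 0x11 is L0, i.e. the link is fully up (DesignWare encoding) */ | ||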
| 49 | #define PCIE_LTSSM_STATE_MASK GENMASK(5, 0) | ||
| 50 | #define PCIE_LTSSM_STATE_ACTIVE 0x11 | ||
| 51 | |||
| 52 | struct histb_pcie { | ||
| 53 | struct dw_pcie *pci; | ||
| 54 | struct clk *aux_clk; | ||
| 55 | struct clk *pipe_clk; | ||
| 56 | struct clk *sys_clk; | ||
| 57 | struct clk *bus_clk; | ||
| 58 | struct phy *phy; | ||
| 59 | struct reset_control *soft_reset; | ||
| 60 | struct reset_control *sys_reset; | ||
| 61 | struct reset_control *bus_reset; | ||
| 62 | void __iomem *ctrl; | ||
| 63 | int reset_gpio; | ||
| 64 | struct regulator *vpcie; | ||
| 65 | }; | ||
| 66 | |||
| 67 | static u32 histb_pcie_readl(struct histb_pcie *histb_pcie, u32 reg) | ||
| 68 | { | ||
| 69 | return readl(histb_pcie->ctrl + reg); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void histb_pcie_writel(struct histb_pcie *histb_pcie, u32 reg, u32 val) | ||
| 73 | { | ||
| 74 | writel(val, histb_pcie->ctrl + reg); | ||
| 75 | } | ||
| 76 | |||
| 77 | static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable) | ||
| 78 | { | ||
| 79 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 80 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
| 81 | u32 val; | ||
| 82 | |||
| 83 | val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); | ||
| 84 | if (enable) | ||
| 85 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 86 | else | ||
| 87 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 88 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, val); | ||
| 89 | } | ||
| 90 | |||
| 91 | static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable) | ||
| 92 | { | ||
| 93 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 94 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
| 95 | u32 val; | ||
| 96 | |||
| 97 | val = histb_pcie_readl(hipcie, PCIE_SYS_CTRL1); | ||
| 98 | if (enable) | ||
| 99 | val |= PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 100 | else | ||
| 101 | val &= ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 102 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL1, val); | ||
| 103 | } | ||
| 104 | |||
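| | /* Every DBI access must be bracketed by the ELBI sideband enable toggled by the helpers above */ | ||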
| 105 | static u32 histb_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 106 | u32 reg, size_t size) | ||
| 107 | { | ||
| 108 | u32 val; | ||
| 109 | |||
| 110 | histb_pcie_dbi_r_mode(&pci->pp, true); | ||
| 111 | dw_pcie_read(base + reg, size, &val); | ||
| 112 | histb_pcie_dbi_r_mode(&pci->pp, false); | ||
| 113 | |||
| 114 | return val; | ||
| 115 | } | ||
| 116 | |||
| 117 | static void histb_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 118 | u32 reg, size_t size, u32 val) | ||
| 119 | { | ||
| 120 | histb_pcie_dbi_w_mode(&pci->pp, true); | ||
| 121 | dw_pcie_write(base + reg, size, val); | ||
| 122 | histb_pcie_dbi_w_mode(&pci->pp, false); | ||
| 123 | } | ||
| 124 | |||
| 125 | static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where, | ||
| 126 | int size, u32 *val) | ||
| 127 | { | ||
| 128 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 129 | int ret; | ||
| 130 | |||
| 131 | histb_pcie_dbi_r_mode(pp, true); | ||
| 132 | ret = dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 133 | histb_pcie_dbi_r_mode(pp, false); | ||
| 134 | |||
| 135 | return ret; | ||
| 136 | } | ||
| 137 | |||
| 138 | static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where, | ||
| 139 | int size, u32 val) | ||
| 140 | { | ||
| 141 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 142 | int ret; | ||
| 143 | |||
| 144 | histb_pcie_dbi_w_mode(pp, true); | ||
| 145 | ret = dw_pcie_write(pci->dbi_base + where, size, val); | ||
| 146 | histb_pcie_dbi_w_mode(pp, false); | ||
| 147 | |||
| 148 | return ret; | ||
| 149 | } | ||
| 150 | |||
| 151 | static int histb_pcie_link_up(struct dw_pcie *pci) | ||
| 152 | { | ||
| 153 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
| 154 | u32 regval; | ||
| 155 | u32 status; | ||
| 156 | |||
| 157 | regval = histb_pcie_readl(hipcie, PCIE_SYS_STAT0); | ||
| 158 | status = histb_pcie_readl(hipcie, PCIE_SYS_STAT4); | ||
| 159 | status &= PCIE_LTSSM_STATE_MASK; | ||
| 160 | if ((regval & PCIE_XMLH_LINK_UP) && (regval & PCIE_RDLH_LINK_UP) && | ||
| 161 | (status == PCIE_LTSSM_STATE_ACTIVE)) | ||
| 162 | return 1; | ||
| 163 | |||
| 164 | return 0; | ||
| 165 | } | ||
| 166 | |||
| 167 | static int histb_pcie_establish_link(struct pcie_port *pp) | ||
| 168 | { | ||
| 169 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 170 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
| 171 | u32 regval; | ||
| 172 | |||
| 173 | if (dw_pcie_link_up(pci)) { | ||
| 174 | dev_info(pci->dev, "Link already up\n"); | ||
| 175 | return 0; | ||
| 176 | } | ||
| 177 | |||
| 178 | /* PCIe RC work mode */ | ||
| 179 | regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL0); | ||
| 180 | regval &= ~PCIE_DEVICE_TYPE_MASK; | ||
| 181 | regval |= PCIE_WM_RC; | ||
| 182 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL0, regval); | ||
| 183 | |||
| 184 | /* setup root complex */ | ||
| 185 | dw_pcie_setup_rc(pp); | ||
| 186 | |||
| 187 | /* assert LTSSM enable */ | ||
| 188 | regval = histb_pcie_readl(hipcie, PCIE_SYS_CTRL7); | ||
| 189 | regval |= PCIE_APP_LTSSM_ENABLE; | ||
| 190 | histb_pcie_writel(hipcie, PCIE_SYS_CTRL7, regval); | ||
| 191 | |||
| 192 | return dw_pcie_wait_for_link(pci); | ||
| 193 | } | ||
| 194 | |||
| 195 | static int histb_pcie_host_init(struct pcie_port *pp) | ||
| 196 | { | ||
| 197 | histb_pcie_establish_link(pp); | ||
| 198 | |||
| 199 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 200 | dw_pcie_msi_init(pp); | ||
| 201 | |||
| 202 | return 0; | ||
| 203 | } | ||
| 204 | |||
| 205 | static const struct dw_pcie_host_ops histb_pcie_host_ops = { | ||
| 206 | .rd_own_conf = histb_pcie_rd_own_conf, | ||
| 207 | .wr_own_conf = histb_pcie_wr_own_conf, | ||
| 208 | .host_init = histb_pcie_host_init, | ||
| 209 | }; | ||
| 210 | |||
| 211 | static void histb_pcie_host_disable(struct histb_pcie *hipcie) | ||
| 212 | { | ||
| 213 | reset_control_assert(hipcie->soft_reset); | ||
| 214 | reset_control_assert(hipcie->sys_reset); | ||
| 215 | reset_control_assert(hipcie->bus_reset); | ||
| 216 | |||
| 217 | clk_disable_unprepare(hipcie->aux_clk); | ||
| 218 | clk_disable_unprepare(hipcie->pipe_clk); | ||
| 219 | clk_disable_unprepare(hipcie->sys_clk); | ||
| 220 | clk_disable_unprepare(hipcie->bus_clk); | ||
| 221 | |||
| 222 | if (gpio_is_valid(hipcie->reset_gpio)) | ||
| 223 | gpio_set_value_cansleep(hipcie->reset_gpio, 0); | ||
| 224 | |||
| 225 | if (hipcie->vpcie) | ||
| 226 | regulator_disable(hipcie->vpcie); | ||
| 227 | } | ||
| 228 | |||
| 229 | static int histb_pcie_host_enable(struct pcie_port *pp) | ||
| 230 | { | ||
| 231 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 232 | struct histb_pcie *hipcie = to_histb_pcie(pci); | ||
| 233 | struct device *dev = pci->dev; | ||
| 234 | int ret; | ||
| 235 | |||
| 236 | /* power on the PCIe device, if a supply is present */ | ||
| 237 | if (hipcie->vpcie) { | ||
| 238 | ret = regulator_enable(hipcie->vpcie); | ||
| 239 | if (ret) { | ||
| 240 | dev_err(dev, "failed to enable regulator: %d\n", ret); | ||
| 241 | return ret; | ||
| 242 | } | ||
| 243 | } | ||
| 244 | |||
| 245 | if (gpio_is_valid(hipcie->reset_gpio)) | ||
| 246 | gpio_set_value_cansleep(hipcie->reset_gpio, 1); | ||
| 247 | |||
| 248 | ret = clk_prepare_enable(hipcie->bus_clk); | ||
| 249 | if (ret) { | ||
| 250 | dev_err(dev, "cannot prepare/enable bus clk\n"); | ||
| 251 | goto err_bus_clk; | ||
| 252 | } | ||
| 253 | |||
| 254 | ret = clk_prepare_enable(hipcie->sys_clk); | ||
| 255 | if (ret) { | ||
| 256 | dev_err(dev, "cannot prepare/enable sys clk\n"); | ||
| 257 | goto err_sys_clk; | ||
| 258 | } | ||
| 259 | |||
| 260 | ret = clk_prepare_enable(hipcie->pipe_clk); | ||
| 261 | if (ret) { | ||
| 262 | dev_err(dev, "cannot prepare/enable pipe clk\n"); | ||
| 263 | goto err_pipe_clk; | ||
| 264 | } | ||
| 265 | |||
| 266 | ret = clk_prepare_enable(hipcie->aux_clk); | ||
| 267 | if (ret) { | ||
| 268 | dev_err(dev, "cannot prepare/enable aux clk\n"); | ||
| 269 | goto err_aux_clk; | ||
| 270 | } | ||
| 271 | |||
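| | /* Pulse each reset (assert, then deassert) to re-initialize the controller blocks */ | ||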
| 272 | reset_control_assert(hipcie->soft_reset); | ||
| 273 | reset_control_deassert(hipcie->soft_reset); | ||
| 274 | |||
| 275 | reset_control_assert(hipcie->sys_reset); | ||
| 276 | reset_control_deassert(hipcie->sys_reset); | ||
| 277 | |||
| 278 | reset_control_assert(hipcie->bus_reset); | ||
| 279 | reset_control_deassert(hipcie->bus_reset); | ||
| 280 | |||
| 281 | return 0; | ||
| 282 | |||
| 283 | err_aux_clk: | ||
| 284 | clk_disable_unprepare(hipcie->pipe_clk); | ||
| 285 | err_pipe_clk: | ||
| 286 | clk_disable_unprepare(hipcie->sys_clk); | ||
| 287 | err_sys_clk: | ||
| 288 | clk_disable_unprepare(hipcie->bus_clk); | ||
| 289 | err_bus_clk: | ||
| 290 | if (hipcie->vpcie) | ||
| 291 | regulator_disable(hipcie->vpcie); | ||
| 292 | |||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 296 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 297 | .read_dbi = histb_pcie_read_dbi, | ||
| 298 | .write_dbi = histb_pcie_write_dbi, | ||
| 299 | .link_up = histb_pcie_link_up, | ||
| 300 | }; | ||
| 301 | |||
| 302 | static int histb_pcie_probe(struct platform_device *pdev) | ||
| 303 | { | ||
| 304 | struct histb_pcie *hipcie; | ||
| 305 | struct dw_pcie *pci; | ||
| 306 | struct pcie_port *pp; | ||
| 307 | struct resource *res; | ||
| 308 | struct device_node *np = pdev->dev.of_node; | ||
| 309 | struct device *dev = &pdev->dev; | ||
| 310 | enum of_gpio_flags of_flags; | ||
| 311 | unsigned long flag = GPIOF_DIR_OUT; | ||
| 312 | int ret; | ||
| 313 | |||
| 314 | hipcie = devm_kzalloc(dev, sizeof(*hipcie), GFP_KERNEL); | ||
| 315 | if (!hipcie) | ||
| 316 | return -ENOMEM; | ||
| 317 | |||
| 318 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 319 | if (!pci) | ||
| 320 | return -ENOMEM; | ||
| 321 | |||
| 322 | hipcie->pci = pci; | ||
| 323 | pp = &pci->pp; | ||
| 324 | pci->dev = dev; | ||
| 325 | pci->ops = &dw_pcie_ops; | ||
| 326 | |||
| 327 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); | ||
| 328 | hipcie->ctrl = devm_ioremap_resource(dev, res); | ||
| 329 | if (IS_ERR(hipcie->ctrl)) { | ||
| 330 | dev_err(dev, "cannot get control reg base\n"); | ||
| 331 | return PTR_ERR(hipcie->ctrl); | ||
| 332 | } | ||
| 333 | |||
| 334 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); | ||
| 335 | pci->dbi_base = devm_ioremap_resource(dev, res); | ||
| 336 | if (IS_ERR(pci->dbi_base)) { | ||
| 337 | dev_err(dev, "cannot get rc-dbi base\n"); | ||
| 338 | return PTR_ERR(pci->dbi_base); | ||
| 339 | } | ||
| 340 | |||
| 341 | hipcie->vpcie = devm_regulator_get_optional(dev, "vpcie"); | ||
| 342 | if (IS_ERR(hipcie->vpcie)) { | ||
| 343 | if (PTR_ERR(hipcie->vpcie) == -EPROBE_DEFER) | ||
| 344 | return -EPROBE_DEFER; | ||
| 345 | hipcie->vpcie = NULL; | ||
| 346 | } | ||
| 347 | |||
| 348 | hipcie->reset_gpio = of_get_named_gpio_flags(np, | ||
| 349 | "reset-gpios", 0, &of_flags); | ||
| 350 | if (of_flags & OF_GPIO_ACTIVE_LOW) | ||
| 351 | flag |= GPIOF_ACTIVE_LOW; | ||
| 352 | if (gpio_is_valid(hipcie->reset_gpio)) { | ||
| 353 | ret = devm_gpio_request_one(dev, hipcie->reset_gpio, | ||
| 354 | flag, "PCIe device power control"); | ||
| 355 | if (ret) { | ||
| 356 | dev_err(dev, "unable to request gpio\n"); | ||
| 357 | return ret; | ||
| 358 | } | ||
| 359 | } | ||
| 360 | |||
| 361 | hipcie->aux_clk = devm_clk_get(dev, "aux"); | ||
| 362 | if (IS_ERR(hipcie->aux_clk)) { | ||
| 363 | dev_err(dev, "Failed to get PCIe aux clk\n"); | ||
| 364 | return PTR_ERR(hipcie->aux_clk); | ||
| 365 | } | ||
| 366 | |||
| 367 | hipcie->pipe_clk = devm_clk_get(dev, "pipe"); | ||
| 368 | if (IS_ERR(hipcie->pipe_clk)) { | ||
| 369 | dev_err(dev, "Failed to get PCIe pipe clk\n"); | ||
| 370 | return PTR_ERR(hipcie->pipe_clk); | ||
| 371 | } | ||
| 372 | |||
| 373 | hipcie->sys_clk = devm_clk_get(dev, "sys"); | ||
| 374 | if (IS_ERR(hipcie->sys_clk)) { | ||
| 375 | dev_err(dev, "Failed to get PCIe sys clk\n"); | ||
| 376 | return PTR_ERR(hipcie->sys_clk); | ||
| 377 | } | ||
| 378 | |||
| 379 | hipcie->bus_clk = devm_clk_get(dev, "bus"); | ||
| 380 | if (IS_ERR(hipcie->bus_clk)) { | ||
| 381 | dev_err(dev, "Failed to get PCIe bus clk\n"); | ||
| 382 | return PTR_ERR(hipcie->bus_clk); | ||
| 383 | } | ||
| 384 | |||
| 385 | hipcie->soft_reset = devm_reset_control_get(dev, "soft"); | ||
| 386 | if (IS_ERR(hipcie->soft_reset)) { | ||
| 387 | dev_err(dev, "couldn't get soft reset\n"); | ||
| 388 | return PTR_ERR(hipcie->soft_reset); | ||
| 389 | } | ||
| 390 | |||
| 391 | hipcie->sys_reset = devm_reset_control_get(dev, "sys"); | ||
| 392 | if (IS_ERR(hipcie->sys_reset)) { | ||
| 393 | dev_err(dev, "couldn't get sys reset\n"); | ||
| 394 | return PTR_ERR(hipcie->sys_reset); | ||
| 395 | } | ||
| 396 | |||
| 397 | hipcie->bus_reset = devm_reset_control_get(dev, "bus"); | ||
| 398 | if (IS_ERR(hipcie->bus_reset)) { | ||
| 399 | dev_err(dev, "couldn't get bus reset\n"); | ||
| 400 | return PTR_ERR(hipcie->bus_reset); | ||
| 401 | } | ||
| 402 | |||
| 403 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 404 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
| 405 | if (pp->msi_irq < 0) { | ||
| 406 | dev_err(dev, "Failed to get MSI IRQ\n"); | ||
| 407 | return pp->msi_irq; | ||
| 408 | } | ||
| 409 | } | ||
| 410 | |||
| 411 | hipcie->phy = devm_phy_get(dev, "phy"); | ||
| 412 | if (IS_ERR(hipcie->phy)) { | ||
| 413 | dev_info(dev, "no pcie-phy found\n"); | ||
| 414 | hipcie->phy = NULL; | ||
| 415 | /* | ||
| 416 | * Fall through if no PCIe PHY is found: in that case | ||
| 417 | * the PHY must have been set up by the bootloader. | ||
| 418 | */ | ||
| 419 | } else { | ||
| 420 | phy_init(hipcie->phy); | ||
| 421 | } | ||
| 422 | |||
| 423 | pp->root_bus_nr = -1; | ||
| 424 | pp->ops = &histb_pcie_host_ops; | ||
| 425 | |||
| 426 | platform_set_drvdata(pdev, hipcie); | ||
| 427 | |||
| 428 | ret = histb_pcie_host_enable(pp); | ||
| 429 | if (ret) { | ||
| 430 | dev_err(dev, "failed to enable host\n"); | ||
| 431 | return ret; | ||
| 432 | } | ||
| 433 | |||
| 434 | ret = dw_pcie_host_init(pp); | ||
| 435 | if (ret) { | ||
| 436 | dev_err(dev, "failed to initialize host\n"); | ||
| 437 | return ret; | ||
| 438 | } | ||
| 439 | |||
| 440 | return 0; | ||
| 441 | } | ||
| 442 | |||
| 443 | static int histb_pcie_remove(struct platform_device *pdev) | ||
| 444 | { | ||
| 445 | struct histb_pcie *hipcie = platform_get_drvdata(pdev); | ||
| 446 | |||
| 447 | histb_pcie_host_disable(hipcie); | ||
| 448 | |||
| 449 | if (hipcie->phy) | ||
| 450 | phy_exit(hipcie->phy); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static const struct of_device_id histb_pcie_of_match[] = { | ||
| 456 | { .compatible = "hisilicon,hi3798cv200-pcie", }, | ||
| 457 | {}, | ||
| 458 | }; | ||
| 459 | MODULE_DEVICE_TABLE(of, histb_pcie_of_match); | ||
| 460 | |||
| 461 | static struct platform_driver histb_pcie_platform_driver = { | ||
| 462 | .probe = histb_pcie_probe, | ||
| 463 | .remove = histb_pcie_remove, | ||
| 464 | .driver = { | ||
| 465 | .name = "histb-pcie", | ||
| 466 | .of_match_table = histb_pcie_of_match, | ||
| 467 | }, | ||
| 468 | }; | ||
| 469 | module_platform_driver(histb_pcie_platform_driver); | ||
| 470 | |||
| 471 | MODULE_DESCRIPTION("HiSilicon STB PCIe host controller driver"); | ||
| 472 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c new file mode 100644 index 000000000000..d2970a009eb5 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-kirin.c | |||
| @@ -0,0 +1,515 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Kirin Phone SoCs | ||
| 4 | * | ||
| 5 | * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. | ||
| 6 | * http://www.huawei.com | ||
| 7 | * | ||
| 8 | * Author: Xiaowei Song <songxiaowei@huawei.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/compiler.h> | ||
| 12 | #include <linux/clk.h> | ||
| 13 | #include <linux/delay.h> | ||
| 14 | #include <linux/err.h> | ||
| 15 | #include <linux/gpio.h> | ||
| 16 | #include <linux/interrupt.h> | ||
| 17 | #include <linux/mfd/syscon.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_gpio.h> | ||
| 20 | #include <linux/of_pci.h> | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/pci_regs.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/regmap.h> | ||
| 25 | #include <linux/resource.h> | ||
| 26 | #include <linux/types.h> | ||
| 27 | #include "pcie-designware.h" | ||
| 28 | |||
| 29 | #define to_kirin_pcie(x) dev_get_drvdata((x)->dev) | ||
| 30 | |||
| 31 | #define REF_CLK_FREQ 100000000 | ||
| 32 | |||
| 33 | /* PCIe ELBI registers */ | ||
| 34 | #define SOC_PCIECTRL_CTRL0_ADDR 0x000 | ||
| 35 | #define SOC_PCIECTRL_CTRL1_ADDR 0x004 | ||
| 36 | #define SOC_PCIEPHY_CTRL2_ADDR 0x008 | ||
| 37 | #define SOC_PCIEPHY_CTRL3_ADDR 0x00c | ||
| 38 | #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) | ||
| 39 | |||
| 40 | /* info located in APB */ | ||
| 41 | #define PCIE_APP_LTSSM_ENABLE 0x01c | ||
| 42 | #define PCIE_APB_PHY_CTRL0 0x0 | ||
| 43 | #define PCIE_APB_PHY_CTRL1 0x4 | ||
| 44 | #define PCIE_APB_PHY_STATUS0 0x400 | ||
| 45 | #define PCIE_LINKUP_ENABLE (0x8020) | ||
| 46 | #define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) | ||
| 47 | #define PIPE_CLK_STABLE (0x1 << 19) | ||
| 48 | #define PHY_REF_PAD_BIT (0x1 << 8) | ||
| 49 | #define PHY_PWR_DOWN_BIT (0x1 << 22) | ||
| 50 | #define PHY_RST_ACK_BIT (0x1 << 16) | ||
| 51 | |||
| 52 | /* info located in sysctrl */ | ||
| 53 | #define SCTRL_PCIE_CMOS_OFFSET 0x60 | ||
| 54 | #define SCTRL_PCIE_CMOS_BIT 0x10 | ||
| 55 | #define SCTRL_PCIE_ISO_OFFSET 0x44 | ||
| 56 | #define SCTRL_PCIE_ISO_BIT 0x30 | ||
| 57 | #define SCTRL_PCIE_HPCLK_OFFSET 0x190 | ||
| 58 | #define SCTRL_PCIE_HPCLK_BIT 0x184000 | ||
| 59 | #define SCTRL_PCIE_OE_OFFSET 0x14a | ||
| 60 | #define PCIE_DEBOUNCE_PARAM 0xF0F400 | ||
| 61 | #define PCIE_OE_BYPASS (0x3 << 28) | ||
| 62 | |||
| 63 | /* peri_crg ctrl */ | ||
| 64 | #define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 | ||
| 65 | #define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 | ||
| 66 | |||
| 67 | /* Time for delay */ | ||
| 68 | #define REF_2_PERST_MIN 20000 | ||
| 69 | #define REF_2_PERST_MAX 25000 | ||
| 70 | #define PERST_2_ACCESS_MIN 10000 | ||
| 71 | #define PERST_2_ACCESS_MAX 12000 | ||
| 72 | #define LINK_WAIT_MIN 900 | ||
| 73 | #define LINK_WAIT_MAX 1000 | ||
| 74 | #define PIPE_CLK_WAIT_MIN 550 | ||
| 75 | #define PIPE_CLK_WAIT_MAX 600 | ||
| 76 | #define TIME_CMOS_MIN 100 | ||
| 77 | #define TIME_CMOS_MAX 105 | ||
| 78 | #define TIME_PHY_PD_MIN 10 | ||
| 79 | #define TIME_PHY_PD_MAX 11 | ||
| 80 | |||
| 81 | struct kirin_pcie { | ||
| 82 | struct dw_pcie *pci; | ||
| 83 | void __iomem *apb_base; | ||
| 84 | void __iomem *phy_base; | ||
| 85 | struct regmap *crgctrl; | ||
| 86 | struct regmap *sysctrl; | ||
| 87 | struct clk *apb_sys_clk; | ||
| 88 | struct clk *apb_phy_clk; | ||
| 89 | struct clk *phy_ref_clk; | ||
| 90 | struct clk *pcie_aclk; | ||
| 91 | struct clk *pcie_aux_clk; | ||
| 92 | int gpio_id_reset; | ||
| 93 | }; | ||
| 94 | |||
| 95 | /* Registers in PCIeCTRL */ | ||
| 96 | static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, | ||
| 97 | u32 val, u32 reg) | ||
| 98 | { | ||
| 99 | writel(val, kirin_pcie->apb_base + reg); | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
| 103 | { | ||
| 104 | return readl(kirin_pcie->apb_base + reg); | ||
| 105 | } | ||
| 106 | |||
| 107 | /* Registers in PCIePHY */ | ||
| 108 | static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, | ||
| 109 | u32 val, u32 reg) | ||
| 110 | { | ||
| 111 | writel(val, kirin_pcie->phy_base + reg); | ||
| 112 | } | ||
| 113 | |||
| 114 | static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
| 115 | { | ||
| 116 | return readl(kirin_pcie->phy_base + reg); | ||
| 117 | } | ||
| 118 | |||
| 119 | static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, | ||
| 120 | struct platform_device *pdev) | ||
| 121 | { | ||
| 122 | struct device *dev = &pdev->dev; | ||
| 123 | |||
| 124 | kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); | ||
| 125 | if (IS_ERR(kirin_pcie->phy_ref_clk)) | ||
| 126 | return PTR_ERR(kirin_pcie->phy_ref_clk); | ||
| 127 | |||
| 128 | kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); | ||
| 129 | if (IS_ERR(kirin_pcie->pcie_aux_clk)) | ||
| 130 | return PTR_ERR(kirin_pcie->pcie_aux_clk); | ||
| 131 | |||
| 132 | kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); | ||
| 133 | if (IS_ERR(kirin_pcie->apb_phy_clk)) | ||
| 134 | return PTR_ERR(kirin_pcie->apb_phy_clk); | ||
| 135 | |||
| 136 | kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); | ||
| 137 | if (IS_ERR(kirin_pcie->apb_sys_clk)) | ||
| 138 | return PTR_ERR(kirin_pcie->apb_sys_clk); | ||
| 139 | |||
| 140 | kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); | ||
| 141 | if (IS_ERR(kirin_pcie->pcie_aclk)) | ||
| 142 | return PTR_ERR(kirin_pcie->pcie_aclk); | ||
| 143 | |||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, | ||
| 148 | struct platform_device *pdev) | ||
| 149 | { | ||
| 150 | struct device *dev = &pdev->dev; | ||
| 151 | struct resource *apb; | ||
| 152 | struct resource *phy; | ||
| 153 | struct resource *dbi; | ||
| 154 | |||
| 155 | apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); | ||
| 156 | kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); | ||
| 157 | if (IS_ERR(kirin_pcie->apb_base)) | ||
| 158 | return PTR_ERR(kirin_pcie->apb_base); | ||
| 159 | |||
| 160 | phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); | ||
| 161 | kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); | ||
| 162 | if (IS_ERR(kirin_pcie->phy_base)) | ||
| 163 | return PTR_ERR(kirin_pcie->phy_base); | ||
| 164 | |||
| 165 | dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 166 | kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); | ||
| 167 | if (IS_ERR(kirin_pcie->pci->dbi_base)) | ||
| 168 | return PTR_ERR(kirin_pcie->pci->dbi_base); | ||
| 169 | |||
| 170 | kirin_pcie->crgctrl = | ||
| 171 | syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); | ||
| 172 | if (IS_ERR(kirin_pcie->crgctrl)) | ||
| 173 | return PTR_ERR(kirin_pcie->crgctrl); | ||
| 174 | |||
| 175 | kirin_pcie->sysctrl = | ||
| 176 | syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); | ||
| 177 | if (IS_ERR(kirin_pcie->sysctrl)) | ||
| 178 | return PTR_ERR(kirin_pcie->sysctrl); | ||
| 179 | |||
| 180 | return 0; | ||
| 181 | } | ||
| 182 | |||
| 183 | static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie) | ||
| 184 | { | ||
| 185 | struct device *dev = kirin_pcie->pci->dev; | ||
| 186 | u32 reg_val; | ||
| 187 | |||
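| | /* Clear the ref-pad, PHY power-down and reset-ack control bits in turn, then check the PIPE clock state */ | ||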
| 188 | reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); | ||
| 189 | reg_val &= ~PHY_REF_PAD_BIT; | ||
| 190 | kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); | ||
| 191 | |||
| 192 | reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0); | ||
| 193 | reg_val &= ~PHY_PWR_DOWN_BIT; | ||
| 194 | kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0); | ||
| 195 | usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX); | ||
| 196 | |||
| 197 | reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1); | ||
| 198 | reg_val &= ~PHY_RST_ACK_BIT; | ||
| 199 | kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1); | ||
| 200 | |||
| 201 | usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX); | ||
| 202 | reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); | ||
| 203 | if (reg_val & PIPE_CLK_STABLE) { | ||
| 204 | dev_err(dev, "PIPE clk is not stable\n"); | ||
| 205 | return -EINVAL; | ||
| 206 | } | ||
| 207 | |||
| 208 | return 0; | ||
| 209 | } | ||
| 210 | |||
| 211 | static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) | ||
| 212 | { | ||
| 213 | u32 val; | ||
| 214 | |||
| 215 | regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); | ||
| 216 | val |= PCIE_DEBOUNCE_PARAM; | ||
| 217 | val &= ~PCIE_OE_BYPASS; | ||
| 218 | regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); | ||
| 219 | } | ||
| 220 | |||
| 221 | static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable) | ||
| 222 | { | ||
| 223 | int ret = 0; | ||
| 224 | |||
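| | /* Disabling reuses the unwind ladder below to tear down every clock */ | ||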
| 225 | if (!enable) | ||
| 226 | goto close_clk; | ||
| 227 | |||
| 228 | ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ); | ||
| 229 | if (ret) | ||
| 230 | return ret; | ||
| 231 | |||
| 232 | ret = clk_prepare_enable(kirin_pcie->phy_ref_clk); | ||
| 233 | if (ret) | ||
| 234 | return ret; | ||
| 235 | |||
| 236 | ret = clk_prepare_enable(kirin_pcie->apb_sys_clk); | ||
| 237 | if (ret) | ||
| 238 | goto apb_sys_fail; | ||
| 239 | |||
| 240 | ret = clk_prepare_enable(kirin_pcie->apb_phy_clk); | ||
| 241 | if (ret) | ||
| 242 | goto apb_phy_fail; | ||
| 243 | |||
| 244 | ret = clk_prepare_enable(kirin_pcie->pcie_aclk); | ||
| 245 | if (ret) | ||
| 246 | goto aclk_fail; | ||
| 247 | |||
| 248 | ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk); | ||
| 249 | if (ret) | ||
| 250 | goto aux_clk_fail; | ||
| 251 | |||
| 252 | return 0; | ||
| 253 | |||
| 254 | close_clk: | ||
| 255 | clk_disable_unprepare(kirin_pcie->pcie_aux_clk); | ||
| 256 | aux_clk_fail: | ||
| 257 | clk_disable_unprepare(kirin_pcie->pcie_aclk); | ||
| 258 | aclk_fail: | ||
| 259 | clk_disable_unprepare(kirin_pcie->apb_phy_clk); | ||
| 260 | apb_phy_fail: | ||
| 261 | clk_disable_unprepare(kirin_pcie->apb_sys_clk); | ||
| 262 | apb_sys_fail: | ||
| 263 | clk_disable_unprepare(kirin_pcie->phy_ref_clk); | ||
| 264 | |||
| 265 | return ret; | ||
| 266 | } | ||
| 267 | |||
| 268 | static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) | ||
| 269 | { | ||
| 270 | int ret; | ||
| 271 | |||
| 272 | /* Power supply for Host */ | ||
| 273 | regmap_write(kirin_pcie->sysctrl, | ||
| 274 | SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); | ||
| 275 | usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); | ||
| 276 | kirin_pcie_oe_enable(kirin_pcie); | ||
| 277 | |||
| 278 | ret = kirin_pcie_clk_ctrl(kirin_pcie, true); | ||
| 279 | if (ret) | ||
| 280 | return ret; | ||
| 281 | |||
| 282 | /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ | ||
| 283 | regmap_write(kirin_pcie->sysctrl, | ||
| 284 | SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); | ||
| 285 | regmap_write(kirin_pcie->crgctrl, | ||
| 286 | CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); | ||
| 287 | regmap_write(kirin_pcie->sysctrl, | ||
| 288 | SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); | ||
| 289 | |||
| 290 | ret = kirin_pcie_phy_init(kirin_pcie); | ||
| 291 | if (ret) | ||
| 292 | goto close_clk; | ||
| 293 | |||
| 294 | /* Release the endpoint from reset (PERST#) after the required delays */ | ||
| 295 | ret = gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst"); | ||
| 296 | if (ret) | ||
| 297 | goto close_clk; | ||
| 298 | usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); | ||
| 299 | ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); | ||
| 300 | if (ret) | ||
| 301 | goto close_clk; | ||
| 302 | usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); | ||
| 303 | return 0; | ||
| 304 | |||
| 305 | close_clk: | ||
| 306 | kirin_pcie_clk_ctrl(kirin_pcie, false); | ||
| 307 | return ret; | ||
| 308 | } | ||
| 309 | |||
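| | /* As in the HiSTB driver above, DBI accesses are gated behind an ELBI sideband enable */ | ||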
| 310 | static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, | ||
| 311 | bool on) | ||
| 312 | { | ||
| 313 | u32 val; | ||
| 314 | |||
| 315 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); | ||
| 316 | if (on) | ||
| 317 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 318 | else | ||
| 319 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 320 | |||
| 321 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); | ||
| 322 | } | ||
| 323 | |||
| 324 | static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, | ||
| 325 | bool on) | ||
| 326 | { | ||
| 327 | u32 val; | ||
| 328 | |||
| 329 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); | ||
| 330 | if (on) | ||
| 331 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 332 | else | ||
| 333 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 334 | |||
| 335 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); | ||
| 336 | } | ||
| 337 | |||
| 338 | static int kirin_pcie_rd_own_conf(struct pcie_port *pp, | ||
| 339 | int where, int size, u32 *val) | ||
| 340 | { | ||
| 341 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 342 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 343 | int ret; | ||
| 344 | |||
| 345 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); | ||
| 346 | ret = dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 347 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); | ||
| 348 | |||
| 349 | return ret; | ||
| 350 | } | ||
| 351 | |||
| 352 | static int kirin_pcie_wr_own_conf(struct pcie_port *pp, | ||
| 353 | int where, int size, u32 val) | ||
| 354 | { | ||
| 355 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 356 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 357 | int ret; | ||
| 358 | |||
| 359 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); | ||
| 360 | ret = dw_pcie_write(pci->dbi_base + where, size, val); | ||
| 361 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); | ||
| 362 | |||
| 363 | return ret; | ||
| 364 | } | ||
| 365 | |||
| 366 | static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 367 | u32 reg, size_t size) | ||
| 368 | { | ||
| 369 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 370 | u32 ret; | ||
| 371 | |||
| 372 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); | ||
| 373 | dw_pcie_read(base + reg, size, &ret); | ||
| 374 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); | ||
| 375 | |||
| 376 | return ret; | ||
| 377 | } | ||
| 378 | |||
| 379 | static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 380 | u32 reg, size_t size, u32 val) | ||
| 381 | { | ||
| 382 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 383 | |||
| 384 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); | ||
| 385 | dw_pcie_write(base + reg, size, val); | ||
| 386 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); | ||
| 387 | } | ||
| 388 | |||
| 389 | static int kirin_pcie_link_up(struct dw_pcie *pci) | ||
| 390 | { | ||
| 391 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 392 | u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); | ||
| 393 | |||
| 394 | if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) | ||
| 395 | return 1; | ||
| 396 | |||
| 397 | return 0; | ||
| 398 | } | ||
| 399 | |||
| 400 | static int kirin_pcie_establish_link(struct pcie_port *pp) | ||
| 401 | { | ||
| 402 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 403 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 404 | struct device *dev = kirin_pcie->pci->dev; | ||
| 405 | int count = 0; | ||
| 406 | |||
| 407 | if (kirin_pcie_link_up(pci)) | ||
| 408 | return 0; | ||
| 409 | |||
| 410 | dw_pcie_setup_rc(pp); | ||
| 411 | |||
| 412 | /* assert LTSSM enable */ | ||
| 413 | kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, | ||
| 414 | PCIE_APP_LTSSM_ENABLE); | ||
| 415 | |||
| 416 | /* check if the link is up or not */ | ||
| 417 | while (!kirin_pcie_link_up(pci)) { | ||
| 418 | usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); | ||
| 419 | count++; | ||
| 420 | if (count == 1000) { | ||
| 421 | dev_err(dev, "Link failed\n"); | ||
| 422 | return -EINVAL; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | return 0; | ||
| 427 | } | ||
| 428 | |||
| 429 | static int kirin_pcie_host_init(struct pcie_port *pp) | ||
| 430 | { | ||
| 431 | kirin_pcie_establish_link(pp); | ||
| 432 | |||
| 433 | return 0; | ||
| 434 | } | ||
| 435 | |||
| 436 | static const struct dw_pcie_ops kirin_dw_pcie_ops = { | ||
| 437 | .read_dbi = kirin_pcie_read_dbi, | ||
| 438 | .write_dbi = kirin_pcie_write_dbi, | ||
| 439 | .link_up = kirin_pcie_link_up, | ||
| 440 | }; | ||
| 441 | |||
| 442 | static const struct dw_pcie_host_ops kirin_pcie_host_ops = { | ||
| 443 | .rd_own_conf = kirin_pcie_rd_own_conf, | ||
| 444 | .wr_own_conf = kirin_pcie_wr_own_conf, | ||
| 445 | .host_init = kirin_pcie_host_init, | ||
| 446 | }; | ||
| 447 | |||
| 448 | static int __init kirin_add_pcie_port(struct dw_pcie *pci, | ||
| 449 | struct platform_device *pdev) | ||
| 450 | { | ||
| 451 | pci->pp.ops = &kirin_pcie_host_ops; | ||
| 452 | |||
| 453 | return dw_pcie_host_init(&pci->pp); | ||
| 454 | } | ||
| 455 | |||
| 456 | static int kirin_pcie_probe(struct platform_device *pdev) | ||
| 457 | { | ||
| 458 | struct device *dev = &pdev->dev; | ||
| 459 | struct kirin_pcie *kirin_pcie; | ||
| 460 | struct dw_pcie *pci; | ||
| 461 | int ret; | ||
| 462 | |||
| 463 | if (!dev->of_node) { | ||
| 464 | dev_err(dev, "NULL node\n"); | ||
| 465 | return -EINVAL; | ||
| 466 | } | ||
| 467 | |||
| 468 | kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); | ||
| 469 | if (!kirin_pcie) | ||
| 470 | return -ENOMEM; | ||
| 471 | |||
| 472 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 473 | if (!pci) | ||
| 474 | return -ENOMEM; | ||
| 475 | |||
| 476 | pci->dev = dev; | ||
| 477 | pci->ops = &kirin_dw_pcie_ops; | ||
| 478 | kirin_pcie->pci = pci; | ||
| 479 | |||
| 480 | ret = kirin_pcie_get_clk(kirin_pcie, pdev); | ||
| 481 | if (ret) | ||
| 482 | return ret; | ||
| 483 | |||
| 484 | ret = kirin_pcie_get_resource(kirin_pcie, pdev); | ||
| 485 | if (ret) | ||
| 486 | return ret; | ||
| 487 | |||
| 488 | kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, | ||
| 489 | "reset-gpios", 0); | ||
| 490 | if (kirin_pcie->gpio_id_reset < 0) | ||
| 491 | return -ENODEV; | ||
| 492 | |||
| 493 | ret = kirin_pcie_power_on(kirin_pcie); | ||
| 494 | if (ret) | ||
| 495 | return ret; | ||
| 496 | |||
| 497 | platform_set_drvdata(pdev, kirin_pcie); | ||
| 498 | |||
| 499 | return kirin_add_pcie_port(pci, pdev); | ||
| 500 | } | ||
| 501 | |||
| 502 | static const struct of_device_id kirin_pcie_match[] = { | ||
| 503 | { .compatible = "hisilicon,kirin960-pcie" }, | ||
| 504 | {}, | ||
| 505 | }; | ||
| 506 | |||
| 507 | static struct platform_driver kirin_pcie_driver = { | ||
| 508 | .probe = kirin_pcie_probe, | ||
| 509 | .driver = { | ||
| 510 | .name = "kirin-pcie", | ||
| 511 | .of_match_table = kirin_pcie_match, | ||
| 512 | .suppress_bind_attrs = true, | ||
| 513 | }, | ||
| 514 | }; | ||
| 515 | builtin_platform_driver(kirin_pcie_driver); | ||
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c new file mode 100644 index 000000000000..a1d0198081a6 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-qcom.c | |||
| @@ -0,0 +1,1299 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Qualcomm PCIe root complex driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. | ||
| 6 | * Copyright 2015 Linaro Limited. | ||
| 7 | * | ||
| 8 | * Author: Stanimir Varbanov <svarbanov@mm-sol.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/clk.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/gpio/consumer.h> | ||
| 14 | #include <linux/interrupt.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/of_device.h> | ||
| 20 | #include <linux/of_gpio.h> | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/pm_runtime.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/phy/phy.h> | ||
| 25 | #include <linux/regulator/consumer.h> | ||
| 26 | #include <linux/reset.h> | ||
| 27 | #include <linux/slab.h> | ||
| 28 | #include <linux/types.h> | ||
| 29 | |||
| 30 | #include "pcie-designware.h" | ||
| 31 | |||
| 32 | #define PCIE20_PARF_SYS_CTRL 0x00 | ||
| 33 | #define MST_WAKEUP_EN BIT(13) | ||
| 34 | #define SLV_WAKEUP_EN BIT(12) | ||
| 35 | #define MSTR_ACLK_CGC_DIS BIT(10) | ||
| 36 | #define SLV_ACLK_CGC_DIS BIT(9) | ||
| 37 | #define CORE_CLK_CGC_DIS BIT(6) | ||
| 38 | #define AUX_PWR_DET BIT(4) | ||
| 39 | #define L23_CLK_RMV_DIS BIT(2) | ||
| 40 | #define L1_CLK_RMV_DIS BIT(1) | ||
| 41 | |||
| 42 | #define PCIE20_COMMAND_STATUS 0x04 | ||
| 43 | #define CMD_BME_VAL 0x4 | ||
| 44 | #define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 | ||
| 45 | #define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 | ||
| 46 | |||
| 47 | #define PCIE20_PARF_PHY_CTRL 0x40 | ||
| 48 | #define PCIE20_PARF_PHY_REFCLK 0x4C | ||
| 49 | #define PCIE20_PARF_DBI_BASE_ADDR 0x168 | ||
| 50 | #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C | ||
| 51 | #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 | ||
| 52 | #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT 0x178 | ||
| 53 | #define PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2 0x1A8 | ||
| 54 | #define PCIE20_PARF_LTSSM 0x1B0 | ||
| 55 | #define PCIE20_PARF_SID_OFFSET 0x234 | ||
| 56 | #define PCIE20_PARF_BDF_TRANSLATE_CFG 0x24C | ||
| 57 | |||
| 58 | #define PCIE20_ELBI_SYS_CTRL 0x04 | ||
| 59 | #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) | ||
| 60 | |||
| 61 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 | ||
| 62 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 | ||
| 63 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 | ||
| 64 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c | ||
| 65 | #define CFG_BRIDGE_SB_INIT BIT(0) | ||
| 66 | |||
| 67 | #define PCIE20_CAP 0x70 | ||
| 68 | #define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) | ||
| 69 | #define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) | ||
| 70 | #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) | ||
| 71 | #define PCIE_CAP_LINK1_VAL 0x2FD7F | ||
| 72 | |||
| 73 | #define PCIE20_PARF_Q2A_FLUSH 0x1AC | ||
| 74 | |||
| 75 | #define PCIE20_MISC_CONTROL_1_REG 0x8BC | ||
| 76 | #define DBI_RO_WR_EN 1 | ||
| 77 | |||
| 78 | #define PERST_DELAY_US 1000 | ||
| 79 | |||
| 80 | #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 | ||
| 81 | #define SLV_ADDR_SPACE_SZ 0x10000000 | ||
| 82 | |||
| 83 | #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 | ||
| 84 | struct qcom_pcie_resources_2_1_0 { | ||
| 85 | struct clk *iface_clk; | ||
| 86 | struct clk *core_clk; | ||
| 87 | struct clk *phy_clk; | ||
| 88 | struct reset_control *pci_reset; | ||
| 89 | struct reset_control *axi_reset; | ||
| 90 | struct reset_control *ahb_reset; | ||
| 91 | struct reset_control *por_reset; | ||
| 92 | struct reset_control *phy_reset; | ||
| 93 | struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; | ||
| 94 | }; | ||
| 95 | |||
| 96 | struct qcom_pcie_resources_1_0_0 { | ||
| 97 | struct clk *iface; | ||
| 98 | struct clk *aux; | ||
| 99 | struct clk *master_bus; | ||
| 100 | struct clk *slave_bus; | ||
| 101 | struct reset_control *core; | ||
| 102 | struct regulator *vdda; | ||
| 103 | }; | ||
| 104 | |||
| 105 | #define QCOM_PCIE_2_3_2_MAX_SUPPLY 2 | ||
| 106 | struct qcom_pcie_resources_2_3_2 { | ||
| 107 | struct clk *aux_clk; | ||
| 108 | struct clk *master_clk; | ||
| 109 | struct clk *slave_clk; | ||
| 110 | struct clk *cfg_clk; | ||
| 111 | struct clk *pipe_clk; | ||
| 112 | struct regulator_bulk_data supplies[QCOM_PCIE_2_3_2_MAX_SUPPLY]; | ||
| 113 | }; | ||
| 114 | |||
| 115 | struct qcom_pcie_resources_2_4_0 { | ||
| 116 | struct clk *aux_clk; | ||
| 117 | struct clk *master_clk; | ||
| 118 | struct clk *slave_clk; | ||
| 119 | struct reset_control *axi_m_reset; | ||
| 120 | struct reset_control *axi_s_reset; | ||
| 121 | struct reset_control *pipe_reset; | ||
| 122 | struct reset_control *axi_m_vmid_reset; | ||
| 123 | struct reset_control *axi_s_xpu_reset; | ||
| 124 | struct reset_control *parf_reset; | ||
| 125 | struct reset_control *phy_reset; | ||
| 126 | struct reset_control *axi_m_sticky_reset; | ||
| 127 | struct reset_control *pipe_sticky_reset; | ||
| 128 | struct reset_control *pwr_reset; | ||
| 129 | struct reset_control *ahb_reset; | ||
| 130 | struct reset_control *phy_ahb_reset; | ||
| 131 | }; | ||
| 132 | |||
| 133 | struct qcom_pcie_resources_2_3_3 { | ||
| 134 | struct clk *iface; | ||
| 135 | struct clk *axi_m_clk; | ||
| 136 | struct clk *axi_s_clk; | ||
| 137 | struct clk *ahb_clk; | ||
| 138 | struct clk *aux_clk; | ||
| 139 | struct reset_control *rst[7]; | ||
| 140 | }; | ||
| 141 | |||
| 142 | union qcom_pcie_resources { | ||
| 143 | struct qcom_pcie_resources_1_0_0 v1_0_0; | ||
| 144 | struct qcom_pcie_resources_2_1_0 v2_1_0; | ||
| 145 | struct qcom_pcie_resources_2_3_2 v2_3_2; | ||
| 146 | struct qcom_pcie_resources_2_3_3 v2_3_3; | ||
| 147 | struct qcom_pcie_resources_2_4_0 v2_4_0; | ||
| 148 | }; | ||
| 149 | |||
| 150 | struct qcom_pcie; | ||
| 151 | |||
| 152 | struct qcom_pcie_ops { | ||
| 153 | int (*get_resources)(struct qcom_pcie *pcie); | ||
| 154 | int (*init)(struct qcom_pcie *pcie); | ||
| 155 | int (*post_init)(struct qcom_pcie *pcie); | ||
| 156 | void (*deinit)(struct qcom_pcie *pcie); | ||
| 157 | void (*post_deinit)(struct qcom_pcie *pcie); | ||
| 158 | void (*ltssm_enable)(struct qcom_pcie *pcie); | ||
| 159 | }; | ||
| 160 | |||
| 161 | struct qcom_pcie { | ||
| 162 | struct dw_pcie *pci; | ||
| 163 | void __iomem *parf; /* DT parf */ | ||
| 164 | void __iomem *elbi; /* DT elbi */ | ||
| 165 | union qcom_pcie_resources res; | ||
| 166 | struct phy *phy; | ||
| 167 | struct gpio_desc *reset; | ||
| 168 | const struct qcom_pcie_ops *ops; | ||
| 169 | }; | ||
| 170 | |||
| 171 | #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) | ||
| 172 | |||
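| | /* Toggle PERST# to the endpoint; each edge is followed by a settle delay */ | ||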
| 173 | static void qcom_ep_reset_assert(struct qcom_pcie *pcie) | ||
| 174 | { | ||
| 175 | gpiod_set_value_cansleep(pcie->reset, 1); | ||
| 176 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); | ||
| 177 | } | ||
| 178 | |||
| 179 | static void qcom_ep_reset_deassert(struct qcom_pcie *pcie) | ||
| 180 | { | ||
| 181 | gpiod_set_value_cansleep(pcie->reset, 0); | ||
| 182 | usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500); | ||
| 183 | } | ||
| 184 | |||
| 185 | static int qcom_pcie_establish_link(struct qcom_pcie *pcie) | ||
| 186 | { | ||
| 187 | struct dw_pcie *pci = pcie->pci; | ||
| 188 | |||
| 189 | if (dw_pcie_link_up(pci)) | ||
| 190 | return 0; | ||
| 191 | |||
| 192 | /* Enable Link Training state machine */ | ||
| 193 | if (pcie->ops->ltssm_enable) | ||
| 194 | pcie->ops->ltssm_enable(pcie); | ||
| 195 | |||
| 196 | return dw_pcie_wait_for_link(pci); | ||
| 197 | } | ||
| 198 | |||
| 199 | static void qcom_pcie_2_1_0_ltssm_enable(struct qcom_pcie *pcie) | ||
| 200 | { | ||
| 201 | u32 val; | ||
| 202 | |||
| 203 | /* enable link training */ | ||
| 204 | val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 205 | val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; | ||
| 206 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 207 | } | ||
| 208 | |||
| 209 | static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) | ||
| 210 | { | ||
| 211 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; | ||
| 212 | struct dw_pcie *pci = pcie->pci; | ||
| 213 | struct device *dev = pci->dev; | ||
| 214 | int ret; | ||
| 215 | |||
| 216 | res->supplies[0].supply = "vdda"; | ||
| 217 | res->supplies[1].supply = "vdda_phy"; | ||
| 218 | res->supplies[2].supply = "vdda_refclk"; | ||
| 219 | ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), | ||
| 220 | res->supplies); | ||
| 221 | if (ret) | ||
| 222 | return ret; | ||
| 223 | |||
| 224 | res->iface_clk = devm_clk_get(dev, "iface"); | ||
| 225 | if (IS_ERR(res->iface_clk)) | ||
| 226 | return PTR_ERR(res->iface_clk); | ||
| 227 | |||
| 228 | res->core_clk = devm_clk_get(dev, "core"); | ||
| 229 | if (IS_ERR(res->core_clk)) | ||
| 230 | return PTR_ERR(res->core_clk); | ||
| 231 | |||
| 232 | res->phy_clk = devm_clk_get(dev, "phy"); | ||
| 233 | if (IS_ERR(res->phy_clk)) | ||
| 234 | return PTR_ERR(res->phy_clk); | ||
| 235 | |||
| 236 | res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); | ||
| 237 | if (IS_ERR(res->pci_reset)) | ||
| 238 | return PTR_ERR(res->pci_reset); | ||
| 239 | |||
| 240 | res->axi_reset = devm_reset_control_get_exclusive(dev, "axi"); | ||
| 241 | if (IS_ERR(res->axi_reset)) | ||
| 242 | return PTR_ERR(res->axi_reset); | ||
| 243 | |||
| 244 | res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); | ||
| 245 | if (IS_ERR(res->ahb_reset)) | ||
| 246 | return PTR_ERR(res->ahb_reset); | ||
| 247 | |||
| 248 | res->por_reset = devm_reset_control_get_exclusive(dev, "por"); | ||
| 249 | if (IS_ERR(res->por_reset)) | ||
| 250 | return PTR_ERR(res->por_reset); | ||
| 251 | |||
| 252 | res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); | ||
| 253 | return PTR_ERR_OR_ZERO(res->phy_reset); | ||
| 254 | } | ||
| 255 | |||
| 256 | static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) | ||
| 257 | { | ||
| 258 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; | ||
| 259 | |||
| 260 | reset_control_assert(res->pci_reset); | ||
| 261 | reset_control_assert(res->axi_reset); | ||
| 262 | reset_control_assert(res->ahb_reset); | ||
| 263 | reset_control_assert(res->por_reset); | ||
| 264 | reset_control_assert(res->phy_reset); | ||
| 265 | clk_disable_unprepare(res->iface_clk); | ||
| 266 | clk_disable_unprepare(res->core_clk); | ||
| 267 | clk_disable_unprepare(res->phy_clk); | ||
| 268 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 269 | } | ||
| 270 | |||
| 271 | static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) | ||
| 272 | { | ||
| 273 | struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; | ||
| 274 | struct dw_pcie *pci = pcie->pci; | ||
| 275 | struct device *dev = pci->dev; | ||
| 276 | u32 val; | ||
| 277 | int ret; | ||
| 278 | |||
| 279 | ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 280 | if (ret < 0) { | ||
| 281 | dev_err(dev, "cannot enable regulators\n"); | ||
| 282 | return ret; | ||
| 283 | } | ||
| 284 | |||
| 285 | ret = reset_control_assert(res->ahb_reset); | ||
| 286 | if (ret) { | ||
| 287 | dev_err(dev, "cannot assert ahb reset\n"); | ||
| 288 | goto err_assert_ahb; | ||
| 289 | } | ||
| 290 | |||
| 291 | ret = clk_prepare_enable(res->iface_clk); | ||
| 292 | if (ret) { | ||
| 293 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
| 294 | goto err_assert_ahb; | ||
| 295 | } | ||
| 296 | |||
| 297 | ret = clk_prepare_enable(res->phy_clk); | ||
| 298 | if (ret) { | ||
| 299 | dev_err(dev, "cannot prepare/enable phy clock\n"); | ||
| 300 | goto err_clk_phy; | ||
| 301 | } | ||
| 302 | |||
| 303 | ret = clk_prepare_enable(res->core_clk); | ||
| 304 | if (ret) { | ||
| 305 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
| 306 | goto err_clk_core; | ||
| 307 | } | ||
| 308 | |||
| 309 | ret = reset_control_deassert(res->ahb_reset); | ||
| 310 | if (ret) { | ||
| 311 | dev_err(dev, "cannot deassert ahb reset\n"); | ||
| 312 | goto err_deassert_ahb; | ||
| 313 | } | ||
| 314 | |||
| 315 | /* power up the PHY: clear the PHY test power-down bit */ | ||
| 316 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 317 | val &= ~BIT(0); | ||
| 318 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 319 | |||
| 320 | /* enable external reference clock */ | ||
| 321 | val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); | ||
| 322 | val |= BIT(16); | ||
| 323 | writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); | ||
| 324 | |||
| 325 | ret = reset_control_deassert(res->phy_reset); | ||
| 326 | if (ret) { | ||
| 327 | dev_err(dev, "cannot deassert phy reset\n"); | ||
| 328 | return ret; | ||
| 329 | } | ||
| 330 | |||
| 331 | ret = reset_control_deassert(res->pci_reset); | ||
| 332 | if (ret) { | ||
| 333 | dev_err(dev, "cannot deassert pci reset\n"); | ||
| 334 | return ret; | ||
| 335 | } | ||
| 336 | |||
| 337 | ret = reset_control_deassert(res->por_reset); | ||
| 338 | if (ret) { | ||
| 339 | dev_err(dev, "cannot deassert por reset\n"); | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | |||
| 343 | ret = reset_control_deassert(res->axi_reset); | ||
| 344 | if (ret) { | ||
| 345 | dev_err(dev, "cannot deassert axi reset\n"); | ||
| 346 | return ret; | ||
| 347 | } | ||
| 348 | |||
| 349 | /* wait for clock acquisition */ | ||
| 350 | usleep_range(1000, 1500); | ||
| 351 | |||
| 352 | |||
| 353 | /* Set the Max TLP size to 2K, instead of using default of 4K */ | ||
| 354 | writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, | ||
| 355 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); | ||
| 356 | writel(CFG_BRIDGE_SB_INIT, | ||
| 357 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); | ||
| 358 | |||
| 359 | return 0; | ||
| 360 | |||
| 361 | err_deassert_ahb: | ||
| 362 | clk_disable_unprepare(res->core_clk); | ||
| 363 | err_clk_core: | ||
| 364 | clk_disable_unprepare(res->phy_clk); | ||
| 365 | err_clk_phy: | ||
| 366 | clk_disable_unprepare(res->iface_clk); | ||
| 367 | err_assert_ahb: | ||
| 368 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 369 | |||
| 370 | return ret; | ||
| 371 | } | ||
| 372 | |||
| 373 | static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie) | ||
| 374 | { | ||
| 375 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; | ||
| 376 | struct dw_pcie *pci = pcie->pci; | ||
| 377 | struct device *dev = pci->dev; | ||
| 378 | |||
| 379 | res->vdda = devm_regulator_get(dev, "vdda"); | ||
| 380 | if (IS_ERR(res->vdda)) | ||
| 381 | return PTR_ERR(res->vdda); | ||
| 382 | |||
| 383 | res->iface = devm_clk_get(dev, "iface"); | ||
| 384 | if (IS_ERR(res->iface)) | ||
| 385 | return PTR_ERR(res->iface); | ||
| 386 | |||
| 387 | res->aux = devm_clk_get(dev, "aux"); | ||
| 388 | if (IS_ERR(res->aux)) | ||
| 389 | return PTR_ERR(res->aux); | ||
| 390 | |||
| 391 | res->master_bus = devm_clk_get(dev, "master_bus"); | ||
| 392 | if (IS_ERR(res->master_bus)) | ||
| 393 | return PTR_ERR(res->master_bus); | ||
| 394 | |||
| 395 | res->slave_bus = devm_clk_get(dev, "slave_bus"); | ||
| 396 | if (IS_ERR(res->slave_bus)) | ||
| 397 | return PTR_ERR(res->slave_bus); | ||
| 398 | |||
| 399 | res->core = devm_reset_control_get_exclusive(dev, "core"); | ||
| 400 | return PTR_ERR_OR_ZERO(res->core); | ||
| 401 | } | ||
| 402 | |||
| 403 | static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie) | ||
| 404 | { | ||
| 405 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; | ||
| 406 | |||
| 407 | reset_control_assert(res->core); | ||
| 408 | clk_disable_unprepare(res->slave_bus); | ||
| 409 | clk_disable_unprepare(res->master_bus); | ||
| 410 | clk_disable_unprepare(res->iface); | ||
| 411 | clk_disable_unprepare(res->aux); | ||
| 412 | regulator_disable(res->vdda); | ||
| 413 | } | ||
| 414 | |||
| 415 | static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie) | ||
| 416 | { | ||
| 417 | struct qcom_pcie_resources_1_0_0 *res = &pcie->res.v1_0_0; | ||
| 418 | struct dw_pcie *pci = pcie->pci; | ||
| 419 | struct device *dev = pci->dev; | ||
| 420 | int ret; | ||
| 421 | |||
| 422 | ret = reset_control_deassert(res->core); | ||
| 423 | if (ret) { | ||
| 424 | dev_err(dev, "cannot deassert core reset\n"); | ||
| 425 | return ret; | ||
| 426 | } | ||
| 427 | |||
| 428 | ret = clk_prepare_enable(res->aux); | ||
| 429 | if (ret) { | ||
| 430 | dev_err(dev, "cannot prepare/enable aux clock\n"); | ||
| 431 | goto err_res; | ||
| 432 | } | ||
| 433 | |||
| 434 | ret = clk_prepare_enable(res->iface); | ||
| 435 | if (ret) { | ||
| 436 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
| 437 | goto err_aux; | ||
| 438 | } | ||
| 439 | |||
| 440 | ret = clk_prepare_enable(res->master_bus); | ||
| 441 | if (ret) { | ||
| 442 | dev_err(dev, "cannot prepare/enable master_bus clock\n"); | ||
| 443 | goto err_iface; | ||
| 444 | } | ||
| 445 | |||
| 446 | ret = clk_prepare_enable(res->slave_bus); | ||
| 447 | if (ret) { | ||
| 448 | dev_err(dev, "cannot prepare/enable slave_bus clock\n"); | ||
| 449 | goto err_master; | ||
| 450 | } | ||
| 451 | |||
| 452 | ret = regulator_enable(res->vdda); | ||
| 453 | if (ret) { | ||
| 454 | dev_err(dev, "cannot enable vdda regulator\n"); | ||
| 455 | goto err_slave; | ||
| 456 | } | ||
| 457 | |||
| 458 | /* change DBI base address */ | ||
| 459 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
| 460 | |||
| 461 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 462 | u32 val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); | ||
| 463 | |||
| 464 | val |= BIT(31); | ||
| 465 | writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT); | ||
| 466 | } | ||
| 467 | |||
| 468 | return 0; | ||
| 469 | err_slave: | ||
| 470 | clk_disable_unprepare(res->slave_bus); | ||
| 471 | err_master: | ||
| 472 | clk_disable_unprepare(res->master_bus); | ||
| 473 | err_iface: | ||
| 474 | clk_disable_unprepare(res->iface); | ||
| 475 | err_aux: | ||
| 476 | clk_disable_unprepare(res->aux); | ||
| 477 | err_res: | ||
| 478 | reset_control_assert(res->core); | ||
| 479 | |||
| 480 | return ret; | ||
| 481 | } | ||
| 482 | |||
| 483 | static void qcom_pcie_2_3_2_ltssm_enable(struct qcom_pcie *pcie) | ||
| 484 | { | ||
| 485 | u32 val; | ||
| 486 | |||
| 487 | /* enable link training */ | ||
| 488 | val = readl(pcie->parf + PCIE20_PARF_LTSSM); | ||
| 489 | val |= BIT(8); | ||
| 490 | writel(val, pcie->parf + PCIE20_PARF_LTSSM); | ||
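| | /* Once BIT(8) is set, the LTSSM starts link training; link-up is observed separately through qcom_pcie_link_up(). */ | ||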
| 491 | } | ||
| 492 | |||
| 493 | static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie) | ||
| 494 | { | ||
| 495 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
| 496 | struct dw_pcie *pci = pcie->pci; | ||
| 497 | struct device *dev = pci->dev; | ||
| 498 | int ret; | ||
| 499 | |||
| 500 | res->supplies[0].supply = "vdda"; | ||
| 501 | res->supplies[1].supply = "vddpe-3v3"; | ||
| 502 | ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(res->supplies), | ||
| 503 | res->supplies); | ||
| 504 | if (ret) | ||
| 505 | return ret; | ||
| 506 | |||
| 507 | res->aux_clk = devm_clk_get(dev, "aux"); | ||
| 508 | if (IS_ERR(res->aux_clk)) | ||
| 509 | return PTR_ERR(res->aux_clk); | ||
| 510 | |||
| 511 | res->cfg_clk = devm_clk_get(dev, "cfg"); | ||
| 512 | if (IS_ERR(res->cfg_clk)) | ||
| 513 | return PTR_ERR(res->cfg_clk); | ||
| 514 | |||
| 515 | res->master_clk = devm_clk_get(dev, "bus_master"); | ||
| 516 | if (IS_ERR(res->master_clk)) | ||
| 517 | return PTR_ERR(res->master_clk); | ||
| 518 | |||
| 519 | res->slave_clk = devm_clk_get(dev, "bus_slave"); | ||
| 520 | if (IS_ERR(res->slave_clk)) | ||
| 521 | return PTR_ERR(res->slave_clk); | ||
| 522 | |||
| 523 | res->pipe_clk = devm_clk_get(dev, "pipe"); | ||
| 524 | return PTR_ERR_OR_ZERO(res->pipe_clk); | ||
| 525 | } | ||
| 526 | |||
| 527 | static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie) | ||
| 528 | { | ||
| 529 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
| 530 | |||
| 531 | clk_disable_unprepare(res->slave_clk); | ||
| 532 | clk_disable_unprepare(res->master_clk); | ||
| 533 | clk_disable_unprepare(res->cfg_clk); | ||
| 534 | clk_disable_unprepare(res->aux_clk); | ||
| 535 | |||
| 536 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 537 | } | ||
| 538 | |||
| 539 | static void qcom_pcie_post_deinit_2_3_2(struct qcom_pcie *pcie) | ||
| 540 | { | ||
| 541 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
| 542 | |||
| 543 | clk_disable_unprepare(res->pipe_clk); | ||
| 544 | } | ||
| 545 | |||
| 546 | static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie) | ||
| 547 | { | ||
| 548 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
| 549 | struct dw_pcie *pci = pcie->pci; | ||
| 550 | struct device *dev = pci->dev; | ||
| 551 | u32 val; | ||
| 552 | int ret; | ||
| 553 | |||
| 554 | ret = regulator_bulk_enable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 555 | if (ret < 0) { | ||
| 556 | dev_err(dev, "cannot enable regulators\n"); | ||
| 557 | return ret; | ||
| 558 | } | ||
| 559 | |||
| 560 | ret = clk_prepare_enable(res->aux_clk); | ||
| 561 | if (ret) { | ||
| 562 | dev_err(dev, "cannot prepare/enable aux clock\n"); | ||
| 563 | goto err_aux_clk; | ||
| 564 | } | ||
| 565 | |||
| 566 | ret = clk_prepare_enable(res->cfg_clk); | ||
| 567 | if (ret) { | ||
| 568 | dev_err(dev, "cannot prepare/enable cfg clock\n"); | ||
| 569 | goto err_cfg_clk; | ||
| 570 | } | ||
| 571 | |||
| 572 | ret = clk_prepare_enable(res->master_clk); | ||
| 573 | if (ret) { | ||
| 574 | dev_err(dev, "cannot prepare/enable master clock\n"); | ||
| 575 | goto err_master_clk; | ||
| 576 | } | ||
| 577 | |||
| 578 | ret = clk_prepare_enable(res->slave_clk); | ||
| 579 | if (ret) { | ||
| 580 | dev_err(dev, "cannot prepare/enable slave clock\n"); | ||
| 581 | goto err_slave_clk; | ||
| 582 | } | ||
| 583 | |||
| 584 | /* enable PCIe clocks and resets */ | ||
| 585 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 586 | val &= ~BIT(0); | ||
| 587 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 588 | |||
| 589 | /* change DBI base address */ | ||
| 590 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
| 591 | |||
| 592 | /* MAC PHY_POWERDOWN MUX DISABLE */ | ||
| 593 | val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 594 | val &= ~BIT(29); | ||
| 595 | writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 596 | |||
| 597 | val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 598 | val |= BIT(4); | ||
| 599 | writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 600 | |||
| 601 | val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 602 | val |= BIT(31); | ||
| 603 | writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 604 | |||
| 605 | return 0; | ||
| 606 | |||
| 607 | err_slave_clk: | ||
| 608 | clk_disable_unprepare(res->master_clk); | ||
| 609 | err_master_clk: | ||
| 610 | clk_disable_unprepare(res->cfg_clk); | ||
| 611 | err_cfg_clk: | ||
| 612 | clk_disable_unprepare(res->aux_clk); | ||
| 613 | |||
| 614 | err_aux_clk: | ||
| 615 | regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); | ||
| 616 | |||
| 617 | return ret; | ||
| 618 | } | ||
| 619 | |||
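| | /* | ||
| | * The pipe clock is handled in post_init/post_deinit rather than in | ||
| | * init/deinit: qcom_pcie_host_init() calls post_init() only after | ||
| | * phy_power_on(), which suggests the pipe clock is sourced from the | ||
| | * PHY (an inference from the call ordering, not from documentation). | ||
| | */ | ||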
| 620 | static int qcom_pcie_post_init_2_3_2(struct qcom_pcie *pcie) | ||
| 621 | { | ||
| 622 | struct qcom_pcie_resources_2_3_2 *res = &pcie->res.v2_3_2; | ||
| 623 | struct dw_pcie *pci = pcie->pci; | ||
| 624 | struct device *dev = pci->dev; | ||
| 625 | int ret; | ||
| 626 | |||
| 627 | ret = clk_prepare_enable(res->pipe_clk); | ||
| 628 | if (ret) { | ||
| 629 | dev_err(dev, "cannot prepare/enable pipe clock\n"); | ||
| 630 | return ret; | ||
| 631 | } | ||
| 632 | |||
| 633 | return 0; | ||
| 634 | } | ||
| 635 | |||
| 636 | static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie) | ||
| 637 | { | ||
| 638 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; | ||
| 639 | struct dw_pcie *pci = pcie->pci; | ||
| 640 | struct device *dev = pci->dev; | ||
| 641 | |||
| 642 | res->aux_clk = devm_clk_get(dev, "aux"); | ||
| 643 | if (IS_ERR(res->aux_clk)) | ||
| 644 | return PTR_ERR(res->aux_clk); | ||
| 645 | |||
| 646 | res->master_clk = devm_clk_get(dev, "master_bus"); | ||
| 647 | if (IS_ERR(res->master_clk)) | ||
| 648 | return PTR_ERR(res->master_clk); | ||
| 649 | |||
| 650 | res->slave_clk = devm_clk_get(dev, "slave_bus"); | ||
| 651 | if (IS_ERR(res->slave_clk)) | ||
| 652 | return PTR_ERR(res->slave_clk); | ||
| 653 | |||
| 654 | res->axi_m_reset = devm_reset_control_get_exclusive(dev, "axi_m"); | ||
| 655 | if (IS_ERR(res->axi_m_reset)) | ||
| 656 | return PTR_ERR(res->axi_m_reset); | ||
| 657 | |||
| 658 | res->axi_s_reset = devm_reset_control_get_exclusive(dev, "axi_s"); | ||
| 659 | if (IS_ERR(res->axi_s_reset)) | ||
| 660 | return PTR_ERR(res->axi_s_reset); | ||
| 661 | |||
| 662 | res->pipe_reset = devm_reset_control_get_exclusive(dev, "pipe"); | ||
| 663 | if (IS_ERR(res->pipe_reset)) | ||
| 664 | return PTR_ERR(res->pipe_reset); | ||
| 665 | |||
| 666 | res->axi_m_vmid_reset = devm_reset_control_get_exclusive(dev, | ||
| 667 | "axi_m_vmid"); | ||
| 668 | if (IS_ERR(res->axi_m_vmid_reset)) | ||
| 669 | return PTR_ERR(res->axi_m_vmid_reset); | ||
| 670 | |||
| 671 | res->axi_s_xpu_reset = devm_reset_control_get_exclusive(dev, | ||
| 672 | "axi_s_xpu"); | ||
| 673 | if (IS_ERR(res->axi_s_xpu_reset)) | ||
| 674 | return PTR_ERR(res->axi_s_xpu_reset); | ||
| 675 | |||
| 676 | res->parf_reset = devm_reset_control_get_exclusive(dev, "parf"); | ||
| 677 | if (IS_ERR(res->parf_reset)) | ||
| 678 | return PTR_ERR(res->parf_reset); | ||
| 679 | |||
| 680 | res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); | ||
| 681 | if (IS_ERR(res->phy_reset)) | ||
| 682 | return PTR_ERR(res->phy_reset); | ||
| 683 | |||
| 684 | res->axi_m_sticky_reset = devm_reset_control_get_exclusive(dev, | ||
| 685 | "axi_m_sticky"); | ||
| 686 | if (IS_ERR(res->axi_m_sticky_reset)) | ||
| 687 | return PTR_ERR(res->axi_m_sticky_reset); | ||
| 688 | |||
| 689 | res->pipe_sticky_reset = devm_reset_control_get_exclusive(dev, | ||
| 690 | "pipe_sticky"); | ||
| 691 | if (IS_ERR(res->pipe_sticky_reset)) | ||
| 692 | return PTR_ERR(res->pipe_sticky_reset); | ||
| 693 | |||
| 694 | res->pwr_reset = devm_reset_control_get_exclusive(dev, "pwr"); | ||
| 695 | if (IS_ERR(res->pwr_reset)) | ||
| 696 | return PTR_ERR(res->pwr_reset); | ||
| 697 | |||
| 698 | res->ahb_reset = devm_reset_control_get_exclusive(dev, "ahb"); | ||
| 699 | if (IS_ERR(res->ahb_reset)) | ||
| 700 | return PTR_ERR(res->ahb_reset); | ||
| 701 | |||
| 702 | res->phy_ahb_reset = devm_reset_control_get_exclusive(dev, "phy_ahb"); | ||
| 703 | if (IS_ERR(res->phy_ahb_reset)) | ||
| 704 | return PTR_ERR(res->phy_ahb_reset); | ||
| 705 | |||
| 706 | return 0; | ||
| 707 | } | ||
| 708 | |||
| 709 | static void qcom_pcie_deinit_2_4_0(struct qcom_pcie *pcie) | ||
| 710 | { | ||
| 711 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; | ||
| 712 | |||
| 713 | reset_control_assert(res->axi_m_reset); | ||
| 714 | reset_control_assert(res->axi_s_reset); | ||
| 715 | reset_control_assert(res->pipe_reset); | ||
| 716 | reset_control_assert(res->pipe_sticky_reset); | ||
| 717 | reset_control_assert(res->phy_reset); | ||
| 718 | reset_control_assert(res->phy_ahb_reset); | ||
| 719 | reset_control_assert(res->axi_m_sticky_reset); | ||
| 720 | reset_control_assert(res->pwr_reset); | ||
| 721 | reset_control_assert(res->ahb_reset); | ||
| 722 | clk_disable_unprepare(res->aux_clk); | ||
| 723 | clk_disable_unprepare(res->master_clk); | ||
| 724 | clk_disable_unprepare(res->slave_clk); | ||
| 725 | } | ||
| 726 | |||
| 727 | static int qcom_pcie_init_2_4_0(struct qcom_pcie *pcie) | ||
| 728 | { | ||
| 729 | struct qcom_pcie_resources_2_4_0 *res = &pcie->res.v2_4_0; | ||
| 730 | struct dw_pcie *pci = pcie->pci; | ||
| 731 | struct device *dev = pci->dev; | ||
| 732 | u32 val; | ||
| 733 | int ret; | ||
| 734 | |||
| 735 | ret = reset_control_assert(res->axi_m_reset); | ||
| 736 | if (ret) { | ||
| 737 | dev_err(dev, "cannot assert axi master reset\n"); | ||
| 738 | return ret; | ||
| 739 | } | ||
| 740 | |||
| 741 | ret = reset_control_assert(res->axi_s_reset); | ||
| 742 | if (ret) { | ||
| 743 | dev_err(dev, "cannot assert axi slave reset\n"); | ||
| 744 | return ret; | ||
| 745 | } | ||
| 746 | |||
| 747 | usleep_range(10000, 12000); | ||
| 748 | |||
| 749 | ret = reset_control_assert(res->pipe_reset); | ||
| 750 | if (ret) { | ||
| 751 | dev_err(dev, "cannot assert pipe reset\n"); | ||
| 752 | return ret; | ||
| 753 | } | ||
| 754 | |||
| 755 | ret = reset_control_assert(res->pipe_sticky_reset); | ||
| 756 | if (ret) { | ||
| 757 | dev_err(dev, "cannot assert pipe sticky reset\n"); | ||
| 758 | return ret; | ||
| 759 | } | ||
| 760 | |||
| 761 | ret = reset_control_assert(res->phy_reset); | ||
| 762 | if (ret) { | ||
| 763 | dev_err(dev, "cannot assert phy reset\n"); | ||
| 764 | return ret; | ||
| 765 | } | ||
| 766 | |||
| 767 | ret = reset_control_assert(res->phy_ahb_reset); | ||
| 768 | if (ret) { | ||
| 769 | dev_err(dev, "cannot assert phy ahb reset\n"); | ||
| 770 | return ret; | ||
| 771 | } | ||
| 772 | |||
| 773 | usleep_range(10000, 12000); | ||
| 774 | |||
| 775 | ret = reset_control_assert(res->axi_m_sticky_reset); | ||
| 776 | if (ret) { | ||
| 777 | dev_err(dev, "cannot assert axi master sticky reset\n"); | ||
| 778 | return ret; | ||
| 779 | } | ||
| 780 | |||
| 781 | ret = reset_control_assert(res->pwr_reset); | ||
| 782 | if (ret) { | ||
| 783 | dev_err(dev, "cannot assert power reset\n"); | ||
| 784 | return ret; | ||
| 785 | } | ||
| 786 | |||
| 787 | ret = reset_control_assert(res->ahb_reset); | ||
| 788 | if (ret) { | ||
| 789 | dev_err(dev, "cannot assert ahb reset\n"); | ||
| 790 | return ret; | ||
| 791 | } | ||
| 792 | |||
| 793 | usleep_range(10000, 12000); | ||
| 794 | |||
| 795 | ret = reset_control_deassert(res->phy_ahb_reset); | ||
| 796 | if (ret) { | ||
| 797 | dev_err(dev, "cannot deassert phy ahb reset\n"); | ||
| 798 | return ret; | ||
| 799 | } | ||
| 800 | |||
| 801 | ret = reset_control_deassert(res->phy_reset); | ||
| 802 | if (ret) { | ||
| 803 | dev_err(dev, "cannot deassert phy reset\n"); | ||
| 804 | goto err_rst_phy; | ||
| 805 | } | ||
| 806 | |||
| 807 | ret = reset_control_deassert(res->pipe_reset); | ||
| 808 | if (ret) { | ||
| 809 | dev_err(dev, "cannot deassert pipe reset\n"); | ||
| 810 | goto err_rst_pipe; | ||
| 811 | } | ||
| 812 | |||
| 813 | ret = reset_control_deassert(res->pipe_sticky_reset); | ||
| 814 | if (ret) { | ||
| 815 | dev_err(dev, "cannot deassert pipe sticky reset\n"); | ||
| 816 | goto err_rst_pipe_sticky; | ||
| 817 | } | ||
| 818 | |||
| 819 | usleep_range(10000, 12000); | ||
| 820 | |||
| 821 | ret = reset_control_deassert(res->axi_m_reset); | ||
| 822 | if (ret) { | ||
| 823 | dev_err(dev, "cannot deassert axi master reset\n"); | ||
| 824 | goto err_rst_axi_m; | ||
| 825 | } | ||
| 826 | |||
| 827 | ret = reset_control_deassert(res->axi_m_sticky_reset); | ||
| 828 | if (ret) { | ||
| 829 | dev_err(dev, "cannot deassert axi master sticky reset\n"); | ||
| 830 | goto err_rst_axi_m_sticky; | ||
| 831 | } | ||
| 832 | |||
| 833 | ret = reset_control_deassert(res->axi_s_reset); | ||
| 834 | if (ret) { | ||
| 835 | dev_err(dev, "cannot deassert axi slave reset\n"); | ||
| 836 | goto err_rst_axi_s; | ||
| 837 | } | ||
| 838 | |||
| 839 | ret = reset_control_deassert(res->pwr_reset); | ||
| 840 | if (ret) { | ||
| 841 | dev_err(dev, "cannot deassert power reset\n"); | ||
| 842 | goto err_rst_pwr; | ||
| 843 | } | ||
| 844 | |||
| 845 | ret = reset_control_deassert(res->ahb_reset); | ||
| 846 | if (ret) { | ||
| 847 | dev_err(dev, "cannot deassert ahb reset\n"); | ||
| 848 | goto err_rst_ahb; | ||
| 849 | } | ||
| 850 | |||
| 851 | usleep_range(10000, 12000); | ||
| 852 | |||
| 853 | ret = clk_prepare_enable(res->aux_clk); | ||
| 854 | if (ret) { | ||
| 855 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
| 856 | goto err_clk_aux; | ||
| 857 | } | ||
| 858 | |||
| 859 | ret = clk_prepare_enable(res->master_clk); | ||
| 860 | if (ret) { | ||
| 861 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
| 862 | goto err_clk_axi_m; | ||
| 863 | } | ||
| 864 | |||
| 865 | ret = clk_prepare_enable(res->slave_clk); | ||
| 866 | if (ret) { | ||
| 867 | dev_err(dev, "cannot prepare/enable phy clock\n"); | ||
| 868 | goto err_clk_axi_s; | ||
| 869 | } | ||
| 870 | |||
| 871 | /* enable PCIe clocks and resets */ | ||
| 872 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 873 | val &= ~BIT(0); | ||
| 874 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 875 | |||
| 876 | /* change DBI base address */ | ||
| 877 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
| 878 | |||
| 879 | /* MAC PHY_POWERDOWN MUX DISABLE */ | ||
| 880 | val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 881 | val &= ~BIT(29); | ||
| 882 | writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 883 | |||
| 884 | val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 885 | val |= BIT(4); | ||
| 886 | writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 887 | |||
| 888 | val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 889 | val |= BIT(31); | ||
| 890 | writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 891 | |||
| 892 | return 0; | ||
| 893 | |||
| 894 | err_clk_axi_s: | ||
| 895 | clk_disable_unprepare(res->master_clk); | ||
| 896 | err_clk_axi_m: | ||
| 897 | clk_disable_unprepare(res->aux_clk); | ||
| 898 | err_clk_aux: | ||
| 899 | reset_control_assert(res->ahb_reset); | ||
| 900 | err_rst_ahb: | ||
| 901 | reset_control_assert(res->pwr_reset); | ||
| 902 | err_rst_pwr: | ||
| 903 | reset_control_assert(res->axi_s_reset); | ||
| 904 | err_rst_axi_s: | ||
| 905 | reset_control_assert(res->axi_m_sticky_reset); | ||
| 906 | err_rst_axi_m_sticky: | ||
| 907 | reset_control_assert(res->axi_m_reset); | ||
| 908 | err_rst_axi_m: | ||
| 909 | reset_control_assert(res->pipe_sticky_reset); | ||
| 910 | err_rst_pipe_sticky: | ||
| 911 | reset_control_assert(res->pipe_reset); | ||
| 912 | err_rst_pipe: | ||
| 913 | reset_control_assert(res->phy_reset); | ||
| 914 | err_rst_phy: | ||
| 915 | reset_control_assert(res->phy_ahb_reset); | ||
| 916 | return ret; | ||
| 917 | } | ||
| 918 | |||
| 919 | static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie) | ||
| 920 | { | ||
| 921 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
| 922 | struct dw_pcie *pci = pcie->pci; | ||
| 923 | struct device *dev = pci->dev; | ||
| 924 | int i; | ||
| 925 | const char *rst_names[] = { "axi_m", "axi_s", "pipe", | ||
| 926 | "axi_m_sticky", "sticky", | ||
| 927 | "ahb", "sleep", }; | ||
| 928 | |||
| 929 | res->iface = devm_clk_get(dev, "iface"); | ||
| 930 | if (IS_ERR(res->iface)) | ||
| 931 | return PTR_ERR(res->iface); | ||
| 932 | |||
| 933 | res->axi_m_clk = devm_clk_get(dev, "axi_m"); | ||
| 934 | if (IS_ERR(res->axi_m_clk)) | ||
| 935 | return PTR_ERR(res->axi_m_clk); | ||
| 936 | |||
| 937 | res->axi_s_clk = devm_clk_get(dev, "axi_s"); | ||
| 938 | if (IS_ERR(res->axi_s_clk)) | ||
| 939 | return PTR_ERR(res->axi_s_clk); | ||
| 940 | |||
| 941 | res->ahb_clk = devm_clk_get(dev, "ahb"); | ||
| 942 | if (IS_ERR(res->ahb_clk)) | ||
| 943 | return PTR_ERR(res->ahb_clk); | ||
| 944 | |||
| 945 | res->aux_clk = devm_clk_get(dev, "aux"); | ||
| 946 | if (IS_ERR(res->aux_clk)) | ||
| 947 | return PTR_ERR(res->aux_clk); | ||
| 948 | |||
| 949 | for (i = 0; i < ARRAY_SIZE(rst_names); i++) { | ||
| 950 | res->rst[i] = devm_reset_control_get(dev, rst_names[i]); | ||
| 951 | if (IS_ERR(res->rst[i])) | ||
| 952 | return PTR_ERR(res->rst[i]); | ||
| 953 | } | ||
| 954 | |||
| 955 | return 0; | ||
| 956 | } | ||
| 957 | |||
| 958 | static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie) | ||
| 959 | { | ||
| 960 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
| 961 | |||
| 962 | clk_disable_unprepare(res->iface); | ||
| 963 | clk_disable_unprepare(res->axi_m_clk); | ||
| 964 | clk_disable_unprepare(res->axi_s_clk); | ||
| 965 | clk_disable_unprepare(res->ahb_clk); | ||
| 966 | clk_disable_unprepare(res->aux_clk); | ||
| 967 | } | ||
| 968 | |||
| 969 | static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) | ||
| 970 | { | ||
| 971 | struct qcom_pcie_resources_2_3_3 *res = &pcie->res.v2_3_3; | ||
| 972 | struct dw_pcie *pci = pcie->pci; | ||
| 973 | struct device *dev = pci->dev; | ||
| 974 | int i, ret; | ||
| 975 | u32 val; | ||
| 976 | |||
| 977 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
| 978 | ret = reset_control_assert(res->rst[i]); | ||
| 979 | if (ret) { | ||
| 980 | dev_err(dev, "reset #%d assert failed (%d)\n", i, ret); | ||
| 981 | return ret; | ||
| 982 | } | ||
| 983 | } | ||
| 984 | |||
| 985 | usleep_range(2000, 2500); | ||
| 986 | |||
| 987 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) { | ||
| 988 | ret = reset_control_deassert(res->rst[i]); | ||
| 989 | if (ret) { | ||
| 990 | dev_err(dev, "reset #%d deassert failed (%d)\n", i, | ||
| 991 | ret); | ||
| 992 | return ret; | ||
| 993 | } | ||
| 994 | } | ||
| 995 | |||
| 996 | /* | ||
| 997 | * There is no way to observe whether the resets have completed, | ||
| 998 | * so wait for a fixed delay. | ||
| 999 | */ | ||
| 1000 | usleep_range(2000, 2500); | ||
| 1001 | |||
| 1002 | ret = clk_prepare_enable(res->iface); | ||
| 1003 | if (ret) { | ||
| 1004 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
| 1005 | goto err_clk_iface; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | ret = clk_prepare_enable(res->axi_m_clk); | ||
| 1009 | if (ret) { | ||
| 1010 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
| 1011 | goto err_clk_axi_m; | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | ret = clk_prepare_enable(res->axi_s_clk); | ||
| 1015 | if (ret) { | ||
| 1016 | dev_err(dev, "cannot prepare/enable axi slave clock\n"); | ||
| 1017 | goto err_clk_axi_s; | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | ret = clk_prepare_enable(res->ahb_clk); | ||
| 1021 | if (ret) { | ||
| 1022 | dev_err(dev, "cannot prepare/enable ahb clock\n"); | ||
| 1023 | goto err_clk_ahb; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | ret = clk_prepare_enable(res->aux_clk); | ||
| 1027 | if (ret) { | ||
| 1028 | dev_err(dev, "cannot prepare/enable aux clock\n"); | ||
| 1029 | goto err_clk_aux; | ||
| 1030 | } | ||
| 1031 | |||
| 1032 | writel(SLV_ADDR_SPACE_SZ, | ||
| 1033 | pcie->parf + PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE); | ||
| 1034 | |||
| 1035 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 1036 | val &= ~BIT(0); | ||
| 1037 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 1038 | |||
| 1039 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
| 1040 | |||
| 1041 | writel(MST_WAKEUP_EN | SLV_WAKEUP_EN | MSTR_ACLK_CGC_DIS | ||
| 1042 | | SLV_ACLK_CGC_DIS | CORE_CLK_CGC_DIS | | ||
| 1043 | AUX_PWR_DET | L23_CLK_RMV_DIS | L1_CLK_RMV_DIS, | ||
| 1044 | pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 1045 | writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); | ||
| 1046 | |||
| 1047 | writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); | ||
| 1048 | writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); | ||
| 1049 | writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); | ||
| 1050 | |||
| 1051 | val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
| 1052 | val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; | ||
| 1053 | writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); | ||
| 1054 | |||
| 1055 | writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + | ||
| 1056 | PCIE20_DEVICE_CONTROL2_STATUS2); | ||
| 1057 | |||
| 1058 | return 0; | ||
| 1059 | |||
| 1060 | err_clk_aux: | ||
| 1061 | clk_disable_unprepare(res->ahb_clk); | ||
| 1062 | err_clk_ahb: | ||
| 1063 | clk_disable_unprepare(res->axi_s_clk); | ||
| 1064 | err_clk_axi_s: | ||
| 1065 | clk_disable_unprepare(res->axi_m_clk); | ||
| 1066 | err_clk_axi_m: | ||
| 1067 | clk_disable_unprepare(res->iface); | ||
| 1068 | err_clk_iface: | ||
| 1069 | /* | ||
| 1070 | * Failures are not checked here; the original failure | ||
| 1071 | * held in 'ret' is returned either way. | ||
| 1072 | */ | ||
| 1073 | for (i = 0; i < ARRAY_SIZE(res->rst); i++) | ||
| 1074 | reset_control_assert(res->rst[i]); | ||
| 1075 | |||
| 1076 | return ret; | ||
| 1077 | } | ||
| 1078 | |||
| 1079 | static int qcom_pcie_link_up(struct dw_pcie *pci) | ||
| 1080 | { | ||
| 1081 | u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); | ||
| 1082 | |||
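| | /* PCI_EXP_LNKSTA_DLLLA is the Data Link Layer Link Active bit in the Link Status register. */ | ||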
| 1083 | return !!(val & PCI_EXP_LNKSTA_DLLLA); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static int qcom_pcie_host_init(struct pcie_port *pp) | ||
| 1087 | { | ||
| 1088 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 1089 | struct qcom_pcie *pcie = to_qcom_pcie(pci); | ||
| 1090 | int ret; | ||
| 1091 | |||
| 1092 | pm_runtime_get_sync(pci->dev); | ||
| 1093 | qcom_ep_reset_assert(pcie); | ||
| 1094 | |||
| 1095 | ret = pcie->ops->init(pcie); | ||
| 1096 | if (ret) | ||
| 1097 | return ret; | ||
| 1098 | |||
| 1099 | ret = phy_power_on(pcie->phy); | ||
| 1100 | if (ret) | ||
| 1101 | goto err_deinit; | ||
| 1102 | |||
| 1103 | if (pcie->ops->post_init) { | ||
| 1104 | ret = pcie->ops->post_init(pcie); | ||
| 1105 | if (ret) | ||
| 1106 | goto err_disable_phy; | ||
| 1107 | } | ||
| 1108 | |||
| 1109 | dw_pcie_setup_rc(pp); | ||
| 1110 | |||
| 1111 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 1112 | dw_pcie_msi_init(pp); | ||
| 1113 | |||
| 1114 | qcom_ep_reset_deassert(pcie); | ||
| 1115 | |||
| 1116 | ret = qcom_pcie_establish_link(pcie); | ||
| 1117 | if (ret) | ||
| 1118 | goto err; | ||
| 1119 | |||
| 1120 | return 0; | ||
| 1121 | err: | ||
| 1122 | qcom_ep_reset_assert(pcie); | ||
| 1123 | if (pcie->ops->post_deinit) | ||
| 1124 | pcie->ops->post_deinit(pcie); | ||
| 1125 | err_disable_phy: | ||
| 1126 | phy_power_off(pcie->phy); | ||
| 1127 | err_deinit: | ||
| 1128 | pcie->ops->deinit(pcie); | ||
| 1129 | pm_runtime_put(pci->dev); | ||
| 1130 | |||
| 1131 | return ret; | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
| 1135 | u32 *val) | ||
| 1136 | { | ||
| 1137 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 1138 | |||
| 1139 | /* the device class is not reported correctly by the register */ | ||
| 1140 | if (where == PCI_CLASS_REVISION && size == 4) { | ||
| 1141 | *val = readl(pci->dbi_base + PCI_CLASS_REVISION); | ||
| 1142 | *val &= 0xff; /* keep revision id */ | ||
| 1143 | *val |= PCI_CLASS_BRIDGE_PCI << 16; | ||
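| | /* PCI_CLASS_BRIDGE_PCI (0x0604) shifted by 16 lands in the base-class/sub-class fields (bits 31:16) of the dword. */ | ||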
| 1144 | return PCIBIOS_SUCCESSFUL; | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | return dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { | ||
| 1151 | .host_init = qcom_pcie_host_init, | ||
| 1152 | .rd_own_conf = qcom_pcie_rd_own_conf, | ||
| 1153 | }; | ||
| 1154 | |||
| 1155 | /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ | ||
| 1156 | static const struct qcom_pcie_ops ops_2_1_0 = { | ||
| 1157 | .get_resources = qcom_pcie_get_resources_2_1_0, | ||
| 1158 | .init = qcom_pcie_init_2_1_0, | ||
| 1159 | .deinit = qcom_pcie_deinit_2_1_0, | ||
| 1160 | .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, | ||
| 1161 | }; | ||
| 1162 | |||
| 1163 | /* Qcom IP rev.: 1.0.0 Synopsys IP rev.: 4.11a */ | ||
| 1164 | static const struct qcom_pcie_ops ops_1_0_0 = { | ||
| 1165 | .get_resources = qcom_pcie_get_resources_1_0_0, | ||
| 1166 | .init = qcom_pcie_init_1_0_0, | ||
| 1167 | .deinit = qcom_pcie_deinit_1_0_0, | ||
| 1168 | .ltssm_enable = qcom_pcie_2_1_0_ltssm_enable, | ||
| 1169 | }; | ||
| 1170 | |||
| 1171 | /* Qcom IP rev.: 2.3.2 Synopsys IP rev.: 4.21a */ | ||
| 1172 | static const struct qcom_pcie_ops ops_2_3_2 = { | ||
| 1173 | .get_resources = qcom_pcie_get_resources_2_3_2, | ||
| 1174 | .init = qcom_pcie_init_2_3_2, | ||
| 1175 | .post_init = qcom_pcie_post_init_2_3_2, | ||
| 1176 | .deinit = qcom_pcie_deinit_2_3_2, | ||
| 1177 | .post_deinit = qcom_pcie_post_deinit_2_3_2, | ||
| 1178 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
| 1179 | }; | ||
| 1180 | |||
| 1181 | /* Qcom IP rev.: 2.4.0 Synopsys IP rev.: 4.20a */ | ||
| 1182 | static const struct qcom_pcie_ops ops_2_4_0 = { | ||
| 1183 | .get_resources = qcom_pcie_get_resources_2_4_0, | ||
| 1184 | .init = qcom_pcie_init_2_4_0, | ||
| 1185 | .deinit = qcom_pcie_deinit_2_4_0, | ||
| 1186 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
| 1187 | }; | ||
| 1188 | |||
| 1189 | /* Qcom IP rev.: 2.3.3 Synopsys IP rev.: 4.30a */ | ||
| 1190 | static const struct qcom_pcie_ops ops_2_3_3 = { | ||
| 1191 | .get_resources = qcom_pcie_get_resources_2_3_3, | ||
| 1192 | .init = qcom_pcie_init_2_3_3, | ||
| 1193 | .deinit = qcom_pcie_deinit_2_3_3, | ||
| 1194 | .ltssm_enable = qcom_pcie_2_3_2_ltssm_enable, | ||
| 1195 | }; | ||
| 1196 | |||
| 1197 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 1198 | .link_up = qcom_pcie_link_up, | ||
| 1199 | }; | ||
| 1200 | |||
| 1201 | static int qcom_pcie_probe(struct platform_device *pdev) | ||
| 1202 | { | ||
| 1203 | struct device *dev = &pdev->dev; | ||
| 1204 | struct resource *res; | ||
| 1205 | struct pcie_port *pp; | ||
| 1206 | struct dw_pcie *pci; | ||
| 1207 | struct qcom_pcie *pcie; | ||
| 1208 | int ret; | ||
| 1209 | |||
| 1210 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 1211 | if (!pcie) | ||
| 1212 | return -ENOMEM; | ||
| 1213 | |||
| 1214 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 1215 | if (!pci) | ||
| 1216 | return -ENOMEM; | ||
| 1217 | |||
| 1218 | pm_runtime_enable(dev); | ||
| 1219 | pci->dev = dev; | ||
| 1220 | pci->ops = &dw_pcie_ops; | ||
| 1221 | pp = &pci->pp; | ||
| 1222 | |||
| 1223 | pcie->pci = pci; | ||
| 1224 | |||
| 1225 | pcie->ops = of_device_get_match_data(dev); | ||
| 1226 | |||
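| | /* "perst" is optional: devm_gpiod_get_optional() returns NULL when the GPIO is absent, and the gpiod setters accept a NULL descriptor as a no-op. */ | ||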
| 1227 | pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW); | ||
| 1228 | if (IS_ERR(pcie->reset)) | ||
| 1229 | return PTR_ERR(pcie->reset); | ||
| 1230 | |||
| 1231 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); | ||
| 1232 | pcie->parf = devm_ioremap_resource(dev, res); | ||
| 1233 | if (IS_ERR(pcie->parf)) | ||
| 1234 | return PTR_ERR(pcie->parf); | ||
| 1235 | |||
| 1236 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 1237 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 1238 | if (IS_ERR(pci->dbi_base)) | ||
| 1239 | return PTR_ERR(pci->dbi_base); | ||
| 1240 | |||
| 1241 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); | ||
| 1242 | pcie->elbi = devm_ioremap_resource(dev, res); | ||
| 1243 | if (IS_ERR(pcie->elbi)) | ||
| 1244 | return PTR_ERR(pcie->elbi); | ||
| 1245 | |||
| 1246 | pcie->phy = devm_phy_optional_get(dev, "pciephy"); | ||
| 1247 | if (IS_ERR(pcie->phy)) | ||
| 1248 | return PTR_ERR(pcie->phy); | ||
| 1249 | |||
| 1250 | ret = pcie->ops->get_resources(pcie); | ||
| 1251 | if (ret) | ||
| 1252 | return ret; | ||
| 1253 | |||
| 1254 | pp->root_bus_nr = -1; | ||
| 1255 | pp->ops = &qcom_pcie_dw_ops; | ||
| 1256 | |||
| 1257 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 1258 | pp->msi_irq = platform_get_irq_byname(pdev, "msi"); | ||
| 1259 | if (pp->msi_irq < 0) | ||
| 1260 | return pp->msi_irq; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | ret = phy_init(pcie->phy); | ||
| 1264 | if (ret) { | ||
| 1265 | pm_runtime_disable(&pdev->dev); | ||
| 1266 | return ret; | ||
| 1267 | } | ||
| 1268 | |||
| 1269 | platform_set_drvdata(pdev, pcie); | ||
| 1270 | |||
| 1271 | ret = dw_pcie_host_init(pp); | ||
| 1272 | if (ret) { | ||
| 1273 | dev_err(dev, "cannot initialize host\n"); | ||
| 1274 | pm_runtime_disable(&pdev->dev); | ||
| 1275 | return ret; | ||
| 1276 | } | ||
| 1277 | |||
| 1278 | return 0; | ||
| 1279 | } | ||
| 1280 | |||
| 1281 | static const struct of_device_id qcom_pcie_match[] = { | ||
| 1282 | { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, | ||
| 1283 | { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, | ||
| 1284 | { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, | ||
| 1285 | { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, | ||
| 1286 | { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, | ||
| 1287 | { .compatible = "qcom,pcie-ipq4019", .data = &ops_2_4_0 }, | ||
| 1288 | { } | ||
| 1289 | }; | ||
| 1290 | |||
| 1291 | static struct platform_driver qcom_pcie_driver = { | ||
| 1292 | .probe = qcom_pcie_probe, | ||
| 1293 | .driver = { | ||
| 1294 | .name = "qcom-pcie", | ||
| 1295 | .suppress_bind_attrs = true, | ||
| 1296 | .of_match_table = qcom_pcie_match, | ||
| 1297 | }, | ||
| 1298 | }; | ||
| 1299 | builtin_platform_driver(qcom_pcie_driver); | ||
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c new file mode 100644 index 000000000000..ecb58f7b7566 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c | |||
| @@ -0,0 +1,314 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for ST Microelectronics SPEAr13xx SoCs | ||
| 4 | * | ||
| 5 | * SPEAr13xx PCIe Glue Layer Source Code | ||
| 6 | * | ||
| 7 | * Copyright (C) 2010-2014 ST Microelectronics | ||
| 8 | * Pratyush Anand <pratyush.anand@gmail.com> | ||
| 9 | * Mohit Kumar <mohit.kumar.dhaka@gmail.com> | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/clk.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/init.h> | ||
| 16 | #include <linux/of.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | #include <linux/phy/phy.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/resource.h> | ||
| 21 | |||
| 22 | #include "pcie-designware.h" | ||
| 23 | |||
| 24 | struct spear13xx_pcie { | ||
| 25 | struct dw_pcie *pci; | ||
| 26 | void __iomem *app_base; | ||
| 27 | struct phy *phy; | ||
| 28 | struct clk *clk; | ||
| 29 | bool is_gen1; | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct pcie_app_reg { | ||
| 33 | u32 app_ctrl_0; /* cr0 */ | ||
| 34 | u32 app_ctrl_1; /* cr1 */ | ||
| 35 | u32 app_status_0; /* cr2 */ | ||
| 36 | u32 app_status_1; /* cr3 */ | ||
| 37 | u32 msg_status; /* cr4 */ | ||
| 38 | u32 msg_payload; /* cr5 */ | ||
| 39 | u32 int_sts; /* cr6 */ | ||
| 40 | u32 int_clr; /* cr7 */ | ||
| 41 | u32 int_mask; /* cr8 */ | ||
| 42 | u32 mst_bmisc; /* cr9 */ | ||
| 43 | u32 phy_ctrl; /* cr10 */ | ||
| 44 | u32 phy_status; /* cr11 */ | ||
| 45 | u32 cxpl_debug_info_0; /* cr12 */ | ||
| 46 | u32 cxpl_debug_info_1; /* cr13 */ | ||
| 47 | u32 ven_msg_ctrl_0; /* cr14 */ | ||
| 48 | u32 ven_msg_ctrl_1; /* cr15 */ | ||
| 49 | u32 ven_msg_data_0; /* cr16 */ | ||
| 50 | u32 ven_msg_data_1; /* cr17 */ | ||
| 51 | u32 ven_msi_0; /* cr18 */ | ||
| 52 | u32 ven_msi_1; /* cr19 */ | ||
| 53 | u32 mst_rmisc; /* cr20 */ | ||
| 54 | }; | ||
| 55 | |||
| 56 | /* CR0 ID */ | ||
| 57 | #define APP_LTSSM_ENABLE_ID 3 | ||
| 58 | #define DEVICE_TYPE_RC (4 << 25) | ||
| 59 | #define MISCTRL_EN_ID 30 | ||
| 60 | #define REG_TRANSLATION_ENABLE 31 | ||
| 61 | |||
| 62 | /* CR3 ID */ | ||
| 63 | #define XMLH_LINK_UP (1 << 6) | ||
| 64 | |||
| 65 | /* CR6 */ | ||
| 66 | #define MSI_CTRL_INT (1 << 26) | ||
| 67 | |||
| 68 | #define EXP_CAP_ID_OFFSET 0x70 | ||
| 69 | |||
| 70 | #define to_spear13xx_pcie(x) dev_get_drvdata((x)->dev) | ||
| 71 | |||
| 72 | static int spear13xx_pcie_establish_link(struct spear13xx_pcie *spear13xx_pcie) | ||
| 73 | { | ||
| 74 | struct dw_pcie *pci = spear13xx_pcie->pci; | ||
| 75 | struct pcie_port *pp = &pci->pp; | ||
| 76 | struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; | ||
| 77 | u32 val; | ||
| 78 | u32 exp_cap_off = EXP_CAP_ID_OFFSET; | ||
| 79 | |||
| 80 | if (dw_pcie_link_up(pci)) { | ||
| 81 | dev_err(pci->dev, "link already up\n"); | ||
| 82 | return 0; | ||
| 83 | } | ||
| 84 | |||
| 85 | dw_pcie_setup_rc(pp); | ||
| 86 | |||
| 87 | /* | ||
| 88 | * this controller supports only a 128-byte read request size, but | ||
| 89 | * its default value in the capability register is 512 bytes, so | ||
| 90 | * force it to 128 here. | ||
| 91 | */ | ||
| 92 | dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, &val); | ||
| 93 | val &= ~PCI_EXP_DEVCTL_READRQ; | ||
| 94 | dw_pcie_write(pci->dbi_base + exp_cap_off + PCI_EXP_DEVCTL, 2, val); | ||
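| | /* PCI_EXP_DEVCTL_READRQ masks bits 14:12 of Device Control; the all-zero field value selects a 128-byte Max_Read_Request_Size. */ | ||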
| 95 | |||
| 96 | dw_pcie_write(pci->dbi_base + PCI_VENDOR_ID, 2, 0x104A); | ||
| 97 | dw_pcie_write(pci->dbi_base + PCI_DEVICE_ID, 2, 0xCD80); | ||
| 98 | |||
| 99 | /* | ||
| 100 | * if is_gen1 is set, limit the link to Gen1 speed so that some | ||
| 101 | * buggy cards also work | ||
| 102 | */ | ||
| 103 | if (spear13xx_pcie->is_gen1) { | ||
| 104 | dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCAP, | ||
| 105 | 4, &val); | ||
| 106 | if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { | ||
| 107 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 108 | val |= PCI_EXP_LNKCAP_SLS_2_5GB; | ||
| 109 | dw_pcie_write(pci->dbi_base + exp_cap_off + | ||
| 110 | PCI_EXP_LNKCAP, 4, val); | ||
| 111 | } | ||
| 112 | |||
| 113 | dw_pcie_read(pci->dbi_base + exp_cap_off + PCI_EXP_LNKCTL2, | ||
| 114 | 2, &val); | ||
| 115 | if ((val & PCI_EXP_LNKCAP_SLS) != PCI_EXP_LNKCAP_SLS_2_5GB) { | ||
| 116 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 117 | val |= PCI_EXP_LNKCAP_SLS_2_5GB; | ||
| 118 | dw_pcie_write(pci->dbi_base + exp_cap_off + | ||
| 119 | PCI_EXP_LNKCTL2, 2, val); | ||
| 120 | } | ||
| 121 | } | ||
| 122 | |||
| 123 | /* enable ltssm */ | ||
| 124 | writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID) | ||
| 125 | | (1 << APP_LTSSM_ENABLE_ID) | ||
| 126 | | ((u32)1 << REG_TRANSLATION_ENABLE), | ||
| 127 | &app_reg->app_ctrl_0); | ||
| 128 | |||
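| | /* dw_pcie_wait_for_link() polls the ->link_up callback (spear13xx_pcie_link_up() here) with a retry/delay loop and returns -ETIMEDOUT if the link never trains. */ | ||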
| 129 | return dw_pcie_wait_for_link(pci); | ||
| 130 | } | ||
| 131 | |||
| 132 | static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg) | ||
| 133 | { | ||
| 134 | struct spear13xx_pcie *spear13xx_pcie = arg; | ||
| 135 | struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; | ||
| 136 | struct dw_pcie *pci = spear13xx_pcie->pci; | ||
| 137 | struct pcie_port *pp = &pci->pp; | ||
| 138 | unsigned int status; | ||
| 139 | |||
| 140 | status = readl(&app_reg->int_sts); | ||
| 141 | |||
| 142 | if (status & MSI_CTRL_INT) { | ||
| 143 | BUG_ON(!IS_ENABLED(CONFIG_PCI_MSI)); | ||
| 144 | dw_handle_msi_irq(pp); | ||
| 145 | } | ||
| 146 | |||
| 147 | writel(status, &app_reg->int_clr); | ||
| 148 | |||
| 149 | return IRQ_HANDLED; | ||
| 150 | } | ||
| 151 | |||
| 152 | static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie) | ||
| 153 | { | ||
| 154 | struct dw_pcie *pci = spear13xx_pcie->pci; | ||
| 155 | struct pcie_port *pp = &pci->pp; | ||
| 156 | struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; | ||
| 157 | |||
| 158 | /* Enable MSI interrupt */ | ||
| 159 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 160 | dw_pcie_msi_init(pp); | ||
| 161 | writel(readl(&app_reg->int_mask) | | ||
| 162 | MSI_CTRL_INT, &app_reg->int_mask); | ||
| 163 | } | ||
| 164 | } | ||
| 165 | |||
| 166 | static int spear13xx_pcie_link_up(struct dw_pcie *pci) | ||
| 167 | { | ||
| 168 | struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); | ||
| 169 | struct pcie_app_reg *app_reg = spear13xx_pcie->app_base; | ||
| 170 | |||
| 171 | if (readl(&app_reg->app_status_1) & XMLH_LINK_UP) | ||
| 172 | return 1; | ||
| 173 | |||
| 174 | return 0; | ||
| 175 | } | ||
| 176 | |||
| 177 | static int spear13xx_pcie_host_init(struct pcie_port *pp) | ||
| 178 | { | ||
| 179 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 180 | struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci); | ||
| 181 | |||
| 182 | spear13xx_pcie_establish_link(spear13xx_pcie); | ||
| 183 | spear13xx_pcie_enable_interrupts(spear13xx_pcie); | ||
| 184 | |||
| 185 | return 0; | ||
| 186 | } | ||
| 187 | |||
| 188 | static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = { | ||
| 189 | .host_init = spear13xx_pcie_host_init, | ||
| 190 | }; | ||
| 191 | |||
| 192 | static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, | ||
| 193 | struct platform_device *pdev) | ||
| 194 | { | ||
| 195 | struct dw_pcie *pci = spear13xx_pcie->pci; | ||
| 196 | struct pcie_port *pp = &pci->pp; | ||
| 197 | struct device *dev = &pdev->dev; | ||
| 198 | int ret; | ||
| 199 | |||
| 200 | pp->irq = platform_get_irq(pdev, 0); | ||
| 201 | if (pp->irq < 0) { | ||
| 202 | dev_err(dev, "failed to get irq\n"); | ||
| 203 | return pp->irq; | ||
| 204 | } | ||
| 205 | ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, | ||
| 206 | IRQF_SHARED | IRQF_NO_THREAD, | ||
| 207 | "spear1340-pcie", spear13xx_pcie); | ||
| 208 | if (ret) { | ||
| 209 | dev_err(dev, "failed to request irq %d\n", pp->irq); | ||
| 210 | return ret; | ||
| 211 | } | ||
| 212 | |||
| 213 | pp->root_bus_nr = -1; | ||
| 214 | pp->ops = &spear13xx_pcie_host_ops; | ||
| 215 | |||
| 216 | ret = dw_pcie_host_init(pp); | ||
| 217 | if (ret) { | ||
| 218 | dev_err(dev, "failed to initialize host\n"); | ||
| 219 | return ret; | ||
| 220 | } | ||
| 221 | |||
| 222 | return 0; | ||
| 223 | } | ||
| 224 | |||
| 225 | static const struct dw_pcie_ops dw_pcie_ops = { | ||
| 226 | .link_up = spear13xx_pcie_link_up, | ||
| 227 | }; | ||
| 228 | |||
| 229 | static int spear13xx_pcie_probe(struct platform_device *pdev) | ||
| 230 | { | ||
| 231 | struct device *dev = &pdev->dev; | ||
| 232 | struct dw_pcie *pci; | ||
| 233 | struct spear13xx_pcie *spear13xx_pcie; | ||
| 234 | struct device_node *np = dev->of_node; | ||
| 235 | struct resource *dbi_base; | ||
| 236 | int ret; | ||
| 237 | |||
| 238 | spear13xx_pcie = devm_kzalloc(dev, sizeof(*spear13xx_pcie), GFP_KERNEL); | ||
| 239 | if (!spear13xx_pcie) | ||
| 240 | return -ENOMEM; | ||
| 241 | |||
| 242 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 243 | if (!pci) | ||
| 244 | return -ENOMEM; | ||
| 245 | |||
| 246 | pci->dev = dev; | ||
| 247 | pci->ops = &dw_pcie_ops; | ||
| 248 | |||
| 249 | spear13xx_pcie->pci = pci; | ||
| 250 | |||
| 251 | spear13xx_pcie->phy = devm_phy_get(dev, "pcie-phy"); | ||
| 252 | if (IS_ERR(spear13xx_pcie->phy)) { | ||
| 253 | ret = PTR_ERR(spear13xx_pcie->phy); | ||
| 254 | if (ret == -EPROBE_DEFER) | ||
| 255 | dev_info(dev, "probe deferred\n"); | ||
| 256 | else | ||
| 257 | dev_err(dev, "couldn't get pcie-phy\n"); | ||
| 258 | return ret; | ||
| 259 | } | ||
| 260 | |||
| 261 | phy_init(spear13xx_pcie->phy); | ||
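| | /* Note: the phy_init() return value is not checked here; a PHY init failure would only surface later. */ | ||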
| 262 | |||
| 263 | spear13xx_pcie->clk = devm_clk_get(dev, NULL); | ||
| 264 | if (IS_ERR(spear13xx_pcie->clk)) { | ||
| 265 | dev_err(dev, "couldn't get clk for pcie\n"); | ||
| 266 | return PTR_ERR(spear13xx_pcie->clk); | ||
| 267 | } | ||
| 268 | ret = clk_prepare_enable(spear13xx_pcie->clk); | ||
| 269 | if (ret) { | ||
| 270 | dev_err(dev, "couldn't enable clk for pcie\n"); | ||
| 271 | return ret; | ||
| 272 | } | ||
| 273 | |||
| 274 | dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 275 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); | ||
| 276 | if (IS_ERR(pci->dbi_base)) { | ||
| 277 | dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); | ||
| 278 | ret = PTR_ERR(pci->dbi_base); | ||
| 279 | goto fail_clk; | ||
| 280 | } | ||
| 281 | spear13xx_pcie->app_base = pci->dbi_base + 0x2000; | ||
| 282 | |||
| 283 | if (of_property_read_bool(np, "st,pcie-is-gen1")) | ||
| 284 | spear13xx_pcie->is_gen1 = true; | ||
| 285 | |||
| 286 | platform_set_drvdata(pdev, spear13xx_pcie); | ||
| 287 | |||
| 288 | ret = spear13xx_add_pcie_port(spear13xx_pcie, pdev); | ||
| 289 | if (ret < 0) | ||
| 290 | goto fail_clk; | ||
| 291 | |||
| 292 | return 0; | ||
| 293 | |||
| 294 | fail_clk: | ||
| 295 | clk_disable_unprepare(spear13xx_pcie->clk); | ||
| 296 | |||
| 297 | return ret; | ||
| 298 | } | ||
| 299 | |||
| 300 | static const struct of_device_id spear13xx_pcie_of_match[] = { | ||
| 301 | { .compatible = "st,spear1340-pcie", }, | ||
| 302 | {}, | ||
| 303 | }; | ||
| 304 | |||
| 305 | static struct platform_driver spear13xx_pcie_driver = { | ||
| 306 | .probe = spear13xx_pcie_probe, | ||
| 307 | .driver = { | ||
| 308 | .name = "spear-pcie", | ||
| 309 | .of_match_table = of_match_ptr(spear13xx_pcie_of_match), | ||
| 310 | .suppress_bind_attrs = true, | ||
| 311 | }, | ||
| 312 | }; | ||
| 313 | |||
| 314 | builtin_platform_driver(spear13xx_pcie_driver); | ||
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c new file mode 100644 index 000000000000..d3172d5d3d35 --- /dev/null +++ b/drivers/pci/controller/pci-aardvark.c | |||
| @@ -0,0 +1,978 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Driver for the Aardvark PCIe controller, used on Marvell Armada | ||
| 4 | * 3700. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2016 Marvell | ||
| 7 | * | ||
| 8 | * Author: Hezi Shahmoon <hezi.shahmoon@marvell.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/interrupt.h> | ||
| 13 | #include <linux/irq.h> | ||
| 14 | #include <linux/irqdomain.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/pci.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/of_address.h> | ||
| 20 | #include <linux/of_pci.h> | ||
| 21 | |||
| 22 | #include "../pci.h" | ||
| 23 | |||
| 24 | /* PCIe core registers */ | ||
| 25 | #define PCIE_CORE_CMD_STATUS_REG 0x4 | ||
| 26 | #define PCIE_CORE_CMD_IO_ACCESS_EN BIT(0) | ||
| 27 | #define PCIE_CORE_CMD_MEM_ACCESS_EN BIT(1) | ||
| 28 | #define PCIE_CORE_CMD_MEM_IO_REQ_EN BIT(2) | ||
| 29 | #define PCIE_CORE_DEV_CTRL_STATS_REG 0xc8 | ||
| 30 | #define PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE (0 << 4) | ||
| 31 | #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5 | ||
| 32 | #define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11) | ||
| 33 | #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12 | ||
| 34 | #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2 | ||
| 35 | #define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0 | ||
| 36 | #define PCIE_CORE_LINK_L0S_ENTRY BIT(0) | ||
| 37 | #define PCIE_CORE_LINK_TRAINING BIT(5) | ||
| 38 | #define PCIE_CORE_LINK_WIDTH_SHIFT 20 | ||
| 39 | #define PCIE_CORE_ERR_CAPCTL_REG 0x118 | ||
| 40 | #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX BIT(5) | ||
| 41 | #define PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN BIT(6) | ||
| 42 | #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK BIT(7) | ||
| 43 | #define PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV BIT(8) | ||
| 44 | |||
| 45 | /* PIO registers base address and register offsets */ | ||
| 46 | #define PIO_BASE_ADDR 0x4000 | ||
| 47 | #define PIO_CTRL (PIO_BASE_ADDR + 0x0) | ||
| 48 | #define PIO_CTRL_TYPE_MASK GENMASK(3, 0) | ||
| 49 | #define PIO_CTRL_ADDR_WIN_DISABLE BIT(24) | ||
| 50 | #define PIO_STAT (PIO_BASE_ADDR + 0x4) | ||
| 51 | #define PIO_COMPLETION_STATUS_SHIFT 7 | ||
| 52 | #define PIO_COMPLETION_STATUS_MASK GENMASK(9, 7) | ||
| 53 | #define PIO_COMPLETION_STATUS_OK 0 | ||
| 54 | #define PIO_COMPLETION_STATUS_UR 1 | ||
| 55 | #define PIO_COMPLETION_STATUS_CRS 2 | ||
| 56 | #define PIO_COMPLETION_STATUS_CA 4 | ||
| 57 | #define PIO_NON_POSTED_REQ BIT(0) | ||
| 58 | #define PIO_ADDR_LS (PIO_BASE_ADDR + 0x8) | ||
| 59 | #define PIO_ADDR_MS (PIO_BASE_ADDR + 0xc) | ||
| 60 | #define PIO_WR_DATA (PIO_BASE_ADDR + 0x10) | ||
| 61 | #define PIO_WR_DATA_STRB (PIO_BASE_ADDR + 0x14) | ||
| 62 | #define PIO_RD_DATA (PIO_BASE_ADDR + 0x18) | ||
| 63 | #define PIO_START (PIO_BASE_ADDR + 0x1c) | ||
| 64 | #define PIO_ISR (PIO_BASE_ADDR + 0x20) | ||
| 65 | #define PIO_ISRM (PIO_BASE_ADDR + 0x24) | ||
| 66 | |||
| 67 | /* Aardvark Control registers */ | ||
| 68 | #define CONTROL_BASE_ADDR 0x4800 | ||
| 69 | #define PCIE_CORE_CTRL0_REG (CONTROL_BASE_ADDR + 0x0) | ||
| 70 | #define PCIE_GEN_SEL_MSK 0x3 | ||
| 71 | #define PCIE_GEN_SEL_SHIFT 0x0 | ||
| 72 | #define SPEED_GEN_1 0 | ||
| 73 | #define SPEED_GEN_2 1 | ||
| 74 | #define SPEED_GEN_3 2 | ||
| 75 | #define IS_RC_MSK 1 | ||
| 76 | #define IS_RC_SHIFT 2 | ||
| 77 | #define LANE_CNT_MSK 0x18 | ||
| 78 | #define LANE_CNT_SHIFT 0x3 | ||
| 79 | #define LANE_COUNT_1 (0 << LANE_CNT_SHIFT) | ||
| 80 | #define LANE_COUNT_2 (1 << LANE_CNT_SHIFT) | ||
| 81 | #define LANE_COUNT_4 (2 << LANE_CNT_SHIFT) | ||
| 82 | #define LANE_COUNT_8 (3 << LANE_CNT_SHIFT) | ||
| 83 | #define LINK_TRAINING_EN BIT(6) | ||
| 84 | #define LEGACY_INTA BIT(28) | ||
| 85 | #define LEGACY_INTB BIT(29) | ||
| 86 | #define LEGACY_INTC BIT(30) | ||
| 87 | #define LEGACY_INTD BIT(31) | ||
| 88 | #define PCIE_CORE_CTRL1_REG (CONTROL_BASE_ADDR + 0x4) | ||
| 89 | #define HOT_RESET_GEN BIT(0) | ||
| 90 | #define PCIE_CORE_CTRL2_REG (CONTROL_BASE_ADDR + 0x8) | ||
| 91 | #define PCIE_CORE_CTRL2_RESERVED 0x7 | ||
| 92 | #define PCIE_CORE_CTRL2_TD_ENABLE BIT(4) | ||
| 93 | #define PCIE_CORE_CTRL2_STRICT_ORDER_ENABLE BIT(5) | ||
| 94 | #define PCIE_CORE_CTRL2_OB_WIN_ENABLE BIT(6) | ||
| 95 | #define PCIE_CORE_CTRL2_MSI_ENABLE BIT(10) | ||
| 96 | #define PCIE_ISR0_REG (CONTROL_BASE_ADDR + 0x40) | ||
| 97 | #define PCIE_ISR0_MASK_REG (CONTROL_BASE_ADDR + 0x44) | ||
| 98 | #define PCIE_ISR0_MSI_INT_PENDING BIT(24) | ||
| 99 | #define PCIE_ISR0_INTX_ASSERT(val) BIT(16 + (val)) | ||
| 100 | #define PCIE_ISR0_INTX_DEASSERT(val) BIT(20 + (val)) | ||
| 101 | #define PCIE_ISR0_ALL_MASK GENMASK(26, 0) | ||
| 102 | #define PCIE_ISR1_REG (CONTROL_BASE_ADDR + 0x48) | ||
| 103 | #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C) | ||
| 104 | #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4) | ||
| 105 | #define PCIE_ISR1_FLUSH BIT(5) | ||
| 106 | #define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val)) | ||
| 107 | #define PCIE_ISR1_ALL_MASK GENMASK(11, 4) | ||
| 108 | #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50) | ||
| 109 | #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54) | ||
| 110 | #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58) | ||
| 111 | #define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C) | ||
| 112 | #define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C) | ||
| 113 | |||
| 114 | /* PCIe window configuration */ | ||
| 115 | #define OB_WIN_BASE_ADDR 0x4c00 | ||
| 116 | #define OB_WIN_BLOCK_SIZE 0x20 | ||
| 117 | #define OB_WIN_REG_ADDR(win, offset) (OB_WIN_BASE_ADDR + \ | ||
| 118 | OB_WIN_BLOCK_SIZE * (win) + \ | ||
| 119 | (offset)) | ||
| 120 | #define OB_WIN_MATCH_LS(win) OB_WIN_REG_ADDR(win, 0x00) | ||
| 121 | #define OB_WIN_MATCH_MS(win) OB_WIN_REG_ADDR(win, 0x04) | ||
| 122 | #define OB_WIN_REMAP_LS(win) OB_WIN_REG_ADDR(win, 0x08) | ||
| 123 | #define OB_WIN_REMAP_MS(win) OB_WIN_REG_ADDR(win, 0x0c) | ||
| 124 | #define OB_WIN_MASK_LS(win) OB_WIN_REG_ADDR(win, 0x10) | ||
| 125 | #define OB_WIN_MASK_MS(win) OB_WIN_REG_ADDR(win, 0x14) | ||
| 126 | #define OB_WIN_ACTIONS(win) OB_WIN_REG_ADDR(win, 0x18) | ||
| 127 | |||
| 128 | /* PCIe window types */ | ||
| 129 | #define OB_PCIE_MEM 0x0 | ||
| 130 | #define OB_PCIE_IO 0x4 | ||
| 131 | |||
| 132 | /* LMI registers base address and register offsets */ | ||
| 133 | #define LMI_BASE_ADDR 0x6000 | ||
| 134 | #define CFG_REG (LMI_BASE_ADDR + 0x0) | ||
| 135 | #define LTSSM_SHIFT 24 | ||
| 136 | #define LTSSM_MASK 0x3f | ||
| 137 | #define LTSSM_L0 0x10 | ||
| 138 | #define RC_BAR_CONFIG 0x300 | ||
| 139 | |||
| 140 | /* PCIe core controller registers */ | ||
| 141 | #define CTRL_CORE_BASE_ADDR 0x18000 | ||
| 142 | #define CTRL_CONFIG_REG (CTRL_CORE_BASE_ADDR + 0x0) | ||
| 143 | #define CTRL_MODE_SHIFT 0x0 | ||
| 144 | #define CTRL_MODE_MASK 0x1 | ||
| 145 | #define PCIE_CORE_MODE_DIRECT 0x0 | ||
| 146 | #define PCIE_CORE_MODE_COMMAND 0x1 | ||
| 147 | |||
| 148 | /* PCIe Central Interrupts Registers */ | ||
| 149 | #define CENTRAL_INT_BASE_ADDR 0x1b000 | ||
| 150 | #define HOST_CTRL_INT_STATUS_REG (CENTRAL_INT_BASE_ADDR + 0x0) | ||
| 151 | #define HOST_CTRL_INT_MASK_REG (CENTRAL_INT_BASE_ADDR + 0x4) | ||
| 152 | #define PCIE_IRQ_CMDQ_INT BIT(0) | ||
| 153 | #define PCIE_IRQ_MSI_STATUS_INT BIT(1) | ||
| 154 | #define PCIE_IRQ_CMD_SENT_DONE BIT(3) | ||
| 155 | #define PCIE_IRQ_DMA_INT BIT(4) | ||
| 156 | #define PCIE_IRQ_IB_DXFERDONE BIT(5) | ||
| 157 | #define PCIE_IRQ_OB_DXFERDONE BIT(6) | ||
| 158 | #define PCIE_IRQ_OB_RXFERDONE BIT(7) | ||
| 159 | #define PCIE_IRQ_COMPQ_INT BIT(12) | ||
| 160 | #define PCIE_IRQ_DIR_RD_DDR_DET BIT(13) | ||
| 161 | #define PCIE_IRQ_DIR_WR_DDR_DET BIT(14) | ||
| 162 | #define PCIE_IRQ_CORE_INT BIT(16) | ||
| 163 | #define PCIE_IRQ_CORE_INT_PIO BIT(17) | ||
| 164 | #define PCIE_IRQ_DPMU_INT BIT(18) | ||
| 165 | #define PCIE_IRQ_PCIE_MIS_INT BIT(19) | ||
| 166 | #define PCIE_IRQ_MSI_INT1_DET BIT(20) | ||
| 167 | #define PCIE_IRQ_MSI_INT2_DET BIT(21) | ||
| 168 | #define PCIE_IRQ_RC_DBELL_DET BIT(22) | ||
| 169 | #define PCIE_IRQ_EP_STATUS BIT(23) | ||
| 170 | #define PCIE_IRQ_ALL_MASK 0xfff0fb | ||
| 171 | #define PCIE_IRQ_ENABLE_INTS_MASK PCIE_IRQ_CORE_INT | ||
| 172 | |||
| 173 | /* Transaction types */ | ||
| 174 | #define PCIE_CONFIG_RD_TYPE0 0x8 | ||
| 175 | #define PCIE_CONFIG_RD_TYPE1 0x9 | ||
| 176 | #define PCIE_CONFIG_WR_TYPE0 0xa | ||
| 177 | #define PCIE_CONFIG_WR_TYPE1 0xb | ||
| 178 | |||
| 179 | #define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20) | ||
| 180 | #define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15) | ||
| 181 | #define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12) | ||
| 182 | #define PCIE_CONF_REG(reg) ((reg) & 0xffc) | ||
| 183 | #define PCIE_CONF_ADDR(bus, devfn, where) \ | ||
| 184 | (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ | ||
| 185 | PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where)) | ||
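| | /* Example: bus 1, device 1, function 0, register 0x10 encodes as (1 << 20) | (1 << 15) | 0x10 = 0x00108010. */ | ||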
| 186 | |||
| 187 | #define PIO_TIMEOUT_MS 1 | ||
| 188 | |||
| 189 | #define LINK_WAIT_MAX_RETRIES 10 | ||
| 190 | #define LINK_WAIT_USLEEP_MIN 90000 | ||
| 191 | #define LINK_WAIT_USLEEP_MAX 100000 | ||
| 192 | |||
| 193 | #define MSI_IRQ_NUM 32 | ||
| 194 | |||
| 195 | struct advk_pcie { | ||
| 196 | struct platform_device *pdev; | ||
| 197 | void __iomem *base; | ||
| 198 | struct list_head resources; | ||
| 199 | struct irq_domain *irq_domain; | ||
| 200 | struct irq_chip irq_chip; | ||
| 201 | struct irq_domain *msi_domain; | ||
| 202 | struct irq_domain *msi_inner_domain; | ||
| 203 | struct irq_chip msi_bottom_irq_chip; | ||
| 204 | struct irq_chip msi_irq_chip; | ||
| 205 | struct msi_domain_info msi_domain_info; | ||
| 206 | DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); | ||
| 207 | struct mutex msi_used_lock; | ||
| 208 | u16 msi_msg; | ||
| 209 | int root_bus_nr; | ||
| 210 | }; | ||
| 211 | |||
| 212 | static inline void advk_writel(struct advk_pcie *pcie, u32 val, u64 reg) | ||
| 213 | { | ||
| 214 | writel(val, pcie->base + reg); | ||
| 215 | } | ||
| 216 | |||
| 217 | static inline u32 advk_readl(struct advk_pcie *pcie, u64 reg) | ||
| 218 | { | ||
| 219 | return readl(pcie->base + reg); | ||
| 220 | } | ||
| 221 | |||
| 222 | static int advk_pcie_link_up(struct advk_pcie *pcie) | ||
| 223 | { | ||
| 224 | u32 val, ltssm_state; | ||
| 225 | |||
| 226 | val = advk_readl(pcie, CFG_REG); | ||
| 227 | ltssm_state = (val >> LTSSM_SHIFT) & LTSSM_MASK; | ||
| 228 | return ltssm_state >= LTSSM_L0; | ||
| 229 | } | ||
| 230 | |||
| 231 | static int advk_pcie_wait_for_link(struct advk_pcie *pcie) | ||
| 232 | { | ||
| 233 | struct device *dev = &pcie->pdev->dev; | ||
| 234 | int retries; | ||
| 235 | |||
| 236 | /* Poll until the link comes up or the retries are exhausted */ | ||
| 237 | for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { | ||
| 238 | if (advk_pcie_link_up(pcie)) { | ||
| 239 | dev_info(dev, "link up\n"); | ||
| 240 | return 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); | ||
| 244 | } | ||
| 245 | |||
| 246 | dev_err(dev, "link never came up\n"); | ||
| 247 | return -ETIMEDOUT; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* | ||
| 251 | * Set up one of the PCIe outbound address translation windows, used | ||
| 252 | * to map CPU memory accesses onto the PCIe bus. | ||
| 253 | */ | ||
| 254 | static void advk_pcie_set_ob_win(struct advk_pcie *pcie, | ||
| 255 | u32 win_num, u32 match_ms, | ||
| 256 | u32 match_ls, u32 mask_ms, | ||
| 257 | u32 mask_ls, u32 remap_ms, | ||
| 258 | u32 remap_ls, u32 action) | ||
| 259 | { | ||
| 260 | advk_writel(pcie, match_ls, OB_WIN_MATCH_LS(win_num)); | ||
| 261 | advk_writel(pcie, match_ms, OB_WIN_MATCH_MS(win_num)); | ||
| 262 | advk_writel(pcie, mask_ms, OB_WIN_MASK_MS(win_num)); | ||
| 263 | advk_writel(pcie, mask_ls, OB_WIN_MASK_LS(win_num)); | ||
| 264 | advk_writel(pcie, remap_ms, OB_WIN_REMAP_MS(win_num)); | ||
| 265 | advk_writel(pcie, remap_ls, OB_WIN_REMAP_LS(win_num)); | ||
| 266 | advk_writel(pcie, action, OB_WIN_ACTIONS(win_num)); | ||
| 267 | advk_writel(pcie, match_ls | BIT(0), OB_WIN_MATCH_LS(win_num)); | ||
| 268 | } | ||
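Assuming the usual match/mask window semantics (an address hits the window when its mask-selected bits equal the match value), the 0xF8000000 mask passed in from advk_pcie_parse_request_of_pci_ranges() below keeps only the top five address bits, so each window spans 128 MB. A standalone sketch with hypothetical values:

```c
#include <stdio.h>
#include <stdint.h>

/* Window hit test sketch: hypothetical 128 MB window at 0xe8000000,
 * using the mask_ls value 0xF8000000 seen later in this file. */
int main(void)
{
	uint32_t match = 0xe8000000, mask = 0xF8000000;

	printf("%d\n", (0xe9001234 & mask) == match);	/* 1: inside  */
	printf("%d\n", (0xf0000000 & mask) == match);	/* 0: outside */
	return 0;
}
```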
| 269 | |||
| 270 | static void advk_pcie_setup_hw(struct advk_pcie *pcie) | ||
| 271 | { | ||
| 272 | u32 reg; | ||
| 273 | int i; | ||
| 274 | |||
| 275 | /* Zero out all outbound address windows to start from a known state */ | ||
| 276 | for (i = 0; i < 8; i++) | ||
| 277 | advk_pcie_set_ob_win(pcie, i, 0, 0, 0, 0, 0, 0, 0); | ||
| 278 | |||
| 279 | /* Set to Direct mode */ | ||
| 280 | reg = advk_readl(pcie, CTRL_CONFIG_REG); | ||
| 281 | reg &= ~(CTRL_MODE_MASK << CTRL_MODE_SHIFT); | ||
| 282 | reg |= ((PCIE_CORE_MODE_DIRECT & CTRL_MODE_MASK) << CTRL_MODE_SHIFT); | ||
| 283 | advk_writel(pcie, reg, CTRL_CONFIG_REG); | ||
| 284 | |||
| 285 | /* Set PCI global control register to RC mode */ | ||
| 286 | reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); | ||
| 287 | reg |= (IS_RC_MSK << IS_RC_SHIFT); | ||
| 288 | advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); | ||
| 289 | |||
| 290 | /* Set Advanced Error Capabilities and Control PF0 register */ | ||
| 291 | reg = PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX | | ||
| 292 | PCIE_CORE_ERR_CAPCTL_ECRC_CHK_TX_EN | | ||
| 293 | PCIE_CORE_ERR_CAPCTL_ECRC_CHCK | | ||
| 294 | PCIE_CORE_ERR_CAPCTL_ECRC_CHCK_RCV; | ||
| 295 | advk_writel(pcie, reg, PCIE_CORE_ERR_CAPCTL_REG); | ||
| 296 | |||
| 297 | /* Set PCIe Device Control and Status 1 PF0 register */ | ||
| 298 | reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE | | ||
| 299 | (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) | | ||
| 300 | PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE | | ||
| 301 | (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ << | ||
| 302 | PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT); | ||
| 303 | advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG); | ||
| 304 | |||
| 305 | /* Program PCIe Control 2 to disable strict ordering */ | ||
| 306 | reg = PCIE_CORE_CTRL2_RESERVED | | ||
| 307 | PCIE_CORE_CTRL2_TD_ENABLE; | ||
| 308 | advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); | ||
| 309 | |||
| 310 | /* Set GEN2 */ | ||
| 311 | reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); | ||
| 312 | reg &= ~PCIE_GEN_SEL_MSK; | ||
| 313 | reg |= SPEED_GEN_2; | ||
| 314 | advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); | ||
| 315 | |||
| 316 | /* Set lane X1 */ | ||
| 317 | reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); | ||
| 318 | reg &= ~LANE_CNT_MSK; | ||
| 319 | reg |= LANE_COUNT_1; | ||
| 320 | advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); | ||
| 321 | |||
| 322 | /* Enable link training */ | ||
| 323 | reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG); | ||
| 324 | reg |= LINK_TRAINING_EN; | ||
| 325 | advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG); | ||
| 326 | |||
| 327 | /* Enable MSI */ | ||
| 328 | reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); | ||
| 329 | reg |= PCIE_CORE_CTRL2_MSI_ENABLE; | ||
| 330 | advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); | ||
| 331 | |||
| 332 | /* Clear all interrupts */ | ||
| 333 | advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG); | ||
| 334 | advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG); | ||
| 335 | advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG); | ||
| 336 | |||
| 337 | /* Disable All ISR0/1 Sources */ | ||
| 338 | reg = PCIE_ISR0_ALL_MASK; | ||
| 339 | reg &= ~PCIE_ISR0_MSI_INT_PENDING; | ||
| 340 | advk_writel(pcie, reg, PCIE_ISR0_MASK_REG); | ||
| 341 | |||
| 342 | advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG); | ||
| 343 | |||
| 344 | /* Unmask all MSIs */ | ||
| 345 | advk_writel(pcie, 0, PCIE_MSI_MASK_REG); | ||
| 346 | |||
| 347 | /* Enable summary interrupt for GIC SPI source */ | ||
| 348 | reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK); | ||
| 349 | advk_writel(pcie, reg, HOST_CTRL_INT_MASK_REG); | ||
| 350 | |||
| 351 | reg = advk_readl(pcie, PCIE_CORE_CTRL2_REG); | ||
| 352 | reg |= PCIE_CORE_CTRL2_OB_WIN_ENABLE; | ||
| 353 | advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG); | ||
| 354 | |||
| 355 | /* Bypass the address window mapping for PIO */ | ||
| 356 | reg = advk_readl(pcie, PIO_CTRL); | ||
| 357 | reg |= PIO_CTRL_ADDR_WIN_DISABLE; | ||
| 358 | advk_writel(pcie, reg, PIO_CTRL); | ||
| 359 | |||
| 360 | /* Start link training */ | ||
| 361 | reg = advk_readl(pcie, PCIE_CORE_LINK_CTRL_STAT_REG); | ||
| 362 | reg |= PCIE_CORE_LINK_TRAINING; | ||
| 363 | advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); | ||
| 364 | |||
| 365 | advk_pcie_wait_for_link(pcie); | ||
| 366 | |||
| 367 | reg = PCIE_CORE_LINK_L0S_ENTRY | | ||
| 368 | (1 << PCIE_CORE_LINK_WIDTH_SHIFT); | ||
| 369 | advk_writel(pcie, reg, PCIE_CORE_LINK_CTRL_STAT_REG); | ||
| 370 | |||
| 371 | reg = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG); | ||
| 372 | reg |= PCIE_CORE_CMD_MEM_ACCESS_EN | | ||
| 373 | PCIE_CORE_CMD_IO_ACCESS_EN | | ||
| 374 | PCIE_CORE_CMD_MEM_IO_REQ_EN; | ||
| 375 | advk_writel(pcie, reg, PCIE_CORE_CMD_STATUS_REG); | ||
| 376 | } | ||
| 377 | |||
| 378 | static void advk_pcie_check_pio_status(struct advk_pcie *pcie) | ||
| 379 | { | ||
| 380 | struct device *dev = &pcie->pdev->dev; | ||
| 381 | u32 reg; | ||
| 382 | unsigned int status; | ||
| 383 | char *strcomp_status, *str_posted; | ||
| 384 | |||
| 385 | reg = advk_readl(pcie, PIO_STAT); | ||
| 386 | status = (reg & PIO_COMPLETION_STATUS_MASK) >> | ||
| 387 | PIO_COMPLETION_STATUS_SHIFT; | ||
| 388 | |||
| 389 | if (!status) | ||
| 390 | return; | ||
| 391 | |||
| 392 | switch (status) { | ||
| 393 | case PIO_COMPLETION_STATUS_UR: | ||
| 394 | strcomp_status = "UR"; | ||
| 395 | break; | ||
| 396 | case PIO_COMPLETION_STATUS_CRS: | ||
| 397 | strcomp_status = "CRS"; | ||
| 398 | break; | ||
| 399 | case PIO_COMPLETION_STATUS_CA: | ||
| 400 | strcomp_status = "CA"; | ||
| 401 | break; | ||
| 402 | default: | ||
| 403 | strcomp_status = "Unknown"; | ||
| 404 | break; | ||
| 405 | } | ||
| 406 | |||
| 407 | if (reg & PIO_NON_POSTED_REQ) | ||
| 408 | str_posted = "Non-posted"; | ||
| 409 | else | ||
| 410 | str_posted = "Posted"; | ||
| 411 | |||
| 412 | dev_err(dev, "%s PIO Response Status: %s, %#x @ %#x\n", | ||
| 413 | str_posted, strcomp_status, reg, advk_readl(pcie, PIO_ADDR_LS)); | ||
| 414 | } | ||
| 415 | |||
| 416 | static int advk_pcie_wait_pio(struct advk_pcie *pcie) | ||
| 417 | { | ||
| 418 | struct device *dev = &pcie->pdev->dev; | ||
| 419 | unsigned long timeout; | ||
| 420 | |||
| 421 | timeout = jiffies + msecs_to_jiffies(PIO_TIMEOUT_MS); | ||
| 422 | |||
| 423 | while (time_before(jiffies, timeout)) { | ||
| 424 | u32 start, isr; | ||
| 425 | |||
| 426 | start = advk_readl(pcie, PIO_START); | ||
| 427 | isr = advk_readl(pcie, PIO_ISR); | ||
| 428 | if (!start && isr) | ||
| 429 | return 0; | ||
| 430 | } | ||
| 431 | |||
| 432 | dev_err(dev, "config read/write timed out\n"); | ||
| 433 | return -ETIMEDOUT; | ||
| 434 | } | ||
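One caveat worth noting: the loop above polls PIO_START/PIO_ISR without sleeping, and with a 1 ms jiffies deadline a tick boundary can expire the loop after very few iterations. One possible hardening, sketched here with made-up constants (this is not the driver's code), is a bounded retry count with a short fixed delay:

```c
#include <linux/delay.h>

/* Hypothetical constants for this sketch */
#define PIO_RETRY_CNT	500
#define PIO_RETRY_DELAY	2	/* microseconds */

static int advk_pcie_wait_pio_sketch(struct advk_pcie *pcie)
{
	int i;

	for (i = 0; i < PIO_RETRY_CNT; i++) {
		u32 start = advk_readl(pcie, PIO_START);
		u32 isr = advk_readl(pcie, PIO_ISR);

		if (!start && isr)
			return 0;
		udelay(PIO_RETRY_DELAY);
	}

	return -ETIMEDOUT;
}
```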
| 435 | |||
| 436 | static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, | ||
| 437 | int where, int size, u32 *val) | ||
| 438 | { | ||
| 439 | struct advk_pcie *pcie = bus->sysdata; | ||
| 440 | u32 reg; | ||
| 441 | int ret; | ||
| 442 | |||
| 443 | if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) { | ||
| 444 | *val = 0xffffffff; | ||
| 445 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 446 | } | ||
| 447 | |||
| 448 | /* Start PIO */ | ||
| 449 | advk_writel(pcie, 0, PIO_START); | ||
| 450 | advk_writel(pcie, 1, PIO_ISR); | ||
| 451 | |||
| 452 | /* Program the control register */ | ||
| 453 | reg = advk_readl(pcie, PIO_CTRL); | ||
| 454 | reg &= ~PIO_CTRL_TYPE_MASK; | ||
| 455 | if (bus->number == pcie->root_bus_nr) | ||
| 456 | reg |= PCIE_CONFIG_RD_TYPE0; | ||
| 457 | else | ||
| 458 | reg |= PCIE_CONFIG_RD_TYPE1; | ||
| 459 | advk_writel(pcie, reg, PIO_CTRL); | ||
| 460 | |||
| 461 | /* Program the address registers */ | ||
| 462 | reg = PCIE_CONF_ADDR(bus->number, devfn, where); | ||
| 463 | advk_writel(pcie, reg, PIO_ADDR_LS); | ||
| 464 | advk_writel(pcie, 0, PIO_ADDR_MS); | ||
| 465 | |||
| 466 | /* Program the data strobe */ | ||
| 467 | advk_writel(pcie, 0xf, PIO_WR_DATA_STRB); | ||
| 468 | |||
| 469 | /* Start the transfer */ | ||
| 470 | advk_writel(pcie, 1, PIO_START); | ||
| 471 | |||
| 472 | ret = advk_pcie_wait_pio(pcie); | ||
| 473 | if (ret < 0) | ||
| 474 | return PCIBIOS_SET_FAILED; | ||
| 475 | |||
| 476 | advk_pcie_check_pio_status(pcie); | ||
| 477 | |||
| 478 | /* Get the read result */ | ||
| 479 | *val = advk_readl(pcie, PIO_RD_DATA); | ||
| 480 | if (size == 1) | ||
| 481 | *val = (*val >> (8 * (where & 3))) & 0xff; | ||
| 482 | else if (size == 2) | ||
| 483 | *val = (*val >> (8 * (where & 3))) & 0xffff; | ||
| 484 | |||
| 485 | return PCIBIOS_SUCCESSFUL; | ||
| 486 | } | ||
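The tail of advk_pcie_rd_conf() reflects the fact that the hardware always returns a full 32-bit dword; sub-word reads are carved out in software. (Note also that on a PIO timeout the function returns PCIBIOS_SET_FAILED without filling *val, while callers conventionally expect all-ones on a failed read.) A standalone sketch of the extraction:

```c
#include <stdio.h>
#include <stdint.h>

/* Carve a 1-, 2- or 4-byte field out of a 32-bit config dword,
 * as the end of advk_pcie_rd_conf() does. */
static uint32_t extract(uint32_t dword, int where, int size)
{
	uint32_t v = dword >> (8 * (where & 3));

	if (size == 1)
		return v & 0xff;
	if (size == 2)
		return v & 0xffff;
	return dword;
}

int main(void)
{
	/* Hypothetical vendor/device dword 0x12348086 */
	printf("0x%04x\n", extract(0x12348086, 0x00, 2));	/* 0x8086 */
	printf("0x%02x\n", extract(0x12348086, 0x03, 1));	/* 0x12   */
	return 0;
}
```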
| 487 | |||
| 488 | static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
| 489 | int where, int size, u32 val) | ||
| 490 | { | ||
| 491 | struct advk_pcie *pcie = bus->sysdata; | ||
| 492 | u32 reg; | ||
| 493 | u32 data_strobe = 0x0; | ||
| 494 | int offset; | ||
| 495 | int ret; | ||
| 496 | |||
| 497 | if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) | ||
| 498 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 499 | |||
| 500 | if (where % size) | ||
| 501 | return PCIBIOS_SET_FAILED; | ||
| 502 | |||
| 503 | /* Start PIO */ | ||
| 504 | advk_writel(pcie, 0, PIO_START); | ||
| 505 | advk_writel(pcie, 1, PIO_ISR); | ||
| 506 | |||
| 507 | /* Program the control register */ | ||
| 508 | reg = advk_readl(pcie, PIO_CTRL); | ||
| 509 | reg &= ~PIO_CTRL_TYPE_MASK; | ||
| 510 | if (bus->number == pcie->root_bus_nr) | ||
| 511 | reg |= PCIE_CONFIG_WR_TYPE0; | ||
| 512 | else | ||
| 513 | reg |= PCIE_CONFIG_WR_TYPE1; | ||
| 514 | advk_writel(pcie, reg, PIO_CTRL); | ||
| 515 | |||
| 516 | /* Program the address registers */ | ||
| 517 | reg = PCIE_CONF_ADDR(bus->number, devfn, where); | ||
| 518 | advk_writel(pcie, reg, PIO_ADDR_LS); | ||
| 519 | advk_writel(pcie, 0, PIO_ADDR_MS); | ||
| 520 | |||
| 521 | /* Calculate the write strobe */ | ||
| 522 | offset = where & 0x3; | ||
| 523 | reg = val << (8 * offset); | ||
| 524 | data_strobe = GENMASK(size - 1, 0) << offset; | ||
| 525 | |||
| 526 | /* Program the data register */ | ||
| 527 | advk_writel(pcie, reg, PIO_WR_DATA); | ||
| 528 | |||
| 529 | /* Program the data strobe */ | ||
| 530 | advk_writel(pcie, data_strobe, PIO_WR_DATA_STRB); | ||
| 531 | |||
| 532 | /* Start the transfer */ | ||
| 533 | advk_writel(pcie, 1, PIO_START); | ||
| 534 | |||
| 535 | ret = advk_pcie_wait_pio(pcie); | ||
| 536 | if (ret < 0) | ||
| 537 | return PCIBIOS_SET_FAILED; | ||
| 538 | |||
| 539 | advk_pcie_check_pio_status(pcie); | ||
| 540 | |||
| 541 | return PCIBIOS_SUCCESSFUL; | ||
| 542 | } | ||
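The write path is symmetric: the value is shifted into the right byte lanes of the 32-bit data word, and the strobe marks which lanes are valid (GENMASK(size - 1, 0) expands to the low size bits set). A standalone sketch of the arithmetic:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int where = 0x06, size = 2;	/* 16-bit write at dword offset 2 */
	int offset = where & 0x3;
	uint32_t data = 0xbeef << (8 * offset);
	uint32_t strobe = ((1u << size) - 1) << offset;

	printf("data   = 0x%08x\n", data);	/* 0xbeef0000 */
	printf("strobe = 0x%x\n", strobe);	/* 0xc: byte lanes 2 and 3 */
	return 0;
}
```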
| 543 | |||
| 544 | static struct pci_ops advk_pcie_ops = { | ||
| 545 | .read = advk_pcie_rd_conf, | ||
| 546 | .write = advk_pcie_wr_conf, | ||
| 547 | }; | ||
| 548 | |||
| 549 | static void advk_msi_irq_compose_msi_msg(struct irq_data *data, | ||
| 550 | struct msi_msg *msg) | ||
| 551 | { | ||
| 552 | struct advk_pcie *pcie = irq_data_get_irq_chip_data(data); | ||
| 553 | phys_addr_t msi_msg = virt_to_phys(&pcie->msi_msg); | ||
| 554 | |||
| 555 | msg->address_lo = lower_32_bits(msi_msg); | ||
| 556 | msg->address_hi = upper_32_bits(msi_msg); | ||
| 557 | msg->data = data->irq; | ||
| 558 | } | ||
| 559 | |||
| 560 | static int advk_msi_set_affinity(struct irq_data *irq_data, | ||
| 561 | const struct cpumask *mask, bool force) | ||
| 562 | { | ||
| 563 | return -EINVAL; | ||
| 564 | } | ||
| 565 | |||
| 566 | static int advk_msi_irq_domain_alloc(struct irq_domain *domain, | ||
| 567 | unsigned int virq, | ||
| 568 | unsigned int nr_irqs, void *args) | ||
| 569 | { | ||
| 570 | struct advk_pcie *pcie = domain->host_data; | ||
| 571 | int hwirq, i; | ||
| 572 | |||
| 573 | mutex_lock(&pcie->msi_used_lock); | ||
| 574 | hwirq = bitmap_find_next_zero_area(pcie->msi_used, MSI_IRQ_NUM, | ||
| 575 | 0, nr_irqs, 0); | ||
| 576 | if (hwirq >= MSI_IRQ_NUM) { | ||
| 577 | mutex_unlock(&pcie->msi_used_lock); | ||
| 578 | return -ENOSPC; | ||
| 579 | } | ||
| 580 | |||
| 581 | bitmap_set(pcie->msi_used, hwirq, nr_irqs); | ||
| 582 | mutex_unlock(&pcie->msi_used_lock); | ||
| 583 | |||
| 584 | for (i = 0; i < nr_irqs; i++) | ||
| 585 | irq_domain_set_info(domain, virq + i, hwirq + i, | ||
| 586 | &pcie->msi_bottom_irq_chip, | ||
| 587 | domain->host_data, handle_simple_irq, | ||
| 588 | NULL, NULL); | ||
| 589 | |||
| 590 | return hwirq; | ||
| 591 | } | ||
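The allocator above is the standard contiguous-bitmap pattern: find a run of nr_irqs free bits under the mutex, mark the run used, and hand the first index back as the hwirq base. A tiny userspace sketch of the same idea (hypothetical names, count kept below the bitmap width):

```c
#include <stdio.h>

#define NBITS 32
static unsigned long used;	/* bit i set => hwirq i taken */

/* Find 'count' contiguous free bits (count < NBITS), mark them used,
 * return the first index, or -1 if no room (-ENOSPC equivalent). */
static int alloc_range(unsigned int count)
{
	unsigned int start;

	for (start = 0; start + count <= NBITS; start++) {
		unsigned long mask = ((1UL << count) - 1) << start;

		if (!(used & mask)) {
			used |= mask;
			return (int)start;
		}
	}
	return -1;
}

int main(void)
{
	printf("%d\n", alloc_range(4));	/* 0 */
	printf("%d\n", alloc_range(2));	/* 4 */
	return 0;
}
```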
| 592 | |||
| 593 | static void advk_msi_irq_domain_free(struct irq_domain *domain, | ||
| 594 | unsigned int virq, unsigned int nr_irqs) | ||
| 595 | { | ||
| 596 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
| 597 | struct advk_pcie *pcie = domain->host_data; | ||
| 598 | |||
| 599 | mutex_lock(&pcie->msi_used_lock); | ||
| 600 | bitmap_clear(pcie->msi_used, d->hwirq, nr_irqs); | ||
| 601 | mutex_unlock(&pcie->msi_used_lock); | ||
| 602 | } | ||
| 603 | |||
| 604 | static const struct irq_domain_ops advk_msi_domain_ops = { | ||
| 605 | .alloc = advk_msi_irq_domain_alloc, | ||
| 606 | .free = advk_msi_irq_domain_free, | ||
| 607 | }; | ||
| 608 | |||
| 609 | static void advk_pcie_irq_mask(struct irq_data *d) | ||
| 610 | { | ||
| 611 | struct advk_pcie *pcie = d->domain->host_data; | ||
| 612 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | ||
| 613 | u32 mask; | ||
| 614 | |||
| 615 | mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); | ||
| 616 | mask |= PCIE_ISR1_INTX_ASSERT(hwirq); | ||
| 617 | advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); | ||
| 618 | } | ||
| 619 | |||
| 620 | static void advk_pcie_irq_unmask(struct irq_data *d) | ||
| 621 | { | ||
| 622 | struct advk_pcie *pcie = d->domain->host_data; | ||
| 623 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | ||
| 624 | u32 mask; | ||
| 625 | |||
| 626 | mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); | ||
| 627 | mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq); | ||
| 628 | advk_writel(pcie, mask, PCIE_ISR1_MASK_REG); | ||
| 629 | } | ||
| 630 | |||
| 631 | static int advk_pcie_irq_map(struct irq_domain *h, | ||
| 632 | unsigned int virq, irq_hw_number_t hwirq) | ||
| 633 | { | ||
| 634 | struct advk_pcie *pcie = h->host_data; | ||
| 635 | |||
| 636 | advk_pcie_irq_mask(irq_get_irq_data(virq)); | ||
| 637 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
| 638 | irq_set_chip_and_handler(virq, &pcie->irq_chip, | ||
| 639 | handle_level_irq); | ||
| 640 | irq_set_chip_data(virq, pcie); | ||
| 641 | |||
| 642 | return 0; | ||
| 643 | } | ||
| 644 | |||
| 645 | static const struct irq_domain_ops advk_pcie_irq_domain_ops = { | ||
| 646 | .map = advk_pcie_irq_map, | ||
| 647 | .xlate = irq_domain_xlate_onecell, | ||
| 648 | }; | ||
| 649 | |||
| 650 | static int advk_pcie_init_msi_irq_domain(struct advk_pcie *pcie) | ||
| 651 | { | ||
| 652 | struct device *dev = &pcie->pdev->dev; | ||
| 653 | struct device_node *node = dev->of_node; | ||
| 654 | struct irq_chip *bottom_ic, *msi_ic; | ||
| 655 | struct msi_domain_info *msi_di; | ||
| 656 | phys_addr_t msi_msg_phys; | ||
| 657 | |||
| 658 | mutex_init(&pcie->msi_used_lock); | ||
| 659 | |||
| 660 | bottom_ic = &pcie->msi_bottom_irq_chip; | ||
| 661 | |||
| 662 | bottom_ic->name = "MSI"; | ||
| 663 | bottom_ic->irq_compose_msi_msg = advk_msi_irq_compose_msi_msg; | ||
| 664 | bottom_ic->irq_set_affinity = advk_msi_set_affinity; | ||
| 665 | |||
| 666 | msi_ic = &pcie->msi_irq_chip; | ||
| 667 | msi_ic->name = "advk-MSI"; | ||
| 668 | |||
| 669 | msi_di = &pcie->msi_domain_info; | ||
| 670 | msi_di->flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 671 | MSI_FLAG_MULTI_PCI_MSI; | ||
| 672 | msi_di->chip = msi_ic; | ||
| 673 | |||
| 674 | msi_msg_phys = virt_to_phys(&pcie->msi_msg); | ||
| 675 | |||
| 676 | advk_writel(pcie, lower_32_bits(msi_msg_phys), | ||
| 677 | PCIE_MSI_ADDR_LOW_REG); | ||
| 678 | advk_writel(pcie, upper_32_bits(msi_msg_phys), | ||
| 679 | PCIE_MSI_ADDR_HIGH_REG); | ||
| 680 | |||
| 681 | pcie->msi_inner_domain = | ||
| 682 | irq_domain_add_linear(NULL, MSI_IRQ_NUM, | ||
| 683 | &advk_msi_domain_ops, pcie); | ||
| 684 | if (!pcie->msi_inner_domain) | ||
| 685 | return -ENOMEM; | ||
| 686 | |||
| 687 | pcie->msi_domain = | ||
| 688 | pci_msi_create_irq_domain(of_node_to_fwnode(node), | ||
| 689 | msi_di, pcie->msi_inner_domain); | ||
| 690 | if (!pcie->msi_domain) { | ||
| 691 | irq_domain_remove(pcie->msi_inner_domain); | ||
| 692 | return -ENOMEM; | ||
| 693 | } | ||
| 694 | |||
| 695 | return 0; | ||
| 696 | } | ||
| 697 | |||
| 698 | static void advk_pcie_remove_msi_irq_domain(struct advk_pcie *pcie) | ||
| 699 | { | ||
| 700 | irq_domain_remove(pcie->msi_domain); | ||
| 701 | irq_domain_remove(pcie->msi_inner_domain); | ||
| 702 | } | ||
| 703 | |||
| 704 | static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) | ||
| 705 | { | ||
| 706 | struct device *dev = &pcie->pdev->dev; | ||
| 707 | struct device_node *node = dev->of_node; | ||
| 708 | struct device_node *pcie_intc_node; | ||
| 709 | struct irq_chip *irq_chip; | ||
| 710 | |||
| 711 | pcie_intc_node = of_get_next_child(node, NULL); | ||
| 712 | if (!pcie_intc_node) { | ||
| 713 | dev_err(dev, "No PCIe Intc node found\n"); | ||
| 714 | return -ENODEV; | ||
| 715 | } | ||
| 716 | |||
| 717 | irq_chip = &pcie->irq_chip; | ||
| 718 | |||
| 719 | irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", | ||
| 720 | dev_name(dev)); | ||
| 721 | if (!irq_chip->name) { | ||
| 722 | of_node_put(pcie_intc_node); | ||
| 723 | return -ENOMEM; | ||
| 724 | } | ||
| 725 | |||
| 726 | irq_chip->irq_mask = advk_pcie_irq_mask; | ||
| 727 | irq_chip->irq_mask_ack = advk_pcie_irq_mask; | ||
| 728 | irq_chip->irq_unmask = advk_pcie_irq_unmask; | ||
| 729 | |||
| 730 | pcie->irq_domain = | ||
| 731 | irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | ||
| 732 | &advk_pcie_irq_domain_ops, pcie); | ||
| 733 | if (!pcie->irq_domain) { | ||
| 734 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
| 735 | of_node_put(pcie_intc_node); | ||
| 736 | return -ENOMEM; | ||
| 737 | } | ||
| 738 | |||
| 739 | return 0; | ||
| 740 | } | ||
| 741 | |||
| 742 | static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) | ||
| 743 | { | ||
| 744 | irq_domain_remove(pcie->irq_domain); | ||
| 745 | } | ||
| 746 | |||
| 747 | static void advk_pcie_handle_msi(struct advk_pcie *pcie) | ||
| 748 | { | ||
| 749 | u32 msi_val, msi_mask, msi_status, msi_idx; | ||
| 750 | u16 msi_data; | ||
| 751 | |||
| 752 | msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG); | ||
| 753 | msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG); | ||
| 754 | msi_status = msi_val & ~msi_mask; | ||
| 755 | |||
| 756 | for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) { | ||
| 757 | if (!(BIT(msi_idx) & msi_status)) | ||
| 758 | continue; | ||
| 759 | |||
| 760 | advk_writel(pcie, BIT(msi_idx), PCIE_MSI_STATUS_REG); | ||
| 761 | msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF; | ||
| 762 | generic_handle_irq(msi_data); | ||
| 763 | } | ||
| 764 | |||
| 765 | advk_writel(pcie, PCIE_ISR0_MSI_INT_PENDING, | ||
| 766 | PCIE_ISR0_REG); | ||
| 767 | } | ||
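The scan over msi_status could equivalently use the kernel's for_each_set_bit() helper from <linux/bitops.h>, which skips clear bits instead of testing all 32 positions. A sketch of how the loop body above could be written with it (fragment in the context of the function above, not standalone code):

```c
/* Equivalent scan using for_each_set_bit(); 'status' must be an
 * unsigned long for the helper's sake. Sketch only. */
unsigned long status = msi_status;
unsigned int bit;

for_each_set_bit(bit, &status, MSI_IRQ_NUM) {
	advk_writel(pcie, BIT(bit), PCIE_MSI_STATUS_REG);
	msi_data = advk_readl(pcie, PCIE_MSI_PAYLOAD_REG) & 0xFF;
	generic_handle_irq(msi_data);
}
```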
| 768 | |||
| 769 | static void advk_pcie_handle_int(struct advk_pcie *pcie) | ||
| 770 | { | ||
| 771 | u32 isr0_val, isr0_mask, isr0_status; | ||
| 772 | u32 isr1_val, isr1_mask, isr1_status; | ||
| 773 | int i, virq; | ||
| 774 | |||
| 775 | isr0_val = advk_readl(pcie, PCIE_ISR0_REG); | ||
| 776 | isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG); | ||
| 777 | isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK); | ||
| 778 | |||
| 779 | isr1_val = advk_readl(pcie, PCIE_ISR1_REG); | ||
| 780 | isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG); | ||
| 781 | isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK); | ||
| 782 | |||
| 783 | if (!isr0_status && !isr1_status) { | ||
| 784 | advk_writel(pcie, isr0_val, PCIE_ISR0_REG); | ||
| 785 | advk_writel(pcie, isr1_val, PCIE_ISR1_REG); | ||
| 786 | return; | ||
| 787 | } | ||
| 788 | |||
| 789 | /* Process MSI interrupts */ | ||
| 790 | if (isr0_status & PCIE_ISR0_MSI_INT_PENDING) | ||
| 791 | advk_pcie_handle_msi(pcie); | ||
| 792 | |||
| 793 | /* Process legacy interrupts */ | ||
| 794 | for (i = 0; i < PCI_NUM_INTX; i++) { | ||
| 795 | if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i))) | ||
| 796 | continue; | ||
| 797 | |||
| 798 | advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i), | ||
| 799 | PCIE_ISR1_REG); | ||
| 800 | |||
| 801 | virq = irq_find_mapping(pcie->irq_domain, i); | ||
| 802 | generic_handle_irq(virq); | ||
| 803 | } | ||
| 804 | } | ||
| 805 | |||
| 806 | static irqreturn_t advk_pcie_irq_handler(int irq, void *arg) | ||
| 807 | { | ||
| 808 | struct advk_pcie *pcie = arg; | ||
| 809 | u32 status; | ||
| 810 | |||
| 811 | status = advk_readl(pcie, HOST_CTRL_INT_STATUS_REG); | ||
| 812 | if (!(status & PCIE_IRQ_CORE_INT)) | ||
| 813 | return IRQ_NONE; | ||
| 814 | |||
| 815 | advk_pcie_handle_int(pcie); | ||
| 816 | |||
| 817 | /* Clear interrupt */ | ||
| 818 | advk_writel(pcie, PCIE_IRQ_CORE_INT, HOST_CTRL_INT_STATUS_REG); | ||
| 819 | |||
| 820 | return IRQ_HANDLED; | ||
| 821 | } | ||
| 822 | |||
| 823 | static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie) | ||
| 824 | { | ||
| 825 | int err, res_valid = 0; | ||
| 826 | struct device *dev = &pcie->pdev->dev; | ||
| 827 | struct resource_entry *win, *tmp; | ||
| 828 | resource_size_t iobase; | ||
| 829 | |||
| 830 | INIT_LIST_HEAD(&pcie->resources); | ||
| 831 | |||
| 832 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 833 | &pcie->resources, &iobase); | ||
| 834 | if (err) | ||
| 835 | return err; | ||
| 836 | |||
| 837 | err = devm_request_pci_bus_resources(dev, &pcie->resources); | ||
| 838 | if (err) | ||
| 839 | goto out_release_res; | ||
| 840 | |||
| 841 | resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { | ||
| 842 | struct resource *res = win->res; | ||
| 843 | |||
| 844 | switch (resource_type(res)) { | ||
| 845 | case IORESOURCE_IO: | ||
| 846 | advk_pcie_set_ob_win(pcie, 1, | ||
| 847 | upper_32_bits(res->start), | ||
| 848 | lower_32_bits(res->start), | ||
| 849 | 0, 0xF8000000, 0, | ||
| 850 | lower_32_bits(res->start), | ||
| 851 | OB_PCIE_IO); | ||
| 852 | err = pci_remap_iospace(res, iobase); | ||
| 853 | if (err) { | ||
| 854 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
| 855 | err, res); | ||
| 856 | resource_list_destroy_entry(win); | ||
| 857 | } | ||
| 858 | break; | ||
| 859 | case IORESOURCE_MEM: | ||
| 860 | advk_pcie_set_ob_win(pcie, 0, | ||
| 861 | upper_32_bits(res->start), | ||
| 862 | lower_32_bits(res->start), | ||
| 863 | 0x0, 0xF8000000, 0, | ||
| 864 | lower_32_bits(res->start), | ||
| 865 | (2 << 20) | OB_PCIE_MEM); | ||
| 866 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); | ||
| 867 | break; | ||
| 868 | case IORESOURCE_BUS: | ||
| 869 | pcie->root_bus_nr = res->start; | ||
| 870 | break; | ||
| 871 | } | ||
| 872 | } | ||
| 873 | |||
| 874 | if (!res_valid) { | ||
| 875 | dev_err(dev, "non-prefetchable memory resource required\n"); | ||
| 876 | err = -EINVAL; | ||
| 877 | goto out_release_res; | ||
| 878 | } | ||
| 879 | |||
| 880 | return 0; | ||
| 881 | |||
| 882 | out_release_res: | ||
| 883 | pci_free_resource_list(&pcie->resources); | ||
| 884 | return err; | ||
| 885 | } | ||
| 886 | |||
| 887 | static int advk_pcie_probe(struct platform_device *pdev) | ||
| 888 | { | ||
| 889 | struct device *dev = &pdev->dev; | ||
| 890 | struct advk_pcie *pcie; | ||
| 891 | struct resource *res; | ||
| 892 | struct pci_bus *bus, *child; | ||
| 893 | struct pci_host_bridge *bridge; | ||
| 894 | int ret, irq; | ||
| 895 | |||
| 896 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); | ||
| 897 | if (!bridge) | ||
| 898 | return -ENOMEM; | ||
| 899 | |||
| 900 | pcie = pci_host_bridge_priv(bridge); | ||
| 901 | pcie->pdev = pdev; | ||
| 902 | |||
| 903 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 904 | pcie->base = devm_ioremap_resource(dev, res); | ||
| 905 | if (IS_ERR(pcie->base)) | ||
| 906 | return PTR_ERR(pcie->base); | ||
| 907 | |||
| 908 | irq = platform_get_irq(pdev, 0); | ||
| 909 | ret = devm_request_irq(dev, irq, advk_pcie_irq_handler, | ||
| 910 | IRQF_SHARED | IRQF_NO_THREAD, "advk-pcie", | ||
| 911 | pcie); | ||
| 912 | if (ret) { | ||
| 913 | dev_err(dev, "Failed to register interrupt\n"); | ||
| 914 | return ret; | ||
| 915 | } | ||
| 916 | |||
| 917 | ret = advk_pcie_parse_request_of_pci_ranges(pcie); | ||
| 918 | if (ret) { | ||
| 919 | dev_err(dev, "Failed to parse resources\n"); | ||
| 920 | return ret; | ||
| 921 | } | ||
| 922 | |||
| 923 | advk_pcie_setup_hw(pcie); | ||
| 924 | |||
| 925 | ret = advk_pcie_init_irq_domain(pcie); | ||
| 926 | if (ret) { | ||
| 927 | dev_err(dev, "Failed to initialize irq\n"); | ||
| 928 | return ret; | ||
| 929 | } | ||
| 930 | |||
| 931 | ret = advk_pcie_init_msi_irq_domain(pcie); | ||
| 932 | if (ret) { | ||
| 933 | dev_err(dev, "Failed to initialize MSI IRQ domain\n"); | ||
| 934 | advk_pcie_remove_irq_domain(pcie); | ||
| 935 | return ret; | ||
| 936 | } | ||
| 937 | |||
| 938 | list_splice_init(&pcie->resources, &bridge->windows); | ||
| 939 | bridge->dev.parent = dev; | ||
| 940 | bridge->sysdata = pcie; | ||
| 941 | bridge->busnr = 0; | ||
| 942 | bridge->ops = &advk_pcie_ops; | ||
| 943 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 944 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 945 | |||
| 946 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 947 | if (ret < 0) { | ||
| 948 | advk_pcie_remove_msi_irq_domain(pcie); | ||
| 949 | advk_pcie_remove_irq_domain(pcie); | ||
| 950 | return ret; | ||
| 951 | } | ||
| 952 | |||
| 953 | bus = bridge->bus; | ||
| 954 | |||
| 955 | pci_bus_assign_resources(bus); | ||
| 956 | |||
| 957 | list_for_each_entry(child, &bus->children, node) | ||
| 958 | pcie_bus_configure_settings(child); | ||
| 959 | |||
| 960 | pci_bus_add_devices(bus); | ||
| 961 | return 0; | ||
| 962 | } | ||
| 963 | |||
| 964 | static const struct of_device_id advk_pcie_of_match_table[] = { | ||
| 965 | { .compatible = "marvell,armada-3700-pcie", }, | ||
| 966 | {}, | ||
| 967 | }; | ||
| 968 | |||
| 969 | static struct platform_driver advk_pcie_driver = { | ||
| 970 | .driver = { | ||
| 971 | .name = "advk-pcie", | ||
| 972 | .of_match_table = advk_pcie_of_match_table, | ||
| 973 | /* Driver unloading/unbinding currently not supported */ | ||
| 974 | .suppress_bind_attrs = true, | ||
| 975 | }, | ||
| 976 | .probe = advk_pcie_probe, | ||
| 977 | }; | ||
| 978 | builtin_platform_driver(advk_pcie_driver); | ||
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c new file mode 100644 index 000000000000..a1ebe9ed441f --- /dev/null +++ b/drivers/pci/controller/pci-ftpci100.c | |||
| @@ -0,0 +1,619 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Support for Faraday Technology FTPCI100 PCI Controller | ||
| 4 | * | ||
| 5 | * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> | ||
| 6 | * | ||
| 7 | * Based on the out-of-tree OpenWRT patch for Cortina Gemini: | ||
| 8 | * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com> | ||
| 9 | * Copyright (C) 2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt> | ||
| 10 | * Based on SL2312 PCI controller code | ||
| 11 | * Storlink (C) 2003 | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/interrupt.h> | ||
| 16 | #include <linux/io.h> | ||
| 17 | #include <linux/kernel.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_device.h> | ||
| 20 | #include <linux/of_irq.h> | ||
| 21 | #include <linux/of_pci.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | #include <linux/irqdomain.h> | ||
| 26 | #include <linux/irqchip/chained_irq.h> | ||
| 27 | #include <linux/bitops.h> | ||
| 28 | #include <linux/irq.h> | ||
| 29 | #include <linux/clk.h> | ||
| 30 | |||
| 31 | #include "../pci.h" | ||
| 32 | |||
| 33 | /* | ||
| 34 | * Special configuration registers directly in the first few words | ||
| 35 | * of I/O space. | ||
| 36 | */ | ||
| 37 | #define PCI_IOSIZE 0x00 | ||
| 38 | #define PCI_PROT 0x04 /* AHB protection */ | ||
| 39 | #define PCI_CTRL 0x08 /* PCI control signal */ | ||
| 40 | #define PCI_SOFTRST 0x10 /* Soft reset counter and response error enable */ | ||
| 41 | #define PCI_CONFIG 0x28 /* PCI configuration command register */ | ||
| 42 | #define PCI_DATA 0x2C | ||
| 43 | |||
| 44 | #define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ | ||
| 45 | #define FARADAY_PCI_PMC 0x40 /* Power management control */ | ||
| 46 | #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ | ||
| 47 | #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ | ||
| 48 | #define FARADAY_PCI_CTRL2 0x4C /* Control register 2 */ | ||
| 49 | #define FARADAY_PCI_MEM1_BASE_SIZE 0x50 /* Memory base and size #1 */ | ||
| 50 | #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ | ||
| 51 | #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ | ||
| 52 | |||
| 53 | #define PCI_STATUS_66MHZ_CAPABLE BIT(21) | ||
| 54 | |||
| 55 | /* Bits 31..28 gives INTD..INTA status */ | ||
| 56 | #define PCI_CTRL2_INTSTS_SHIFT 28 | ||
| 57 | #define PCI_CTRL2_INTMASK_CMDERR BIT(27) | ||
| 58 | #define PCI_CTRL2_INTMASK_PARERR BIT(26) | ||
| 59 | /* Bits 25..22 masks INTD..INTA */ | ||
| 60 | #define PCI_CTRL2_INTMASK_SHIFT 22 | ||
| 61 | #define PCI_CTRL2_INTMASK_MABRT_RX BIT(21) | ||
| 62 | #define PCI_CTRL2_INTMASK_TABRT_RX BIT(20) | ||
| 63 | #define PCI_CTRL2_INTMASK_TABRT_TX BIT(19) | ||
| 64 | #define PCI_CTRL2_INTMASK_RETRY4 BIT(18) | ||
| 65 | #define PCI_CTRL2_INTMASK_SERR_RX BIT(17) | ||
| 66 | #define PCI_CTRL2_INTMASK_PERR_RX BIT(16) | ||
| 67 | /* Bit 15 reserved */ | ||
| 68 | #define PCI_CTRL2_MSTPRI_REQ6 BIT(14) | ||
| 69 | #define PCI_CTRL2_MSTPRI_REQ5 BIT(13) | ||
| 70 | #define PCI_CTRL2_MSTPRI_REQ4 BIT(12) | ||
| 71 | #define PCI_CTRL2_MSTPRI_REQ3 BIT(11) | ||
| 72 | #define PCI_CTRL2_MSTPRI_REQ2 BIT(10) | ||
| 73 | #define PCI_CTRL2_MSTPRI_REQ1 BIT(9) | ||
| 74 | #define PCI_CTRL2_MSTPRI_REQ0 BIT(8) | ||
| 75 | /* Bits 7..4 reserved */ | ||
| 76 | /* Bits 3..0 TRDYW */ | ||
| 77 | |||
| 78 | /* | ||
| 79 | * Memory configs: | ||
| 80 | * Bit 31..20 defines the PCI side memory base | ||
| 81 | * Bit 19..16 (4 bits) defines the size per below | ||
| 82 | */ | ||
| 83 | #define FARADAY_PCI_MEMBASE_MASK 0xfff00000 | ||
| 84 | #define FARADAY_PCI_MEMSIZE_1MB 0x0 | ||
| 85 | #define FARADAY_PCI_MEMSIZE_2MB 0x1 | ||
| 86 | #define FARADAY_PCI_MEMSIZE_4MB 0x2 | ||
| 87 | #define FARADAY_PCI_MEMSIZE_8MB 0x3 | ||
| 88 | #define FARADAY_PCI_MEMSIZE_16MB 0x4 | ||
| 89 | #define FARADAY_PCI_MEMSIZE_32MB 0x5 | ||
| 90 | #define FARADAY_PCI_MEMSIZE_64MB 0x6 | ||
| 91 | #define FARADAY_PCI_MEMSIZE_128MB 0x7 | ||
| 92 | #define FARADAY_PCI_MEMSIZE_256MB 0x8 | ||
| 93 | #define FARADAY_PCI_MEMSIZE_512MB 0x9 | ||
| 94 | #define FARADAY_PCI_MEMSIZE_1GB 0xa | ||
| 95 | #define FARADAY_PCI_MEMSIZE_2GB 0xb | ||
| 96 | #define FARADAY_PCI_MEMSIZE_SHIFT 16 | ||
| 97 | |||
| 98 | /* | ||
| 99 | * The DMA base is set to 0x0 for all memory segments, it reflects the | ||
| 100 | * fact that the memory of the host system starts at 0x0. | ||
| 101 | */ | ||
| 102 | #define FARADAY_PCI_DMA_MEM1_BASE 0x00000000 | ||
| 103 | #define FARADAY_PCI_DMA_MEM2_BASE 0x00000000 | ||
| 104 | #define FARADAY_PCI_DMA_MEM3_BASE 0x00000000 | ||
| 105 | |||
| 106 | /* Defines for PCI configuration command register */ | ||
| 107 | #define PCI_CONF_ENABLE BIT(31) | ||
| 108 | #define PCI_CONF_WHERE(r) ((r) & 0xFC) | ||
| 109 | #define PCI_CONF_BUS(b) (((b) & 0xFF) << 16) | ||
| 110 | #define PCI_CONF_DEVICE(d) (((d) & 0x1F) << 11) | ||
| 111 | #define PCI_CONF_FUNCTION(f) (((f) & 0x07) << 8) | ||
| 112 | |||
| 113 | /** | ||
| 114 | * struct faraday_pci_variant - encodes IP block differences | ||
| 115 | * @cascaded_irq: this host has cascaded IRQs from an interrupt controller | ||
| 116 | * embedded in the host bridge. | ||
| 117 | */ | ||
| 118 | struct faraday_pci_variant { | ||
| 119 | bool cascaded_irq; | ||
| 120 | }; | ||
| 121 | |||
| 122 | struct faraday_pci { | ||
| 123 | struct device *dev; | ||
| 124 | void __iomem *base; | ||
| 125 | struct irq_domain *irqdomain; | ||
| 126 | struct pci_bus *bus; | ||
| 127 | struct clk *bus_clk; | ||
| 128 | }; | ||
| 129 | |||
| 130 | static int faraday_res_to_memcfg(resource_size_t mem_base, | ||
| 131 | resource_size_t mem_size, u32 *val) | ||
| 132 | { | ||
| 133 | u32 outval; | ||
| 134 | |||
| 135 | switch (mem_size) { | ||
| 136 | case SZ_1M: | ||
| 137 | outval = FARADAY_PCI_MEMSIZE_1MB; | ||
| 138 | break; | ||
| 139 | case SZ_2M: | ||
| 140 | outval = FARADAY_PCI_MEMSIZE_2MB; | ||
| 141 | break; | ||
| 142 | case SZ_4M: | ||
| 143 | outval = FARADAY_PCI_MEMSIZE_4MB; | ||
| 144 | break; | ||
| 145 | case SZ_8M: | ||
| 146 | outval = FARADAY_PCI_MEMSIZE_8MB; | ||
| 147 | break; | ||
| 148 | case SZ_16M: | ||
| 149 | outval = FARADAY_PCI_MEMSIZE_16MB; | ||
| 150 | break; | ||
| 151 | case SZ_32M: | ||
| 152 | outval = FARADAY_PCI_MEMSIZE_32MB; | ||
| 153 | break; | ||
| 154 | case SZ_64M: | ||
| 155 | outval = FARADAY_PCI_MEMSIZE_64MB; | ||
| 156 | break; | ||
| 157 | case SZ_128M: | ||
| 158 | outval = FARADAY_PCI_MEMSIZE_128MB; | ||
| 159 | break; | ||
| 160 | case SZ_256M: | ||
| 161 | outval = FARADAY_PCI_MEMSIZE_256MB; | ||
| 162 | break; | ||
| 163 | case SZ_512M: | ||
| 164 | outval = FARADAY_PCI_MEMSIZE_512MB; | ||
| 165 | break; | ||
| 166 | case SZ_1G: | ||
| 167 | outval = FARADAY_PCI_MEMSIZE_1GB; | ||
| 168 | break; | ||
| 169 | case SZ_2G: | ||
| 170 | outval = FARADAY_PCI_MEMSIZE_2GB; | ||
| 171 | break; | ||
| 172 | default: | ||
| 173 | return -EINVAL; | ||
| 174 | } | ||
| 175 | outval <<= FARADAY_PCI_MEMSIZE_SHIFT; | ||
| 176 | |||
| 177 | /* The window base has 1 MB granularity; warn if low bits are lost */ | ||
| 178 | if (mem_base & ~(FARADAY_PCI_MEMBASE_MASK)) | ||
| 179 | pr_warn("truncated PCI memory base\n"); | ||
| 180 | /* Translate to bridge side address space */ | ||
| 181 | outval |= (mem_base & FARADAY_PCI_MEMBASE_MASK); | ||
| 182 | pr_debug("Translated pci base @%pap, size %pap to config %08x\n", | ||
| 183 | &mem_base, &mem_size, outval); | ||
| 184 | |||
| 185 | *val = outval; | ||
| 186 | return 0; | ||
| 187 | } | ||
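Since FARADAY_PCI_MEMSIZE_1MB..2GB are simply 0x0..0xb, each code is log2(size) - 20 for power-of-two sizes; the switch above spells out the supported sizes explicitly, but the same encoding can be computed, as this standalone sketch shows:

```c
#include <stdio.h>

/* Equivalent size encoding: returns log2(size) - 20 for power-of-two
 * sizes from 1 MB (code 0x0) to 2 GB (code 0xb), else -1. */
static int memsize_to_code(unsigned long long size)
{
	int log2 = 0;

	if (size == 0 || (size & (size - 1)))
		return -1;		/* not a power of two */
	while (size > 1) {
		size >>= 1;
		log2++;
	}
	return (log2 >= 20 && log2 <= 31) ? log2 - 20 : -1;
}

int main(void)
{
	printf("%#x\n", memsize_to_code(1ULL << 20));	/* 0x0: 1 MB */
	printf("%#x\n", memsize_to_code(1ULL << 31));	/* 0xb: 2 GB */
	return 0;
}
```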
| 188 | |||
| 189 | static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, | ||
| 190 | unsigned int fn, int config, int size, | ||
| 191 | u32 *value) | ||
| 192 | { | ||
| 193 | writel(PCI_CONF_BUS(bus_number) | | ||
| 194 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | ||
| 195 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | ||
| 196 | PCI_CONF_WHERE(config) | | ||
| 197 | PCI_CONF_ENABLE, | ||
| 198 | p->base + PCI_CONFIG); | ||
| 199 | |||
| 200 | *value = readl(p->base + PCI_DATA); | ||
| 201 | |||
| 202 | if (size == 1) | ||
| 203 | *value = (*value >> (8 * (config & 3))) & 0xFF; | ||
| 204 | else if (size == 2) | ||
| 205 | *value = (*value >> (8 * (config & 3))) & 0xFFFF; | ||
| 206 | |||
| 207 | return PCIBIOS_SUCCESSFUL; | ||
| 208 | } | ||
| 209 | |||
| 210 | static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, | ||
| 211 | int config, int size, u32 *value) | ||
| 212 | { | ||
| 213 | struct faraday_pci *p = bus->sysdata; | ||
| 214 | int ret = faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); | ||
| 215 | |||
| 216 | dev_dbg(&bus->dev, | ||
| 217 | "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 218 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); | ||
| 219 | return ret; | ||
| 220 | } | ||
| 221 | |||
| 222 | static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, | ||
| 223 | unsigned int fn, int config, int size, | ||
| 224 | u32 value) | ||
| 225 | { | ||
| 226 | int ret = PCIBIOS_SUCCESSFUL; | ||
| 227 | |||
| 228 | writel(PCI_CONF_BUS(bus_number) | | ||
| 229 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | ||
| 230 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | ||
| 231 | PCI_CONF_WHERE(config) | | ||
| 232 | PCI_CONF_ENABLE, | ||
| 233 | p->base + PCI_CONFIG); | ||
| 234 | |||
| 235 | switch (size) { | ||
| 236 | case 4: | ||
| 237 | writel(value, p->base + PCI_DATA); | ||
| 238 | break; | ||
| 239 | case 2: | ||
| 240 | writew(value, p->base + PCI_DATA + (config & 3)); | ||
| 241 | break; | ||
| 242 | case 1: | ||
| 243 | writeb(value, p->base + PCI_DATA + (config & 3)); | ||
| 244 | break; | ||
| 245 | default: | ||
| 246 | ret = PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 247 | } | ||
| 248 | |||
| 249 | return ret; | ||
| 250 | } | ||
| 251 | |||
| 252 | static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, | ||
| 253 | int config, int size, u32 value) | ||
| 254 | { | ||
| 255 | struct faraday_pci *p = bus->sysdata; | ||
| 256 | |||
| 257 | dev_dbg(&bus->dev, | ||
| 258 | "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 259 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); | ||
| 260 | |||
| 261 | return faraday_raw_pci_write_config(p, bus->number, fn, config, size, | ||
| 262 | value); | ||
| 263 | } | ||
| 264 | |||
| 265 | static struct pci_ops faraday_pci_ops = { | ||
| 266 | .read = faraday_pci_read_config, | ||
| 267 | .write = faraday_pci_write_config, | ||
| 268 | }; | ||
| 269 | |||
| 270 | static void faraday_pci_ack_irq(struct irq_data *d) | ||
| 271 | { | ||
| 272 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
| 273 | unsigned int reg; | ||
| 274 | |||
| 275 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
| 276 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | ||
| 277 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); | ||
| 278 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
| 279 | } | ||
| 280 | |||
| 281 | static void faraday_pci_mask_irq(struct irq_data *d) | ||
| 282 | { | ||
| 283 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
| 284 | unsigned int reg; | ||
| 285 | |||
| 286 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
| 287 | reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) | ||
| 288 | | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); | ||
| 289 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
| 290 | } | ||
| 291 | |||
| 292 | static void faraday_pci_unmask_irq(struct irq_data *d) | ||
| 293 | { | ||
| 294 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | ||
| 295 | unsigned int reg; | ||
| 296 | |||
| 297 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
| 298 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | ||
| 299 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); | ||
| 300 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); | ||
| 301 | } | ||
| 302 | |||
| 303 | static void faraday_pci_irq_handler(struct irq_desc *desc) | ||
| 304 | { | ||
| 305 | struct faraday_pci *p = irq_desc_get_handler_data(desc); | ||
| 306 | struct irq_chip *irqchip = irq_desc_get_chip(desc); | ||
| 307 | unsigned int irq_stat, reg, i; | ||
| 308 | |||
| 309 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); | ||
| 310 | irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; | ||
| 311 | |||
| 312 | chained_irq_enter(irqchip, desc); | ||
| 313 | |||
| 314 | for (i = 0; i < 4; i++) { | ||
| 315 | if ((irq_stat & BIT(i)) == 0) | ||
| 316 | continue; | ||
| 317 | generic_handle_irq(irq_find_mapping(p->irqdomain, i)); | ||
| 318 | } | ||
| 319 | |||
| 320 | chained_irq_exit(irqchip, desc); | ||
| 321 | } | ||
| 322 | |||
| 323 | static struct irq_chip faraday_pci_irq_chip = { | ||
| 324 | .name = "PCI", | ||
| 325 | .irq_ack = faraday_pci_ack_irq, | ||
| 326 | .irq_mask = faraday_pci_mask_irq, | ||
| 327 | .irq_unmask = faraday_pci_unmask_irq, | ||
| 328 | }; | ||
| 329 | |||
| 330 | static int faraday_pci_irq_map(struct irq_domain *domain, unsigned int irq, | ||
| 331 | irq_hw_number_t hwirq) | ||
| 332 | { | ||
| 333 | irq_set_chip_and_handler(irq, &faraday_pci_irq_chip, handle_level_irq); | ||
| 334 | irq_set_chip_data(irq, domain->host_data); | ||
| 335 | |||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | |||
| 339 | static const struct irq_domain_ops faraday_pci_irqdomain_ops = { | ||
| 340 | .map = faraday_pci_irq_map, | ||
| 341 | }; | ||
| 342 | |||
| 343 | static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p) | ||
| 344 | { | ||
| 345 | struct device_node *intc = of_get_next_child(p->dev->of_node, NULL); | ||
| 346 | int irq; | ||
| 347 | int i; | ||
| 348 | |||
| 349 | if (!intc) { | ||
| 350 | dev_err(p->dev, "missing child interrupt-controller node\n"); | ||
| 351 | return -EINVAL; | ||
| 352 | } | ||
| 353 | |||
| 354 | /* All PCI IRQs cascade off this one */ | ||
| 355 | irq = of_irq_get(intc, 0); | ||
| 356 | if (irq <= 0) { | ||
| 357 | dev_err(p->dev, "failed to get parent IRQ\n"); | ||
| 358 | return irq ?: -EINVAL; | ||
| 359 | } | ||
| 360 | |||
| 361 | p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX, | ||
| 362 | &faraday_pci_irqdomain_ops, p); | ||
| 363 | if (!p->irqdomain) { | ||
| 364 | dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n"); | ||
| 365 | return -EINVAL; | ||
| 366 | } | ||
| 367 | |||
| 368 | irq_set_chained_handler_and_data(irq, faraday_pci_irq_handler, p); | ||
| 369 | |||
| 370 | for (i = 0; i < 4; i++) | ||
| 371 | irq_create_mapping(p->irqdomain, i); | ||
| 372 | |||
| 373 | return 0; | ||
| 374 | } | ||
| 375 | |||
| 376 | static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, | ||
| 377 | struct device_node *np) | ||
| 378 | { | ||
| 379 | struct of_pci_range range; | ||
| 380 | struct of_pci_range_parser parser; | ||
| 381 | struct device *dev = p->dev; | ||
| 382 | u32 confreg[3] = { | ||
| 383 | FARADAY_PCI_MEM1_BASE_SIZE, | ||
| 384 | FARADAY_PCI_MEM2_BASE_SIZE, | ||
| 385 | FARADAY_PCI_MEM3_BASE_SIZE, | ||
| 386 | }; | ||
| 387 | int i = 0; | ||
| 388 | u32 val; | ||
| 389 | |||
| 390 | if (of_pci_dma_range_parser_init(&parser, np)) { | ||
| 391 | dev_err(dev, "missing dma-ranges property\n"); | ||
| 392 | return -EINVAL; | ||
| 393 | } | ||
| 394 | |||
| 395 | /* | ||
| 396 | * Get the dma-ranges from the device tree | ||
| 397 | */ | ||
| 398 | for_each_of_pci_range(&parser, &range) { | ||
| 399 | u64 end = range.pci_addr + range.size - 1; | ||
| 400 | int ret; | ||
| 401 | |||
| 402 | ret = faraday_res_to_memcfg(range.pci_addr, range.size, &val); | ||
| 403 | if (ret) { | ||
| 404 | dev_err(dev, | ||
| 405 | "DMA range %d: illegal MEM resource size\n", i); | ||
| 406 | return -EINVAL; | ||
| 407 | } | ||
| 408 | |||
| 409 | dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", | ||
| 410 | i + 1, range.pci_addr, end, val); | ||
| 411 | if (i <= 2) { | ||
| 412 | faraday_raw_pci_write_config(p, 0, 0, confreg[i], | ||
| 413 | 4, val); | ||
| 414 | } else { | ||
| 415 | dev_err(dev, "ignore extraneous dma-range %d\n", i); | ||
| 416 | break; | ||
| 417 | } | ||
| 418 | |||
| 419 | i++; | ||
| 420 | } | ||
| 421 | |||
| 422 | return 0; | ||
| 423 | } | ||
| 424 | |||
| 425 | static int faraday_pci_probe(struct platform_device *pdev) | ||
| 426 | { | ||
| 427 | struct device *dev = &pdev->dev; | ||
| 428 | const struct faraday_pci_variant *variant = | ||
| 429 | of_device_get_match_data(dev); | ||
| 430 | struct resource *regs; | ||
| 431 | resource_size_t io_base; | ||
| 432 | struct resource_entry *win; | ||
| 433 | struct faraday_pci *p; | ||
| 434 | struct resource *mem; | ||
| 435 | struct resource *io; | ||
| 436 | struct pci_host_bridge *host; | ||
| 437 | struct clk *clk; | ||
| 438 | unsigned char max_bus_speed = PCI_SPEED_33MHz; | ||
| 439 | unsigned char cur_bus_speed = PCI_SPEED_33MHz; | ||
| 440 | int ret; | ||
| 441 | u32 val; | ||
| 442 | LIST_HEAD(res); | ||
| 443 | |||
| 444 | host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); | ||
| 445 | if (!host) | ||
| 446 | return -ENOMEM; | ||
| 447 | |||
| 448 | host->dev.parent = dev; | ||
| 449 | host->ops = &faraday_pci_ops; | ||
| 450 | host->busnr = 0; | ||
| 451 | host->msi = NULL; | ||
| 452 | host->map_irq = of_irq_parse_and_map_pci; | ||
| 453 | host->swizzle_irq = pci_common_swizzle; | ||
| 454 | p = pci_host_bridge_priv(host); | ||
| 455 | host->sysdata = p; | ||
| 456 | p->dev = dev; | ||
| 457 | |||
| 458 | /* Retrieve and enable the clocks; both are required to probe */ | ||
| 459 | clk = devm_clk_get(dev, "PCLK"); | ||
| 460 | if (IS_ERR(clk)) | ||
| 461 | return PTR_ERR(clk); | ||
| 462 | ret = clk_prepare_enable(clk); | ||
| 463 | if (ret) { | ||
| 464 | dev_err(dev, "could not prepare PCLK\n"); | ||
| 465 | return ret; | ||
| 466 | } | ||
| 467 | p->bus_clk = devm_clk_get(dev, "PCICLK"); | ||
| 468 | if (IS_ERR(p->bus_clk)) | ||
| 469 | return PTR_ERR(p->bus_clk); | ||
| 470 | ret = clk_prepare_enable(p->bus_clk); | ||
| 471 | if (ret) { | ||
| 472 | dev_err(dev, "could not prepare PCICLK\n"); | ||
| 473 | return ret; | ||
| 474 | } | ||
| 475 | |||
| 476 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 477 | p->base = devm_ioremap_resource(dev, regs); | ||
| 478 | if (IS_ERR(p->base)) | ||
| 479 | return PTR_ERR(p->base); | ||
| 480 | |||
| 481 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 482 | &res, &io_base); | ||
| 483 | if (ret) | ||
| 484 | return ret; | ||
| 485 | |||
| 486 | ret = devm_request_pci_bus_resources(dev, &res); | ||
| 487 | if (ret) | ||
| 488 | return ret; | ||
| 489 | |||
| 490 | /* Get the I/O and memory ranges from DT */ | ||
| 491 | resource_list_for_each_entry(win, &res) { | ||
| 492 | switch (resource_type(win->res)) { | ||
| 493 | case IORESOURCE_IO: | ||
| 494 | io = win->res; | ||
| 495 | io->name = "Gemini PCI I/O"; | ||
| 496 | if (!faraday_res_to_memcfg(io->start - win->offset, | ||
| 497 | resource_size(io), &val)) { | ||
| 498 | /* setup I/O space size */ | ||
| 499 | writel(val, p->base + PCI_IOSIZE); | ||
| 500 | } else { | ||
| 501 | dev_err(dev, "illegal IO mem size\n"); | ||
| 502 | return -EINVAL; | ||
| 503 | } | ||
| 504 | ret = pci_remap_iospace(io, io_base); | ||
| 505 | if (ret) { | ||
| 506 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
| 507 | ret, io); | ||
| 508 | continue; | ||
| 509 | } | ||
| 510 | break; | ||
| 511 | case IORESOURCE_MEM: | ||
| 512 | mem = win->res; | ||
| 513 | mem->name = "Gemini PCI MEM"; | ||
| 514 | break; | ||
| 515 | case IORESOURCE_BUS: | ||
| 516 | break; | ||
| 517 | default: | ||
| 518 | break; | ||
| 519 | } | ||
| 520 | } | ||
| 521 | |||
| 522 | /* Setup hostbridge */ | ||
| 523 | val = readl(p->base + PCI_CTRL); | ||
| 524 | val |= PCI_COMMAND_IO; | ||
| 525 | val |= PCI_COMMAND_MEMORY; | ||
| 526 | val |= PCI_COMMAND_MASTER; | ||
| 527 | writel(val, p->base + PCI_CTRL); | ||
| 528 | /* Mask and clear all interrupts */ | ||
| 529 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); | ||
| 530 | if (variant->cascaded_irq) { | ||
| 531 | ret = faraday_pci_setup_cascaded_irq(p); | ||
| 532 | if (ret) { | ||
| 533 | dev_err(dev, "failed to setup cascaded IRQ\n"); | ||
| 534 | return ret; | ||
| 535 | } | ||
| 536 | } | ||
| 537 | |||
| 538 | /* Check the bus clock to see whether we can gear up to 66 MHz */ | ||
| 539 | if (!IS_ERR(p->bus_clk)) { | ||
| 540 | unsigned long rate; | ||
| 541 | u32 val; | ||
| 542 | |||
| 543 | faraday_raw_pci_read_config(p, 0, 0, | ||
| 544 | FARADAY_PCI_STATUS_CMD, 4, &val); | ||
| 545 | rate = clk_get_rate(p->bus_clk); | ||
| 546 | |||
| 547 | if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { | ||
| 548 | dev_info(dev, "33MHz bus is 66MHz capable\n"); | ||
| 549 | max_bus_speed = PCI_SPEED_66MHz; | ||
| 550 | ret = clk_set_rate(p->bus_clk, 66000000); | ||
| 551 | if (ret) | ||
| 552 | dev_err(dev, "failed to set bus clock\n"); | ||
| 553 | } else { | ||
| 554 | dev_info(dev, "33MHz only bus\n"); | ||
| 555 | max_bus_speed = PCI_SPEED_33MHz; | ||
| 556 | } | ||
| 557 | |||
| 558 | /* Bumping the clock may fail, so read back the rate */ | ||
| 559 | rate = clk_get_rate(p->bus_clk); | ||
| 560 | if (rate == 33000000) | ||
| 561 | cur_bus_speed = PCI_SPEED_33MHz; | ||
| 562 | if (rate == 66000000) | ||
| 563 | cur_bus_speed = PCI_SPEED_66MHz; | ||
| 564 | } | ||
| 565 | |||
| 566 | ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); | ||
| 567 | if (ret) | ||
| 568 | return ret; | ||
| 569 | |||
| 570 | list_splice_init(&res, &host->windows); | ||
| 571 | ret = pci_scan_root_bus_bridge(host); | ||
| 572 | if (ret) { | ||
| 573 | dev_err(dev, "failed to scan host: %d\n", ret); | ||
| 574 | return ret; | ||
| 575 | } | ||
| 576 | p->bus = host->bus; | ||
| 577 | p->bus->max_bus_speed = max_bus_speed; | ||
| 578 | p->bus->cur_bus_speed = cur_bus_speed; | ||
| 579 | |||
| 580 | pci_bus_assign_resources(p->bus); | ||
| 581 | pci_bus_add_devices(p->bus); | ||
| 582 | pci_free_resource_list(&res); | ||
| 583 | |||
| 584 | return 0; | ||
| 585 | } | ||
| 586 | |||
| 587 | /* | ||
| 588 | * We encode bridge variants here. There are at least two, so it does | ||
| 589 | * not hurt to have infrastructure to encompass future variants as well. | ||
| 590 | */ | ||
| 591 | static const struct faraday_pci_variant faraday_regular = { | ||
| 592 | .cascaded_irq = true, | ||
| 593 | }; | ||
| 594 | |||
| 595 | static const struct faraday_pci_variant faraday_dual = { | ||
| 596 | .cascaded_irq = false, | ||
| 597 | }; | ||
| 598 | |||
| 599 | static const struct of_device_id faraday_pci_of_match[] = { | ||
| 600 | { | ||
| 601 | .compatible = "faraday,ftpci100", | ||
| 602 | .data = &faraday_regular, | ||
| 603 | }, | ||
| 604 | { | ||
| 605 | .compatible = "faraday,ftpci100-dual", | ||
| 606 | .data = &faraday_dual, | ||
| 607 | }, | ||
| 608 | {}, | ||
| 609 | }; | ||
| 610 | |||
| 611 | static struct platform_driver faraday_pci_driver = { | ||
| 612 | .driver = { | ||
| 613 | .name = "ftpci100", | ||
| 614 | .of_match_table = of_match_ptr(faraday_pci_of_match), | ||
| 615 | .suppress_bind_attrs = true, | ||
| 616 | }, | ||
| 617 | .probe = faraday_pci_probe, | ||
| 618 | }; | ||
| 619 | builtin_platform_driver(faraday_pci_driver); | ||
diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c new file mode 100644 index 000000000000..d8f10451f273 --- /dev/null +++ b/drivers/pci/controller/pci-host-common.c | |||
| @@ -0,0 +1,118 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Generic PCI host driver common code | ||
| 4 | * | ||
| 5 | * Copyright (C) 2014 ARM Limited | ||
| 6 | * | ||
| 7 | * Author: Will Deacon <will.deacon@arm.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/of_address.h> | ||
| 12 | #include <linux/of_pci.h> | ||
| 13 | #include <linux/pci-ecam.h> | ||
| 14 | #include <linux/platform_device.h> | ||
| 15 | |||
| 16 | static void gen_pci_unmap_cfg(void *ptr) | ||
| 17 | { | ||
| 18 | pci_ecam_free((struct pci_config_window *)ptr); | ||
| 19 | } | ||
| 20 | |||
| 21 | static struct pci_config_window *gen_pci_init(struct device *dev, | ||
| 22 | struct list_head *resources, struct pci_ecam_ops *ops) | ||
| 23 | { | ||
| 24 | int err; | ||
| 25 | struct resource cfgres; | ||
| 26 | struct resource *bus_range = NULL; | ||
| 27 | struct pci_config_window *cfg; | ||
| 28 | |||
| 29 | /* Parse our PCI ranges and request their resources */ | ||
| 30 | err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); | ||
| 31 | if (err) | ||
| 32 | return ERR_PTR(err); | ||
| 33 | |||
| 34 | err = of_address_to_resource(dev->of_node, 0, &cfgres); | ||
| 35 | if (err) { | ||
| 36 | dev_err(dev, "missing \"reg\" property\n"); | ||
| 37 | goto err_out; | ||
| 38 | } | ||
| 39 | |||
| 40 | cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); | ||
| 41 | if (IS_ERR(cfg)) { | ||
| 42 | err = PTR_ERR(cfg); | ||
| 43 | goto err_out; | ||
| 44 | } | ||
| 45 | |||
| 46 | err = devm_add_action(dev, gen_pci_unmap_cfg, cfg); | ||
| 47 | if (err) { | ||
| 48 | gen_pci_unmap_cfg(cfg); | ||
| 49 | goto err_out; | ||
| 50 | } | ||
| 51 | return cfg; | ||
| 52 | |||
| 53 | err_out: | ||
| 54 | pci_free_resource_list(resources); | ||
| 55 | return ERR_PTR(err); | ||
| 56 | } | ||
| 57 | |||
| 58 | int pci_host_common_probe(struct platform_device *pdev, | ||
| 59 | struct pci_ecam_ops *ops) | ||
| 60 | { | ||
| 61 | const char *type; | ||
| 62 | struct device *dev = &pdev->dev; | ||
| 63 | struct device_node *np = dev->of_node; | ||
| 64 | struct pci_host_bridge *bridge; | ||
| 65 | struct pci_config_window *cfg; | ||
| 66 | struct list_head resources; | ||
| 67 | int ret; | ||
| 68 | |||
| 69 | bridge = devm_pci_alloc_host_bridge(dev, 0); | ||
| 70 | if (!bridge) | ||
| 71 | return -ENOMEM; | ||
| 72 | |||
| 73 | type = of_get_property(np, "device_type", NULL); | ||
| 74 | if (!type || strcmp(type, "pci")) { | ||
| 75 | dev_err(dev, "invalid \"device_type\" %s\n", type); | ||
| 76 | return -EINVAL; | ||
| 77 | } | ||
| 78 | |||
| 79 | of_pci_check_probe_only(); | ||
| 80 | |||
| 81 | /* Parse and map our Configuration Space windows */ | ||
| 82 | cfg = gen_pci_init(dev, &resources, ops); | ||
| 83 | if (IS_ERR(cfg)) | ||
| 84 | return PTR_ERR(cfg); | ||
| 85 | |||
| 86 | /* Do not reassign resources if probe only */ | ||
| 87 | if (!pci_has_flag(PCI_PROBE_ONLY)) | ||
| 88 | pci_add_flags(PCI_REASSIGN_ALL_BUS); | ||
| 89 | |||
| 90 | list_splice_init(&resources, &bridge->windows); | ||
| 91 | bridge->dev.parent = dev; | ||
| 92 | bridge->sysdata = cfg; | ||
| 93 | bridge->busnr = cfg->busr.start; | ||
| 94 | bridge->ops = &ops->pci_ops; | ||
| 95 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 96 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 97 | |||
| 98 | ret = pci_host_probe(bridge); | ||
| 99 | if (ret < 0) { | ||
| 100 | pci_free_resource_list(&resources); | ||
| 101 | return ret; | ||
| 102 | } | ||
| 103 | |||
| 104 | platform_set_drvdata(pdev, bridge->bus); | ||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | int pci_host_common_remove(struct platform_device *pdev) | ||
| 109 | { | ||
| 110 | struct pci_bus *bus = platform_get_drvdata(pdev); | ||
| 111 | |||
| 112 | pci_lock_rescan_remove(); | ||
| 113 | pci_stop_root_bus(bus); | ||
| 114 | pci_remove_root_bus(bus); | ||
| 115 | pci_unlock_rescan_remove(); | ||
| 116 | |||
| 117 | return 0; | ||
| 118 | } | ||
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c new file mode 100644 index 000000000000..dea3ec7592a2 --- /dev/null +++ b/drivers/pci/controller/pci-host-generic.c | |||
| @@ -0,0 +1,100 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Simple, generic PCI host controller driver targeting firmware-initialised | ||
| 4 | * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). | ||
| 5 | * | ||
| 6 | * Copyright (C) 2014 ARM Limited | ||
| 7 | * | ||
| 8 | * Author: Will Deacon <will.deacon@arm.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/kernel.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/of_pci.h> | ||
| 15 | #include <linux/pci-ecam.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | |||
| 18 | static struct pci_ecam_ops gen_pci_cfg_cam_bus_ops = { | ||
| 19 | .bus_shift = 16, | ||
| 20 | .pci_ops = { | ||
| 21 | .map_bus = pci_ecam_map_bus, | ||
| 22 | .read = pci_generic_config_read, | ||
| 23 | .write = pci_generic_config_write, | ||
| 24 | } | ||
| 25 | }; | ||
| 26 | |||
| 27 | static bool pci_dw_valid_device(struct pci_bus *bus, unsigned int devfn) | ||
| 28 | { | ||
| 29 | struct pci_config_window *cfg = bus->sysdata; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * The Synopsys DesignWare PCIe controller in ECAM mode will not filter | ||
| 33 | * type 0 config TLPs sent to devices 1 and up on its downstream port, | ||
| 34 | * resulting in devices appearing multiple times on bus 0 unless we | ||
| 35 | * filter out those accesses here. | ||
| 36 | */ | ||
| 37 | if (bus->number == cfg->busr.start && PCI_SLOT(devfn) > 0) | ||
| 38 | return false; | ||
| 39 | |||
| 40 | return true; | ||
| 41 | } | ||
| 42 | |||
| 43 | static void __iomem *pci_dw_ecam_map_bus(struct pci_bus *bus, | ||
| 44 | unsigned int devfn, int where) | ||
| 45 | { | ||
| 46 | if (!pci_dw_valid_device(bus, devfn)) | ||
| 47 | return NULL; | ||
| 48 | |||
| 49 | return pci_ecam_map_bus(bus, devfn, where); | ||
| 50 | } | ||
| 51 | |||
| 52 | static struct pci_ecam_ops pci_dw_ecam_bus_ops = { | ||
| 53 | .bus_shift = 20, | ||
| 54 | .pci_ops = { | ||
| 55 | .map_bus = pci_dw_ecam_map_bus, | ||
| 56 | .read = pci_generic_config_read, | ||
| 57 | .write = pci_generic_config_write, | ||
| 58 | } | ||
| 59 | }; | ||
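The only difference between these two ops tables is bus_shift: legacy CAM gives each bus a 64 KiB (16-bit) slice of the window with 8-bit register offsets, while ECAM gives each bus a 1 MiB (20-bit) slice with 12-bit extended offsets. A minimal sketch of the address arithmetic, assuming the standard layouts that pci_ecam_map_bus() implements on the mapped window (cfg_offset is illustrative, not part of the driver):

/*
 * Illustrative only: byte offset of (bus, devfn, where) inside a
 * CAM/ECAM window. devfn packs device (5 bits) and function (3 bits),
 * so it sits 8 bits below the bus number in either layout.
 */
static unsigned long cfg_offset(unsigned int busn, unsigned int devfn,
				int where, unsigned int bus_shift)
{
	return ((unsigned long)busn << bus_shift) |
	       ((unsigned long)devfn << (bus_shift - 8)) | where;
}

With bus_shift = 20 this is the familiar ECAM formula (bus << 20 | devfn << 12 | where); with bus_shift = 16 it is the CAM one.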
| 60 | |||
| 61 | static const struct of_device_id gen_pci_of_match[] = { | ||
| 62 | { .compatible = "pci-host-cam-generic", | ||
| 63 | .data = &gen_pci_cfg_cam_bus_ops }, | ||
| 64 | |||
| 65 | { .compatible = "pci-host-ecam-generic", | ||
| 66 | .data = &pci_generic_ecam_ops }, | ||
| 67 | |||
| 68 | { .compatible = "marvell,armada8k-pcie-ecam", | ||
| 69 | .data = &pci_dw_ecam_bus_ops }, | ||
| 70 | |||
| 71 | { .compatible = "socionext,synquacer-pcie-ecam", | ||
| 72 | .data = &pci_dw_ecam_bus_ops }, | ||
| 73 | |||
| 74 | { .compatible = "snps,dw-pcie-ecam", | ||
| 75 | .data = &pci_dw_ecam_bus_ops }, | ||
| 76 | |||
| 77 | { }, | ||
| 78 | }; | ||
| 79 | |||
| 80 | static int gen_pci_probe(struct platform_device *pdev) | ||
| 81 | { | ||
| 82 | const struct of_device_id *of_id; | ||
| 83 | struct pci_ecam_ops *ops; | ||
| 84 | |||
| 85 | of_id = of_match_node(gen_pci_of_match, pdev->dev.of_node); | ||
| 86 | ops = (struct pci_ecam_ops *)of_id->data; | ||
| 87 | |||
| 88 | return pci_host_common_probe(pdev, ops); | ||
| 89 | } | ||
| 90 | |||
| 91 | static struct platform_driver gen_pci_driver = { | ||
| 92 | .driver = { | ||
| 93 | .name = "pci-host-generic", | ||
| 94 | .of_match_table = gen_pci_of_match, | ||
| 95 | .suppress_bind_attrs = true, | ||
| 96 | }, | ||
| 97 | .probe = gen_pci_probe, | ||
| 98 | .remove = pci_host_common_remove, | ||
| 99 | }; | ||
| 100 | builtin_platform_driver(gen_pci_driver); | ||
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c new file mode 100644 index 000000000000..6cc5036ac83c --- /dev/null +++ b/drivers/pci/controller/pci-hyperv.c | |||
| @@ -0,0 +1,2694 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (c) Microsoft Corporation. | ||
| 4 | * | ||
| 5 | * Author: | ||
| 6 | * Jake Oshins <jakeo@microsoft.com> | ||
| 7 | * | ||
| 8 | * This driver acts as a paravirtual front-end for PCI Express root buses. | ||
| 9 | * When a PCI Express function (either an entire device or an SR-IOV | ||
| 10 | * Virtual Function) is being passed through to the VM, this driver exposes | ||
| 11 | * a new bus to the guest VM. This is modeled as a root PCI bus because | ||
| 12 | * no bridges are being exposed to the VM. In fact, with a "Generation 2" | ||
| 13 | * VM within Hyper-V, there may seem to be no PCI bus at all in the VM | ||
| 14 | * until a device has been exposed using this driver. | ||
| 15 | * | ||
| 16 | * Each root PCI bus has its own PCI domain, which is called "Segment" in | ||
| 17 | * the PCI Firmware Specifications. Thus while each device passed through | ||
| 18 | * to the VM using this front-end will appear at "device 0", the domain will | ||
| 19 | * be unique. Typically, each bus will have one PCI function on it, though | ||
| 20 | * this driver does support more than one. | ||
| 21 | * | ||
| 22 | * In order to map the interrupts from the device through to the guest VM, | ||
| 23 | * this driver also implements an IRQ Domain, which handles interrupts (either | ||
| 24 | * MSI or MSI-X) associated with the functions on the bus. As interrupts are | ||
| 25 | * set up, torn down, or reaffined, this driver communicates with the | ||
| 26 | * underlying hypervisor to adjust the mappings in the I/O MMU so that each | ||
| 27 | * interrupt will be delivered to the correct virtual processor at the right | ||
| 28 | * vector. This driver does not support level-triggered (line-based) | ||
| 29 | * interrupts, and will report that the Interrupt Line register in the | ||
| 30 | * function's configuration space is zero. | ||
| 31 | * | ||
| 32 | * The rest of this driver mostly maps PCI concepts onto underlying Hyper-V | ||
| 33 | * facilities. For instance, the configuration space of a function exposed | ||
| 34 | * by Hyper-V is mapped into a single page of memory space, and the | ||
| 35 | * read and write handlers for config space must be aware of this mechanism. | ||
| 36 | * Similarly, device setup and teardown involves messages sent to and from | ||
| 37 | * the PCI back-end driver in Hyper-V. | ||
| 38 | */ | ||
| 39 | |||
| 40 | #include <linux/kernel.h> | ||
| 41 | #include <linux/module.h> | ||
| 42 | #include <linux/pci.h> | ||
| 43 | #include <linux/delay.h> | ||
| 44 | #include <linux/semaphore.h> | ||
| 45 | #include <linux/irqdomain.h> | ||
| 46 | #include <asm/irqdomain.h> | ||
| 47 | #include <asm/apic.h> | ||
| 48 | #include <linux/msi.h> | ||
| 49 | #include <linux/hyperv.h> | ||
| 50 | #include <linux/refcount.h> | ||
| 51 | #include <asm/mshyperv.h> | ||
| 52 | |||
| 53 | /* | ||
| 54 | * Protocol versions. The low word is the minor version, the high word the | ||
| 55 | * major version. | ||
| 56 | */ | ||
| 57 | |||
| 58 | #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) | ||
| 59 | #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) | ||
| 60 | #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xffff) | ||
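A quick compile-time sanity check of the packing, as a hedged sketch (pci_version_macros_example is hypothetical; BUILD_BUG_ON comes in via <linux/kernel.h>, which this file already includes):

/* Illustrative only: verify the version packing at compile time. */
static inline void pci_version_macros_example(void)
{
	BUILD_BUG_ON(PCI_MAKE_VERSION(1, 2) != 0x00010002);
	BUILD_BUG_ON(PCI_MAJOR_VERSION(0x00010002) != 1);
	BUILD_BUG_ON(PCI_MINOR_VERSION(0x00010002) != 2);
}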
| 61 | |||
| 62 | enum pci_protocol_version_t { | ||
| 63 | PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ | ||
| 64 | PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ | ||
| 65 | }; | ||
| 66 | |||
| 67 | #define CPU_AFFINITY_ALL -1ULL | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Supported protocol versions in the order of probing - highest goes | ||
| 71 | * first. | ||
| 72 | */ | ||
| 73 | static enum pci_protocol_version_t pci_protocol_versions[] = { | ||
| 74 | PCI_PROTOCOL_VERSION_1_2, | ||
| 75 | PCI_PROTOCOL_VERSION_1_1, | ||
| 76 | }; | ||
| 77 | |||
| 78 | /* | ||
| 79 | * Protocol version negotiated by hv_pci_protocol_negotiation(). | ||
| 80 | */ | ||
| 81 | static enum pci_protocol_version_t pci_protocol_version; | ||
| 82 | |||
| 83 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 | ||
| 84 | #define CFG_PAGE_OFFSET 0x1000 | ||
| 85 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) | ||
| 86 | |||
| 87 | #define MAX_SUPPORTED_MSI_MESSAGES 0x400 | ||
| 88 | |||
| 89 | #define STATUS_REVISION_MISMATCH 0xC0000059 | ||
| 90 | |||
| 91 | /* | ||
| 92 | * Message Types | ||
| 93 | */ | ||
| 94 | |||
| 95 | enum pci_message_type { | ||
| 96 | /* | ||
| 97 | * Version 1.1 | ||
| 98 | */ | ||
| 99 | PCI_MESSAGE_BASE = 0x42490000, | ||
| 100 | PCI_BUS_RELATIONS = PCI_MESSAGE_BASE + 0, | ||
| 101 | PCI_QUERY_BUS_RELATIONS = PCI_MESSAGE_BASE + 1, | ||
| 102 | PCI_POWER_STATE_CHANGE = PCI_MESSAGE_BASE + 4, | ||
| 103 | PCI_QUERY_RESOURCE_REQUIREMENTS = PCI_MESSAGE_BASE + 5, | ||
| 104 | PCI_QUERY_RESOURCE_RESOURCES = PCI_MESSAGE_BASE + 6, | ||
| 105 | PCI_BUS_D0ENTRY = PCI_MESSAGE_BASE + 7, | ||
| 106 | PCI_BUS_D0EXIT = PCI_MESSAGE_BASE + 8, | ||
| 107 | PCI_READ_BLOCK = PCI_MESSAGE_BASE + 9, | ||
| 108 | PCI_WRITE_BLOCK = PCI_MESSAGE_BASE + 0xA, | ||
| 109 | PCI_EJECT = PCI_MESSAGE_BASE + 0xB, | ||
| 110 | PCI_QUERY_STOP = PCI_MESSAGE_BASE + 0xC, | ||
| 111 | PCI_REENABLE = PCI_MESSAGE_BASE + 0xD, | ||
| 112 | PCI_QUERY_STOP_FAILED = PCI_MESSAGE_BASE + 0xE, | ||
| 113 | PCI_EJECTION_COMPLETE = PCI_MESSAGE_BASE + 0xF, | ||
| 114 | PCI_RESOURCES_ASSIGNED = PCI_MESSAGE_BASE + 0x10, | ||
| 115 | PCI_RESOURCES_RELEASED = PCI_MESSAGE_BASE + 0x11, | ||
| 116 | PCI_INVALIDATE_BLOCK = PCI_MESSAGE_BASE + 0x12, | ||
| 117 | PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, | ||
| 118 | PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, | ||
| 119 | PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, | ||
| 120 | PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, | ||
| 121 | PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, | ||
| 122 | PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ | ||
| 123 | PCI_MESSAGE_MAXIMUM | ||
| 124 | }; | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Structures defining the virtual PCI Express protocol. | ||
| 128 | */ | ||
| 129 | |||
| 130 | union pci_version { | ||
| 131 | struct { | ||
| 132 | u16 minor_version; | ||
| 133 | u16 major_version; | ||
| 134 | } parts; | ||
| 135 | u32 version; | ||
| 136 | } __packed; | ||
| 137 | |||
| 138 | /* | ||
| 139 | * Function numbers are 8 bits wide on Express, as interpreted through ARI, | ||
| 140 | * which is all this driver does. This representation is the one used in | ||
| 141 | * Windows, which is what is expected when sending this back and forth with | ||
| 142 | * the Hyper-V parent partition. | ||
| 143 | */ | ||
| 144 | union win_slot_encoding { | ||
| 145 | struct { | ||
| 146 | u32 dev:5; | ||
| 147 | u32 func:3; | ||
| 148 | u32 reserved:24; | ||
| 149 | } bits; | ||
| 150 | u32 slot; | ||
| 151 | } __packed; | ||
| 152 | |||
| 153 | /* | ||
| 154 | * Pretty much as defined in the PCI Specifications. | ||
| 155 | */ | ||
| 156 | struct pci_function_description { | ||
| 157 | u16 v_id; /* vendor ID */ | ||
| 158 | u16 d_id; /* device ID */ | ||
| 159 | u8 rev; | ||
| 160 | u8 prog_intf; | ||
| 161 | u8 subclass; | ||
| 162 | u8 base_class; | ||
| 163 | u32 subsystem_id; | ||
| 164 | union win_slot_encoding win_slot; | ||
| 165 | u32 ser; /* serial number */ | ||
| 166 | } __packed; | ||
| 167 | |||
| 168 | /** | ||
| 169 | * struct hv_msi_desc | ||
| 170 | * @vector: IDT entry | ||
| 171 | * @delivery_mode: As defined in Intel's Programmer's | ||
| 172 | * Reference Manual, Volume 3, Chapter 8. | ||
| 173 | * @vector_count: Number of contiguous entries in the | ||
| 174 | * Interrupt Descriptor Table that are | ||
| 175 | * occupied by this Message-Signaled | ||
| 176 | * Interrupt. For "MSI", as first defined | ||
| 177 | * in PCI 2.2, this can be between 1 and | ||
| 178 | * 32. For "MSI-X," as first defined in PCI | ||
| 179 | * 3.0, this must be 1, as each MSI-X table | ||
| 180 | * entry would have its own descriptor. | ||
| 181 | * @reserved: Empty space | ||
| 182 | * @cpu_mask: All the target virtual processors. | ||
| 183 | */ | ||
| 184 | struct hv_msi_desc { | ||
| 185 | u8 vector; | ||
| 186 | u8 delivery_mode; | ||
| 187 | u16 vector_count; | ||
| 188 | u32 reserved; | ||
| 189 | u64 cpu_mask; | ||
| 190 | } __packed; | ||
| 191 | |||
| 192 | /** | ||
| 193 | * struct hv_msi_desc2 - 1.2 version of hv_msi_desc | ||
| 194 | * @vector: IDT entry | ||
| 195 | * @delivery_mode: As defined in Intel's Programmer's | ||
| 196 | * Reference Manual, Volume 3, Chapter 8. | ||
| 197 | * @vector_count: Number of contiguous entries in the | ||
| 198 | * Interrupt Descriptor Table that are | ||
| 199 | * occupied by this Message-Signaled | ||
| 200 | * Interrupt. For "MSI", as first defined | ||
| 201 | * in PCI 2.2, this can be between 1 and | ||
| 202 | * 32. For "MSI-X," as first defined in PCI | ||
| 203 | * 3.0, this must be 1, as each MSI-X table | ||
| 204 | * entry would have its own descriptor. | ||
| 205 | * @processor_count: number of bits enabled in array. | ||
| 206 | * @processor_array: All the target virtual processors. | ||
| 207 | */ | ||
| 208 | struct hv_msi_desc2 { | ||
| 209 | u8 vector; | ||
| 210 | u8 delivery_mode; | ||
| 211 | u16 vector_count; | ||
| 212 | u16 processor_count; | ||
| 213 | u16 processor_array[32]; | ||
| 214 | } __packed; | ||
| 215 | |||
| 216 | /** | ||
| 217 | * struct tran_int_desc | ||
| 218 | * @reserved: unused, padding | ||
| 219 | * @vector_count: same as in hv_msi_desc | ||
| 220 | * @data: This is the "data payload" value that is | ||
| 221 | * written by the device when it generates | ||
| 222 | * a message-signaled interrupt, either MSI | ||
| 223 | * or MSI-X. | ||
| 224 | * @address: This is the address to which the data | ||
| 225 | * payload is written on interrupt | ||
| 226 | * generation. | ||
| 227 | */ | ||
| 228 | struct tran_int_desc { | ||
| 229 | u16 reserved; | ||
| 230 | u16 vector_count; | ||
| 231 | u32 data; | ||
| 232 | u64 address; | ||
| 233 | } __packed; | ||
| 234 | |||
| 235 | /* | ||
| 236 | * A generic message format for virtual PCI. | ||
| 237 | * Specific message formats are defined later in the file. | ||
| 238 | */ | ||
| 239 | |||
| 240 | struct pci_message { | ||
| 241 | u32 type; | ||
| 242 | } __packed; | ||
| 243 | |||
| 244 | struct pci_child_message { | ||
| 245 | struct pci_message message_type; | ||
| 246 | union win_slot_encoding wslot; | ||
| 247 | } __packed; | ||
| 248 | |||
| 249 | struct pci_incoming_message { | ||
| 250 | struct vmpacket_descriptor hdr; | ||
| 251 | struct pci_message message_type; | ||
| 252 | } __packed; | ||
| 253 | |||
| 254 | struct pci_response { | ||
| 255 | struct vmpacket_descriptor hdr; | ||
| 256 | s32 status; /* negative values are failures */ | ||
| 257 | } __packed; | ||
| 258 | |||
| 259 | struct pci_packet { | ||
| 260 | void (*completion_func)(void *context, struct pci_response *resp, | ||
| 261 | int resp_packet_size); | ||
| 262 | void *compl_ctxt; | ||
| 263 | |||
| 264 | struct pci_message message[0]; | ||
| 265 | }; | ||
| 266 | |||
| 267 | /* | ||
| 268 | * Specific message types supporting the PCI protocol. | ||
| 269 | */ | ||
| 270 | |||
| 271 | /* | ||
| 272 | * Version negotiation message. Sent from the guest to the host. | ||
| 273 | * The guest is free to try different versions until the host | ||
| 274 | * accepts the version. | ||
| 275 | * | ||
| 276 | * protocol_version: The protocol version requested. | ||
| 277 | * (The is_last_attempt and reservedz fields described by the | ||
| 278 | * protocol are not carried in this structure.) | ||
| 279 | */ | ||
| 280 | |||
| 281 | struct pci_version_request { | ||
| 282 | struct pci_message message_type; | ||
| 283 | u32 protocol_version; | ||
| 284 | } __packed; | ||
| 285 | |||
| 286 | /* | ||
| 287 | * Bus D0 Entry. This is sent from the guest to the host when the virtual | ||
| 288 | * bus (PCI Express port) is ready for action. | ||
| 289 | */ | ||
| 290 | |||
| 291 | struct pci_bus_d0_entry { | ||
| 292 | struct pci_message message_type; | ||
| 293 | u32 reserved; | ||
| 294 | u64 mmio_base; | ||
| 295 | } __packed; | ||
| 296 | |||
| 297 | struct pci_bus_relations { | ||
| 298 | struct pci_incoming_message incoming; | ||
| 299 | u32 device_count; | ||
| 300 | struct pci_function_description func[0]; | ||
| 301 | } __packed; | ||
| 302 | |||
| 303 | struct pci_q_res_req_response { | ||
| 304 | struct vmpacket_descriptor hdr; | ||
| 305 | s32 status; /* negative values are failures */ | ||
| 306 | u32 probed_bar[6]; | ||
| 307 | } __packed; | ||
| 308 | |||
| 309 | struct pci_set_power { | ||
| 310 | struct pci_message message_type; | ||
| 311 | union win_slot_encoding wslot; | ||
| 312 | u32 power_state; /* In Windows terms */ | ||
| 313 | u32 reserved; | ||
| 314 | } __packed; | ||
| 315 | |||
| 316 | struct pci_set_power_response { | ||
| 317 | struct vmpacket_descriptor hdr; | ||
| 318 | s32 status; /* negative values are failures */ | ||
| 319 | union win_slot_encoding wslot; | ||
| 320 | u32 resultant_state; /* In Windows terms */ | ||
| 321 | u32 reserved; | ||
| 322 | } __packed; | ||
| 323 | |||
| 324 | struct pci_resources_assigned { | ||
| 325 | struct pci_message message_type; | ||
| 326 | union win_slot_encoding wslot; | ||
| 327 | u8 memory_range[0x14][6]; /* not used here */ | ||
| 328 | u32 msi_descriptors; | ||
| 329 | u32 reserved[4]; | ||
| 330 | } __packed; | ||
| 331 | |||
| 332 | struct pci_resources_assigned2 { | ||
| 333 | struct pci_message message_type; | ||
| 334 | union win_slot_encoding wslot; | ||
| 335 | u8 memory_range[0x14][6]; /* not used here */ | ||
| 336 | u32 msi_descriptor_count; | ||
| 337 | u8 reserved[70]; | ||
| 338 | } __packed; | ||
| 339 | |||
| 340 | struct pci_create_interrupt { | ||
| 341 | struct pci_message message_type; | ||
| 342 | union win_slot_encoding wslot; | ||
| 343 | struct hv_msi_desc int_desc; | ||
| 344 | } __packed; | ||
| 345 | |||
| 346 | struct pci_create_int_response { | ||
| 347 | struct pci_response response; | ||
| 348 | u32 reserved; | ||
| 349 | struct tran_int_desc int_desc; | ||
| 350 | } __packed; | ||
| 351 | |||
| 352 | struct pci_create_interrupt2 { | ||
| 353 | struct pci_message message_type; | ||
| 354 | union win_slot_encoding wslot; | ||
| 355 | struct hv_msi_desc2 int_desc; | ||
| 356 | } __packed; | ||
| 357 | |||
| 358 | struct pci_delete_interrupt { | ||
| 359 | struct pci_message message_type; | ||
| 360 | union win_slot_encoding wslot; | ||
| 361 | struct tran_int_desc int_desc; | ||
| 362 | } __packed; | ||
| 363 | |||
| 364 | struct pci_dev_incoming { | ||
| 365 | struct pci_incoming_message incoming; | ||
| 366 | union win_slot_encoding wslot; | ||
| 367 | } __packed; | ||
| 368 | |||
| 369 | struct pci_eject_response { | ||
| 370 | struct pci_message message_type; | ||
| 371 | union win_slot_encoding wslot; | ||
| 372 | u32 status; | ||
| 373 | } __packed; | ||
| 374 | |||
| 375 | static int pci_ring_size = (4 * PAGE_SIZE); | ||
| 376 | |||
| 377 | /* | ||
| 378 | * Definitions for the interrupt steering hypercall. | ||
| 379 | */ | ||
| 380 | #define HV_PARTITION_ID_SELF ((u64)-1) | ||
| 381 | #define HVCALL_RETARGET_INTERRUPT 0x7e | ||
| 382 | |||
| 383 | struct hv_interrupt_entry { | ||
| 384 | u32 source; /* 1 for MSI(-X) */ | ||
| 385 | u32 reserved1; | ||
| 386 | u32 address; | ||
| 387 | u32 data; | ||
| 388 | }; | ||
| 389 | |||
| 390 | #define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ | ||
| 391 | |||
| 392 | struct hv_vp_set { | ||
| 393 | u64 format; /* 0 (HvGenericSetSparse4k) */ | ||
| 394 | u64 valid_banks; | ||
| 395 | u64 masks[HV_VP_SET_BANK_COUNT_MAX]; | ||
| 396 | }; | ||
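Each masks[] entry covers one bank of 64 virtual processors, so VP n lives in bank n / 64 at bit n % 64; hv_irq_unmask() below builds the sparse set exactly this way. A minimal helper-style sketch (vp_set_mark is hypothetical, not part of the driver):

/* Illustrative only: mark VP number "vp" in a sparse 4k VP set. */
static void vp_set_mark(struct hv_vp_set *set, unsigned int vp)
{
	if (vp < HV_VP_SET_BANK_COUNT_MAX * 64)
		set->masks[vp / 64] |= 1ULL << (vp % 64);
}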
| 397 | |||
| 398 | /* | ||
| 399 | * flags for hv_device_interrupt_target.flags | ||
| 400 | */ | ||
| 401 | #define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 | ||
| 402 | #define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 | ||
| 403 | |||
| 404 | struct hv_device_interrupt_target { | ||
| 405 | u32 vector; | ||
| 406 | u32 flags; | ||
| 407 | union { | ||
| 408 | u64 vp_mask; | ||
| 409 | struct hv_vp_set vp_set; | ||
| 410 | }; | ||
| 411 | }; | ||
| 412 | |||
| 413 | struct retarget_msi_interrupt { | ||
| 414 | u64 partition_id; /* use "self" */ | ||
| 415 | u64 device_id; | ||
| 416 | struct hv_interrupt_entry int_entry; | ||
| 417 | u64 reserved2; | ||
| 418 | struct hv_device_interrupt_target int_target; | ||
| 419 | } __packed; | ||
| 420 | |||
| 421 | /* | ||
| 422 | * Driver specific state. | ||
| 423 | */ | ||
| 424 | |||
| 425 | enum hv_pcibus_state { | ||
| 426 | hv_pcibus_init = 0, | ||
| 427 | hv_pcibus_probed, | ||
| 428 | hv_pcibus_installed, | ||
| 429 | hv_pcibus_removed, | ||
| 430 | hv_pcibus_maximum | ||
| 431 | }; | ||
| 432 | |||
| 433 | struct hv_pcibus_device { | ||
| 434 | struct pci_sysdata sysdata; | ||
| 435 | enum hv_pcibus_state state; | ||
| 436 | refcount_t remove_lock; | ||
| 437 | struct hv_device *hdev; | ||
| 438 | resource_size_t low_mmio_space; | ||
| 439 | resource_size_t high_mmio_space; | ||
| 440 | struct resource *mem_config; | ||
| 441 | struct resource *low_mmio_res; | ||
| 442 | struct resource *high_mmio_res; | ||
| 443 | struct completion *survey_event; | ||
| 444 | struct completion remove_event; | ||
| 445 | struct pci_bus *pci_bus; | ||
| 446 | spinlock_t config_lock; /* Avoid two threads writing index page */ | ||
| 447 | spinlock_t device_list_lock; /* Protect lists below */ | ||
| 448 | void __iomem *cfg_addr; | ||
| 449 | |||
| 450 | struct list_head resources_for_children; | ||
| 451 | |||
| 452 | struct list_head children; | ||
| 453 | struct list_head dr_list; | ||
| 454 | |||
| 455 | struct msi_domain_info msi_info; | ||
| 456 | struct msi_controller msi_chip; | ||
| 457 | struct irq_domain *irq_domain; | ||
| 458 | |||
| 459 | /* hypercall arg, must not cross page boundary */ | ||
| 460 | struct retarget_msi_interrupt retarget_msi_interrupt_params; | ||
| 461 | |||
| 462 | spinlock_t retarget_msi_interrupt_lock; | ||
| 463 | |||
| 464 | struct workqueue_struct *wq; | ||
| 465 | }; | ||
| 466 | |||
| 467 | /* | ||
| 468 | * Tracks "Device Relations" messages from the host, which must be both | ||
| 469 | * processed in order and deferred so that they don't run in the context | ||
| 470 | * of the incoming packet callback. | ||
| 471 | */ | ||
| 472 | struct hv_dr_work { | ||
| 473 | struct work_struct wrk; | ||
| 474 | struct hv_pcibus_device *bus; | ||
| 475 | }; | ||
| 476 | |||
| 477 | struct hv_dr_state { | ||
| 478 | struct list_head list_entry; | ||
| 479 | u32 device_count; | ||
| 480 | struct pci_function_description func[0]; | ||
| 481 | }; | ||
| 482 | |||
| 483 | enum hv_pcichild_state { | ||
| 484 | hv_pcichild_init = 0, | ||
| 485 | hv_pcichild_requirements, | ||
| 486 | hv_pcichild_resourced, | ||
| 487 | hv_pcichild_ejecting, | ||
| 488 | hv_pcichild_maximum | ||
| 489 | }; | ||
| 490 | |||
| 491 | struct hv_pci_dev { | ||
| 492 | /* List protected by pci_rescan_remove_lock */ | ||
| 493 | struct list_head list_entry; | ||
| 494 | refcount_t refs; | ||
| 495 | enum hv_pcichild_state state; | ||
| 496 | struct pci_function_description desc; | ||
| 497 | bool reported_missing; | ||
| 498 | struct hv_pcibus_device *hbus; | ||
| 499 | struct work_struct wrk; | ||
| 500 | |||
| 501 | /* | ||
| 502 | * What would be observed if one wrote 0xFFFFFFFF to a BAR and then | ||
| 503 | * read it back, for each of the BAR offsets within config space. | ||
| 504 | */ | ||
| 505 | u32 probed_bar[6]; | ||
| 506 | }; | ||
| 507 | |||
| 508 | struct hv_pci_compl { | ||
| 509 | struct completion host_event; | ||
| 510 | s32 completion_status; | ||
| 511 | }; | ||
| 512 | |||
| 513 | static void hv_pci_onchannelcallback(void *context); | ||
| 514 | |||
| 515 | /** | ||
| 516 | * hv_pci_generic_compl() - Invoked for a completion packet | ||
| 517 | * @context: Set up by the sender of the packet. | ||
| 518 | * @resp: The response packet | ||
| 519 | * @resp_packet_size: Size in bytes of the packet | ||
| 520 | * | ||
| 521 | * This function is used to trigger an event and report status | ||
| 522 | * for any message for which the completion packet contains a | ||
| 523 | * status and nothing else. | ||
| 524 | */ | ||
| 525 | static void hv_pci_generic_compl(void *context, struct pci_response *resp, | ||
| 526 | int resp_packet_size) | ||
| 527 | { | ||
| 528 | struct hv_pci_compl *comp_pkt = context; | ||
| 529 | |||
| 530 | if (resp_packet_size >= offsetofend(struct pci_response, status)) | ||
| 531 | comp_pkt->completion_status = resp->status; | ||
| 532 | else | ||
| 533 | comp_pkt->completion_status = -1; | ||
| 534 | |||
| 535 | complete(&comp_pkt->host_event); | ||
| 536 | } | ||
| 537 | |||
| 538 | static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, | ||
| 539 | u32 wslot); | ||
| 540 | |||
| 541 | static void get_pcichild(struct hv_pci_dev *hpdev) | ||
| 542 | { | ||
| 543 | refcount_inc(&hpdev->refs); | ||
| 544 | } | ||
| 545 | |||
| 546 | static void put_pcichild(struct hv_pci_dev *hpdev) | ||
| 547 | { | ||
| 548 | if (refcount_dec_and_test(&hpdev->refs)) | ||
| 549 | kfree(hpdev); | ||
| 550 | } | ||
| 551 | |||
| 552 | static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); | ||
| 553 | static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); | ||
| 554 | |||
| 555 | /* | ||
| 556 | * There is no good way to get notified from vmbus_onoffer_rescind(), | ||
| 557 | * so let's use polling here, since this is not a hot path. | ||
| 558 | */ | ||
| 559 | static int wait_for_response(struct hv_device *hdev, | ||
| 560 | struct completion *comp) | ||
| 561 | { | ||
| 562 | while (true) { | ||
| 563 | if (hdev->channel->rescind) { | ||
| 564 | dev_warn_once(&hdev->device, "The device is gone.\n"); | ||
| 565 | return -ENODEV; | ||
| 566 | } | ||
| 567 | |||
| 568 | if (wait_for_completion_timeout(comp, HZ / 10)) | ||
| 569 | break; | ||
| 570 | } | ||
| 571 | |||
| 572 | return 0; | ||
| 573 | } | ||
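A typical caller embeds a completion in its context structure, sends the request, and then blocks here. A hedged sketch of that pattern, reusing the hv_pci_compl type defined above (send_and_wait_sketch is hypothetical):

/* Illustrative usage only. */
static int send_and_wait_sketch(struct hv_device *hdev)
{
	struct hv_pci_compl comp_pkt;

	init_completion(&comp_pkt.host_event);
	/* ... build a request and vmbus_sendpacket() it here ... */
	if (wait_for_response(hdev, &comp_pkt.host_event))
		return -ENODEV;	/* channel rescinded while waiting */

	return comp_pkt.completion_status;
}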
| 574 | |||
| 575 | /** | ||
| 576 | * devfn_to_wslot() - Convert from Linux PCI slot to Windows | ||
| 577 | * @devfn: The Linux representation of PCI slot | ||
| 578 | * | ||
| 579 | * Windows uses a slightly different representation of PCI slot. | ||
| 580 | * | ||
| 581 | * Return: The Windows representation | ||
| 582 | */ | ||
| 583 | static u32 devfn_to_wslot(int devfn) | ||
| 584 | { | ||
| 585 | union win_slot_encoding wslot; | ||
| 586 | |||
| 587 | wslot.slot = 0; | ||
| 588 | wslot.bits.dev = PCI_SLOT(devfn); | ||
| 589 | wslot.bits.func = PCI_FUNC(devfn); | ||
| 590 | |||
| 591 | return wslot.slot; | ||
| 592 | } | ||
| 593 | |||
| 594 | /** | ||
| 595 | * wslot_to_devfn() - Convert from Windows PCI slot to Linux | ||
| 596 | * @wslot: The Windows representation of PCI slot | ||
| 597 | * | ||
| 598 | * Windows uses a slightly different representation of PCI slot. | ||
| 599 | * | ||
| 600 | * Return: The Linux representation | ||
| 601 | */ | ||
| 602 | static int wslot_to_devfn(u32 wslot) | ||
| 603 | { | ||
| 604 | union win_slot_encoding slot_no; | ||
| 605 | |||
| 606 | slot_no.slot = wslot; | ||
| 607 | return PCI_DEVFN(slot_no.bits.dev, slot_no.bits.func); | ||
| 608 | } | ||
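A worked round trip: Linux devfn 0x0a (device 1, function 2) becomes wslot 0x41 (dev = 1 in bits 0-4, func = 2 in bits 5-7) and converts back losslessly. A sketch, assuming PCI_DEVFN() from <linux/pci.h> (wslot_roundtrip_example is hypothetical):

/* Illustrative only: the two encodings round-trip. */
static void wslot_roundtrip_example(void)
{
	int devfn = PCI_DEVFN(1, 2);		/* 0x0a */
	u32 wslot = devfn_to_wslot(devfn);	/* 0x41 */

	WARN_ON(wslot_to_devfn(wslot) != devfn);
}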
| 609 | |||
| 610 | /* | ||
| 611 | * PCI Configuration Space for these root PCI buses is implemented as a pair | ||
| 612 | * of pages in memory-mapped I/O space. Writing to the first page chooses | ||
| 613 | * the PCI function being written or read. Once the first page has been | ||
| 614 | * written to, the following page maps in the entire configuration space of | ||
| 615 | * the function. | ||
| 616 | */ | ||
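Condensed to its essentials, every access below is a select-then-access sequence performed under config_lock; a sketch of the 32-bit read case (cfg_select_and_read_sketch is illustrative, not part of the driver):

/* Illustrative only: select the function, then touch its config space. */
static u32 cfg_select_and_read_sketch(struct hv_pci_dev *hpdev, int where)
{
	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where;
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
	mb();			/* the select must land before the access */
	val = readl(addr);
	mb();			/* and the access before the next select */
	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
	return val;
}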
| 617 | |||
| 618 | /** | ||
| 619 | * _hv_pcifront_read_config() - Internal PCI config read | ||
| 620 | * @hpdev: The PCI driver's representation of the device | ||
| 621 | * @where: Offset within config space | ||
| 622 | * @size: Size of the transfer | ||
| 623 | * @val: Pointer to the buffer receiving the data | ||
| 624 | */ | ||
| 625 | static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where, | ||
| 626 | int size, u32 *val) | ||
| 627 | { | ||
| 628 | unsigned long flags; | ||
| 629 | void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; | ||
| 630 | |||
| 631 | /* | ||
| 632 | * If the attempt is to read the IDs or the ROM BAR, simulate that. | ||
| 633 | */ | ||
| 634 | if (where + size <= PCI_COMMAND) { | ||
| 635 | memcpy(val, ((u8 *)&hpdev->desc.v_id) + where, size); | ||
| 636 | } else if (where >= PCI_CLASS_REVISION && where + size <= | ||
| 637 | PCI_CACHE_LINE_SIZE) { | ||
| 638 | memcpy(val, ((u8 *)&hpdev->desc.rev) + where - | ||
| 639 | PCI_CLASS_REVISION, size); | ||
| 640 | } else if (where >= PCI_SUBSYSTEM_VENDOR_ID && where + size <= | ||
| 641 | PCI_ROM_ADDRESS) { | ||
| 642 | memcpy(val, (u8 *)&hpdev->desc.subsystem_id + where - | ||
| 643 | PCI_SUBSYSTEM_VENDOR_ID, size); | ||
| 644 | } else if (where >= PCI_ROM_ADDRESS && where + size <= | ||
| 645 | PCI_CAPABILITY_LIST) { | ||
| 646 | /* ROM BARs are unimplemented */ | ||
| 647 | *val = 0; | ||
| 648 | } else if (where >= PCI_INTERRUPT_LINE && where + size <= | ||
| 649 | PCI_INTERRUPT_PIN) { | ||
| 650 | /* | ||
| 651 | * Interrupt Line and Interrupt PIN are hard-wired to zero | ||
| 652 | * because this front-end only supports message-signaled | ||
| 653 | * interrupts. | ||
| 654 | */ | ||
| 655 | *val = 0; | ||
| 656 | } else if (where + size <= CFG_PAGE_SIZE) { | ||
| 657 | spin_lock_irqsave(&hpdev->hbus->config_lock, flags); | ||
| 658 | /* Choose the function to be read. (See comment above) */ | ||
| 659 | writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); | ||
| 660 | /* Make sure the function was chosen before we start reading. */ | ||
| 661 | mb(); | ||
| 662 | /* Read from that function's config space. */ | ||
| 663 | switch (size) { | ||
| 664 | case 1: | ||
| 665 | *val = readb(addr); | ||
| 666 | break; | ||
| 667 | case 2: | ||
| 668 | *val = readw(addr); | ||
| 669 | break; | ||
| 670 | default: | ||
| 671 | *val = readl(addr); | ||
| 672 | break; | ||
| 673 | } | ||
| 674 | /* | ||
| 675 | * Make sure the read was done before we release the spinlock | ||
| 676 | * allowing consecutive reads/writes. | ||
| 677 | */ | ||
| 678 | mb(); | ||
| 679 | spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); | ||
| 680 | } else { | ||
| 681 | dev_err(&hpdev->hbus->hdev->device, | ||
| 682 | "Attempt to read beyond a function's config space.\n"); | ||
| 683 | } | ||
| 684 | } | ||
| 685 | |||
| 686 | static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev) | ||
| 687 | { | ||
| 688 | u16 ret; | ||
| 689 | unsigned long flags; | ||
| 690 | void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + | ||
| 691 | PCI_VENDOR_ID; | ||
| 692 | |||
| 693 | spin_lock_irqsave(&hpdev->hbus->config_lock, flags); | ||
| 694 | |||
| 695 | /* Choose the function to be read. (See comment above) */ | ||
| 696 | writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); | ||
| 697 | /* Make sure the function was chosen before we start reading. */ | ||
| 698 | mb(); | ||
| 699 | /* Read from that function's config space. */ | ||
| 700 | ret = readw(addr); | ||
| 701 | /* | ||
| 702 | * mb() is not required here, because the spin_unlock_irqrestore() | ||
| 703 | * is a barrier. | ||
| 704 | */ | ||
| 705 | |||
| 706 | spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); | ||
| 707 | |||
| 708 | return ret; | ||
| 709 | } | ||
| 710 | |||
| 711 | /** | ||
| 712 | * _hv_pcifront_write_config() - Internal PCI config write | ||
| 713 | * @hpdev: The PCI driver's representation of the device | ||
| 714 | * @where: Offset within config space | ||
| 715 | * @size: Size of the transfer | ||
| 716 | * @val: The data being transferred | ||
| 717 | */ | ||
| 718 | static void _hv_pcifront_write_config(struct hv_pci_dev *hpdev, int where, | ||
| 719 | int size, u32 val) | ||
| 720 | { | ||
| 721 | unsigned long flags; | ||
| 722 | void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET + where; | ||
| 723 | |||
| 724 | if (where >= PCI_SUBSYSTEM_VENDOR_ID && | ||
| 725 | where + size <= PCI_CAPABILITY_LIST) { | ||
| 726 | /* SSIDs and ROM BARs are read-only */ | ||
| 727 | } else if (where >= PCI_COMMAND && where + size <= CFG_PAGE_SIZE) { | ||
| 728 | spin_lock_irqsave(&hpdev->hbus->config_lock, flags); | ||
| 729 | /* Choose the function to be written. (See comment above) */ | ||
| 730 | writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr); | ||
| 731 | /* Make sure the function was chosen before we start writing. */ | ||
| 732 | wmb(); | ||
| 733 | /* Write to that function's config space. */ | ||
| 734 | switch (size) { | ||
| 735 | case 1: | ||
| 736 | writeb(val, addr); | ||
| 737 | break; | ||
| 738 | case 2: | ||
| 739 | writew(val, addr); | ||
| 740 | break; | ||
| 741 | default: | ||
| 742 | writel(val, addr); | ||
| 743 | break; | ||
| 744 | } | ||
| 745 | /* | ||
| 746 | * Make sure the write was done before we release the spinlock | ||
| 747 | * allowing consecutive reads/writes. | ||
| 748 | */ | ||
| 749 | mb(); | ||
| 750 | spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags); | ||
| 751 | } else { | ||
| 752 | dev_err(&hpdev->hbus->hdev->device, | ||
| 753 | "Attempt to write beyond a function's config space.\n"); | ||
| 754 | } | ||
| 755 | } | ||
| 756 | |||
| 757 | /** | ||
| 758 | * hv_pcifront_read_config() - Read configuration space | ||
| 759 | * @bus: PCI Bus structure | ||
| 760 | * @devfn: Device/function | ||
| 761 | * @where: Offset from base | ||
| 762 | * @size: Byte/word/dword | ||
| 763 | * @val: Pointer receiving the value read | ||
| 764 | * | ||
| 765 | * Return: PCIBIOS_SUCCESSFUL on success | ||
| 766 | * PCIBIOS_DEVICE_NOT_FOUND on failure | ||
| 767 | */ | ||
| 768 | static int hv_pcifront_read_config(struct pci_bus *bus, unsigned int devfn, | ||
| 769 | int where, int size, u32 *val) | ||
| 770 | { | ||
| 771 | struct hv_pcibus_device *hbus = | ||
| 772 | container_of(bus->sysdata, struct hv_pcibus_device, sysdata); | ||
| 773 | struct hv_pci_dev *hpdev; | ||
| 774 | |||
| 775 | hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); | ||
| 776 | if (!hpdev) | ||
| 777 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 778 | |||
| 779 | _hv_pcifront_read_config(hpdev, where, size, val); | ||
| 780 | |||
| 781 | put_pcichild(hpdev); | ||
| 782 | return PCIBIOS_SUCCESSFUL; | ||
| 783 | } | ||
| 784 | |||
| 785 | /** | ||
| 786 | * hv_pcifront_write_config() - Write configuration space | ||
| 787 | * @bus: PCI Bus structure | ||
| 788 | * @devfn: Device/function | ||
| 789 | * @where: Offset from base | ||
| 790 | * @size: Byte/word/dword | ||
| 791 | * @val: Value to be written to device | ||
| 792 | * | ||
| 793 | * Return: PCIBIOS_SUCCESSFUL on success | ||
| 794 | * PCIBIOS_DEVICE_NOT_FOUND on failure | ||
| 795 | */ | ||
| 796 | static int hv_pcifront_write_config(struct pci_bus *bus, unsigned int devfn, | ||
| 797 | int where, int size, u32 val) | ||
| 798 | { | ||
| 799 | struct hv_pcibus_device *hbus = | ||
| 800 | container_of(bus->sysdata, struct hv_pcibus_device, sysdata); | ||
| 801 | struct hv_pci_dev *hpdev; | ||
| 802 | |||
| 803 | hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(devfn)); | ||
| 804 | if (!hpdev) | ||
| 805 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 806 | |||
| 807 | _hv_pcifront_write_config(hpdev, where, size, val); | ||
| 808 | |||
| 809 | put_pcichild(hpdev); | ||
| 810 | return PCIBIOS_SUCCESSFUL; | ||
| 811 | } | ||
| 812 | |||
| 813 | /* PCIe operations */ | ||
| 814 | static struct pci_ops hv_pcifront_ops = { | ||
| 815 | .read = hv_pcifront_read_config, | ||
| 816 | .write = hv_pcifront_write_config, | ||
| 817 | }; | ||
| 818 | |||
| 819 | /* Interrupt management hooks */ | ||
| 820 | static void hv_int_desc_free(struct hv_pci_dev *hpdev, | ||
| 821 | struct tran_int_desc *int_desc) | ||
| 822 | { | ||
| 823 | struct pci_delete_interrupt *int_pkt; | ||
| 824 | struct { | ||
| 825 | struct pci_packet pkt; | ||
| 826 | u8 buffer[sizeof(struct pci_delete_interrupt)]; | ||
| 827 | } ctxt; | ||
| 828 | |||
| 829 | memset(&ctxt, 0, sizeof(ctxt)); | ||
| 830 | int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; | ||
| 831 | int_pkt->message_type.type = | ||
| 832 | PCI_DELETE_INTERRUPT_MESSAGE; | ||
| 833 | int_pkt->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 834 | int_pkt->int_desc = *int_desc; | ||
| 835 | vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, sizeof(*int_pkt), | ||
| 836 | (unsigned long)&ctxt.pkt, VM_PKT_DATA_INBAND, 0); | ||
| 837 | kfree(int_desc); | ||
| 838 | } | ||
| 839 | |||
| 840 | /** | ||
| 841 | * hv_msi_free() - Free the MSI. | ||
| 842 | * @domain: The interrupt domain pointer | ||
| 843 | * @info: Extra MSI-related context | ||
| 844 | * @irq: Identifies the IRQ. | ||
| 845 | * | ||
| 846 | * The Hyper-V parent partition and hypervisor are tracking the | ||
| 847 | * messages that are in use, keeping the interrupt redirection | ||
| 848 | * table up to date. This callback sends a message that frees | ||
| 849 | * the IRT entry and related tracking state. | ||
| 850 | */ | ||
| 851 | static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info, | ||
| 852 | unsigned int irq) | ||
| 853 | { | ||
| 854 | struct hv_pcibus_device *hbus; | ||
| 855 | struct hv_pci_dev *hpdev; | ||
| 856 | struct pci_dev *pdev; | ||
| 857 | struct tran_int_desc *int_desc; | ||
| 858 | struct irq_data *irq_data = irq_domain_get_irq_data(domain, irq); | ||
| 859 | struct msi_desc *msi = irq_data_get_msi_desc(irq_data); | ||
| 860 | |||
| 861 | pdev = msi_desc_to_pci_dev(msi); | ||
| 862 | hbus = info->data; | ||
| 863 | int_desc = irq_data_get_irq_chip_data(irq_data); | ||
| 864 | if (!int_desc) | ||
| 865 | return; | ||
| 866 | |||
| 867 | irq_data->chip_data = NULL; | ||
| 868 | hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); | ||
| 869 | if (!hpdev) { | ||
| 870 | kfree(int_desc); | ||
| 871 | return; | ||
| 872 | } | ||
| 873 | |||
| 874 | hv_int_desc_free(hpdev, int_desc); | ||
| 875 | put_pcichild(hpdev); | ||
| 876 | } | ||
| 877 | |||
| 878 | static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest, | ||
| 879 | bool force) | ||
| 880 | { | ||
| 881 | struct irq_data *parent = data->parent_data; | ||
| 882 | |||
| 883 | return parent->chip->irq_set_affinity(parent, dest, force); | ||
| 884 | } | ||
| 885 | |||
| 886 | static void hv_irq_mask(struct irq_data *data) | ||
| 887 | { | ||
| 888 | pci_msi_mask_irq(data); | ||
| 889 | } | ||
| 890 | |||
| 891 | /** | ||
| 892 | * hv_irq_unmask() - "Unmask" the IRQ by setting its current | ||
| 893 | * affinity. | ||
| 894 | * @data: Describes the IRQ | ||
| 895 | * | ||
| 896 | * Build a new destination for the MSI and make a hypercall to | ||
| 897 | * update the Interrupt Redirection Table. "Device Logical ID" | ||
| 898 | * is built out of this PCI bus's instance GUID and the function | ||
| 899 | * number of the device. | ||
| 900 | */ | ||
| 901 | static void hv_irq_unmask(struct irq_data *data) | ||
| 902 | { | ||
| 903 | struct msi_desc *msi_desc = irq_data_get_msi_desc(data); | ||
| 904 | struct irq_cfg *cfg = irqd_cfg(data); | ||
| 905 | struct retarget_msi_interrupt *params; | ||
| 906 | struct hv_pcibus_device *hbus; | ||
| 907 | struct cpumask *dest; | ||
| 908 | struct pci_bus *pbus; | ||
| 909 | struct pci_dev *pdev; | ||
| 910 | unsigned long flags; | ||
| 911 | u32 var_size = 0; | ||
| 912 | int cpu_vmbus; | ||
| 913 | int cpu; | ||
| 914 | u64 res; | ||
| 915 | |||
| 916 | dest = irq_data_get_effective_affinity_mask(data); | ||
| 917 | pdev = msi_desc_to_pci_dev(msi_desc); | ||
| 918 | pbus = pdev->bus; | ||
| 919 | hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); | ||
| 920 | |||
| 921 | spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags); | ||
| 922 | |||
| 923 | params = &hbus->retarget_msi_interrupt_params; | ||
| 924 | memset(params, 0, sizeof(*params)); | ||
| 925 | params->partition_id = HV_PARTITION_ID_SELF; | ||
| 926 | params->int_entry.source = 1; /* MSI(-X) */ | ||
| 927 | params->int_entry.address = msi_desc->msg.address_lo; | ||
| 928 | params->int_entry.data = msi_desc->msg.data; | ||
| 929 | params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | | ||
| 930 | (hbus->hdev->dev_instance.b[4] << 16) | | ||
| 931 | (hbus->hdev->dev_instance.b[7] << 8) | | ||
| 932 | (hbus->hdev->dev_instance.b[6] & 0xf8) | | ||
| 933 | PCI_FUNC(pdev->devfn); | ||
| 934 | params->int_target.vector = cfg->vector; | ||
| 935 | |||
| 936 | /* | ||
| 937 | * Honoring apic->irq_delivery_mode set to dest_Fixed by | ||
| 938 | * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a | ||
| 939 | * spurious interrupt storm. Not doing so does not seem to have a | ||
| 940 | * negative effect (yet?). | ||
| 941 | */ | ||
| 942 | |||
| 943 | if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { | ||
| 944 | /* | ||
| 945 | * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the | ||
| 946 | * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides | ||
| 947 | * with >64 VP support. | ||
| 948 | * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED | ||
| 949 | * is not sufficient for this hypercall. | ||
| 950 | */ | ||
| 951 | params->int_target.flags |= | ||
| 952 | HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; | ||
| 953 | params->int_target.vp_set.valid_banks = | ||
| 954 | (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; | ||
| 955 | |||
| 956 | /* | ||
| 957 | * var-sized hypercall, var-size starts after vp_mask (thus | ||
| 958 | * vp_set.format does not count, but vp_set.valid_banks does). | ||
| 959 | */ | ||
| 960 | var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; | ||
| 961 | |||
| 962 | for_each_cpu_and(cpu, dest, cpu_online_mask) { | ||
| 963 | cpu_vmbus = hv_cpu_number_to_vp_number(cpu); | ||
| 964 | |||
| 965 | if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { | ||
| 966 | dev_err(&hbus->hdev->device, | ||
| 967 | "too high CPU %d", cpu_vmbus); | ||
| 968 | res = 1; | ||
| 969 | goto exit_unlock; | ||
| 970 | } | ||
| 971 | |||
| 972 | params->int_target.vp_set.masks[cpu_vmbus / 64] |= | ||
| 973 | (1ULL << (cpu_vmbus & 63)); | ||
| 974 | } | ||
| 975 | } else { | ||
| 976 | for_each_cpu_and(cpu, dest, cpu_online_mask) { | ||
| 977 | params->int_target.vp_mask |= | ||
| 978 | (1ULL << hv_cpu_number_to_vp_number(cpu)); | ||
| 979 | } | ||
| 980 | } | ||
| 981 | |||
| 982 | res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), | ||
| 983 | params, NULL); | ||
| 984 | |||
| 985 | exit_unlock: | ||
| 986 | spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); | ||
| 987 | |||
| 988 | if (res) { | ||
| 989 | dev_err(&hbus->hdev->device, | ||
| 990 | "%s() failed: %#llx", __func__, res); | ||
| 991 | return; | ||
| 992 | } | ||
| 993 | |||
| 994 | pci_msi_unmask_irq(data); | ||
| 995 | } | ||
| 996 | |||
| 997 | struct compose_comp_ctxt { | ||
| 998 | struct hv_pci_compl comp_pkt; | ||
| 999 | struct tran_int_desc int_desc; | ||
| 1000 | }; | ||
| 1001 | |||
| 1002 | static void hv_pci_compose_compl(void *context, struct pci_response *resp, | ||
| 1003 | int resp_packet_size) | ||
| 1004 | { | ||
| 1005 | struct compose_comp_ctxt *comp_pkt = context; | ||
| 1006 | struct pci_create_int_response *int_resp = | ||
| 1007 | (struct pci_create_int_response *)resp; | ||
| 1008 | |||
| 1009 | comp_pkt->comp_pkt.completion_status = resp->status; | ||
| 1010 | comp_pkt->int_desc = int_resp->int_desc; | ||
| 1011 | complete(&comp_pkt->comp_pkt.host_event); | ||
| 1012 | } | ||
| 1013 | |||
| 1014 | static u32 hv_compose_msi_req_v1( | ||
| 1015 | struct pci_create_interrupt *int_pkt, struct cpumask *affinity, | ||
| 1016 | u32 slot, u8 vector) | ||
| 1017 | { | ||
| 1018 | int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; | ||
| 1019 | int_pkt->wslot.slot = slot; | ||
| 1020 | int_pkt->int_desc.vector = vector; | ||
| 1021 | int_pkt->int_desc.vector_count = 1; | ||
| 1022 | int_pkt->int_desc.delivery_mode = dest_Fixed; | ||
| 1023 | |||
| 1024 | /* | ||
| 1025 | * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in | ||
| 1026 | * hv_irq_unmask(). | ||
| 1027 | */ | ||
| 1028 | int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; | ||
| 1029 | |||
| 1030 | return sizeof(*int_pkt); | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | static u32 hv_compose_msi_req_v2( | ||
| 1034 | struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, | ||
| 1035 | u32 slot, u8 vector) | ||
| 1036 | { | ||
| 1037 | int cpu; | ||
| 1038 | |||
| 1039 | int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; | ||
| 1040 | int_pkt->wslot.slot = slot; | ||
| 1041 | int_pkt->int_desc.vector = vector; | ||
| 1042 | int_pkt->int_desc.vector_count = 1; | ||
| 1043 | int_pkt->int_desc.delivery_mode = dest_Fixed; | ||
| 1044 | |||
| 1045 | /* | ||
| 1046 | * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten | ||
| 1047 | * by subsequent retarget in hv_irq_unmask(). | ||
| 1048 | */ | ||
| 1049 | cpu = cpumask_first_and(affinity, cpu_online_mask); | ||
| 1050 | int_pkt->int_desc.processor_array[0] = | ||
| 1051 | hv_cpu_number_to_vp_number(cpu); | ||
| 1052 | int_pkt->int_desc.processor_count = 1; | ||
| 1053 | |||
| 1054 | return sizeof(*int_pkt); | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | /** | ||
| 1058 | * hv_compose_msi_msg() - Supplies a valid MSI address/data | ||
| 1059 | * @data: Everything about this MSI | ||
| 1060 | * @msg: Buffer that is filled in by this function | ||
| 1061 | * | ||
| 1062 | * This function unpacks the IRQ looking for target CPU set, IDT | ||
| 1063 | * vector and mode and sends a message to the parent partition | ||
| 1064 | * asking for a mapping for that tuple in this partition. The | ||
| 1065 | * response supplies a data value and address to which that data | ||
| 1066 | * should be written to trigger that interrupt. | ||
| 1067 | */ | ||
| 1068 | static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 1069 | { | ||
| 1070 | struct irq_cfg *cfg = irqd_cfg(data); | ||
| 1071 | struct hv_pcibus_device *hbus; | ||
| 1072 | struct hv_pci_dev *hpdev; | ||
| 1073 | struct pci_bus *pbus; | ||
| 1074 | struct pci_dev *pdev; | ||
| 1075 | struct cpumask *dest; | ||
| 1076 | struct compose_comp_ctxt comp; | ||
| 1077 | struct tran_int_desc *int_desc; | ||
| 1078 | struct { | ||
| 1079 | struct pci_packet pci_pkt; | ||
| 1080 | union { | ||
| 1081 | struct pci_create_interrupt v1; | ||
| 1082 | struct pci_create_interrupt2 v2; | ||
| 1083 | } int_pkts; | ||
| 1084 | } __packed ctxt; | ||
| 1085 | |||
| 1086 | u32 size; | ||
| 1087 | int ret; | ||
| 1088 | |||
| 1089 | pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); | ||
| 1090 | dest = irq_data_get_effective_affinity_mask(data); | ||
| 1091 | pbus = pdev->bus; | ||
| 1092 | hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); | ||
| 1093 | hpdev = get_pcichild_wslot(hbus, devfn_to_wslot(pdev->devfn)); | ||
| 1094 | if (!hpdev) | ||
| 1095 | goto return_null_message; | ||
| 1096 | |||
| 1097 | /* Free any previous message that might have already been composed. */ | ||
| 1098 | if (data->chip_data) { | ||
| 1099 | int_desc = data->chip_data; | ||
| 1100 | data->chip_data = NULL; | ||
| 1101 | hv_int_desc_free(hpdev, int_desc); | ||
| 1102 | } | ||
| 1103 | |||
| 1104 | int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); | ||
| 1105 | if (!int_desc) | ||
| 1106 | goto drop_reference; | ||
| 1107 | |||
| 1108 | memset(&ctxt, 0, sizeof(ctxt)); | ||
| 1109 | init_completion(&comp.comp_pkt.host_event); | ||
| 1110 | ctxt.pci_pkt.completion_func = hv_pci_compose_compl; | ||
| 1111 | ctxt.pci_pkt.compl_ctxt = ∁ | ||
| 1112 | |||
| 1113 | switch (pci_protocol_version) { | ||
| 1114 | case PCI_PROTOCOL_VERSION_1_1: | ||
| 1115 | size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, | ||
| 1116 | dest, | ||
| 1117 | hpdev->desc.win_slot.slot, | ||
| 1118 | cfg->vector); | ||
| 1119 | break; | ||
| 1120 | |||
| 1121 | case PCI_PROTOCOL_VERSION_1_2: | ||
| 1122 | size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, | ||
| 1123 | dest, | ||
| 1124 | hpdev->desc.win_slot.slot, | ||
| 1125 | cfg->vector); | ||
| 1126 | break; | ||
| 1127 | |||
| 1128 | default: | ||
| 1129 | /* As we only negotiate protocol versions known to this driver, | ||
| 1130 | * this path should never be hit. However, it is not a hot | ||
| 1131 | * path, so we print a message to aid future updates. | ||
| 1132 | */ | ||
| 1133 | dev_err(&hbus->hdev->device, | ||
| 1134 | "Unexpected vPCI protocol, update driver."); | ||
| 1135 | goto free_int_desc; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, | ||
| 1139 | size, (unsigned long)&ctxt.pci_pkt, | ||
| 1140 | VM_PKT_DATA_INBAND, | ||
| 1141 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 1142 | if (ret) { | ||
| 1143 | dev_err(&hbus->hdev->device, | ||
| 1144 | "Sending request for interrupt failed: 0x%x", | ||
| 1145 | comp.comp_pkt.completion_status); | ||
| 1146 | goto free_int_desc; | ||
| 1147 | } | ||
| 1148 | |||
| 1149 | /* | ||
| 1150 | * Since this function is called with IRQ locks held, can't | ||
| 1151 | * do normal wait for completion; instead poll. | ||
| 1152 | */ | ||
| 1153 | while (!try_wait_for_completion(&comp.comp_pkt.host_event)) { | ||
| 1154 | /* 0xFFFF means an invalid PCI VENDOR ID. */ | ||
| 1155 | if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) { | ||
| 1156 | dev_err_once(&hbus->hdev->device, | ||
| 1157 | "the device has gone\n"); | ||
| 1158 | goto free_int_desc; | ||
| 1159 | } | ||
| 1160 | |||
| 1161 | /* | ||
| 1162 | * When the higher level interrupt code calls us with | ||
| 1163 | * interrupts disabled, we must poll the channel by calling | ||
| 1164 | * the channel callback directly when channel->target_cpu is | ||
| 1165 | * the current CPU. When the higher level interrupt code | ||
| 1166 | * calls us with interrupts enabled, let's add the | ||
| 1167 | * local_bh_disable()/enable() to avoid a race. | ||
| 1168 | */ | ||
| 1169 | local_bh_disable(); | ||
| 1170 | |||
| 1171 | if (hbus->hdev->channel->target_cpu == smp_processor_id()) | ||
| 1172 | hv_pci_onchannelcallback(hbus); | ||
| 1173 | |||
| 1174 | local_bh_enable(); | ||
| 1175 | |||
| 1176 | if (hpdev->state == hv_pcichild_ejecting) { | ||
| 1177 | dev_err_once(&hbus->hdev->device, | ||
| 1178 | "the device is being ejected\n"); | ||
| 1179 | goto free_int_desc; | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | udelay(100); | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | if (comp.comp_pkt.completion_status < 0) { | ||
| 1186 | dev_err(&hbus->hdev->device, | ||
| 1187 | "Request for interrupt failed: 0x%x", | ||
| 1188 | comp.comp_pkt.completion_status); | ||
| 1189 | goto free_int_desc; | ||
| 1190 | } | ||
| 1191 | |||
| 1192 | /* | ||
| 1193 | * Record the assignment so that this can be unwound later. Using | ||
| 1194 | * irq_set_chip_data() here would be appropriate, but the lock it takes | ||
| 1195 | * is already held. | ||
| 1196 | */ | ||
| 1197 | *int_desc = comp.int_desc; | ||
| 1198 | data->chip_data = int_desc; | ||
| 1199 | |||
| 1200 | /* Pass up the result. */ | ||
| 1201 | msg->address_hi = comp.int_desc.address >> 32; | ||
| 1202 | msg->address_lo = comp.int_desc.address & 0xffffffff; | ||
| 1203 | msg->data = comp.int_desc.data; | ||
| 1204 | |||
| 1205 | put_pcichild(hpdev); | ||
| 1206 | return; | ||
| 1207 | |||
| 1208 | free_int_desc: | ||
| 1209 | kfree(int_desc); | ||
| 1210 | drop_reference: | ||
| 1211 | put_pcichild(hpdev); | ||
| 1212 | return_null_message: | ||
| 1213 | msg->address_hi = 0; | ||
| 1214 | msg->address_lo = 0; | ||
| 1215 | msg->data = 0; | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | /* HW Interrupt Chip Descriptor */ | ||
| 1219 | static struct irq_chip hv_msi_irq_chip = { | ||
| 1220 | .name = "Hyper-V PCIe MSI", | ||
| 1221 | .irq_compose_msi_msg = hv_compose_msi_msg, | ||
| 1222 | .irq_set_affinity = hv_set_affinity, | ||
| 1223 | .irq_ack = irq_chip_ack_parent, | ||
| 1224 | .irq_mask = hv_irq_mask, | ||
| 1225 | .irq_unmask = hv_irq_unmask, | ||
| 1226 | }; | ||
| 1227 | |||
| 1228 | static irq_hw_number_t hv_msi_domain_ops_get_hwirq(struct msi_domain_info *info, | ||
| 1229 | msi_alloc_info_t *arg) | ||
| 1230 | { | ||
| 1231 | return arg->msi_hwirq; | ||
| 1232 | } | ||
| 1233 | |||
| 1234 | static struct msi_domain_ops hv_msi_ops = { | ||
| 1235 | .get_hwirq = hv_msi_domain_ops_get_hwirq, | ||
| 1236 | .msi_prepare = pci_msi_prepare, | ||
| 1237 | .set_desc = pci_msi_set_desc, | ||
| 1238 | .msi_free = hv_msi_free, | ||
| 1239 | }; | ||
| 1240 | |||
| 1241 | /** | ||
| 1242 | * hv_pcie_init_irq_domain() - Initialize IRQ domain | ||
| 1243 | * @hbus: The root PCI bus | ||
| 1244 | * | ||
| 1245 | * This function creates an IRQ domain which will be used for | ||
| 1246 | * interrupts from devices that have been passed through. These | ||
| 1247 | * devices only support MSI and MSI-X, not line-based interrupts | ||
| 1248 | * or simulations of line-based interrupts through PCIe's | ||
| 1249 | * fabric-layer messages. Because interrupts are remapped, we | ||
| 1250 | * can support multi-message MSI here. | ||
| 1251 | * | ||
| 1252 | * Return: '0' on success and error value on failure | ||
| 1253 | */ | ||
| 1254 | static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus) | ||
| 1255 | { | ||
| 1256 | hbus->msi_info.chip = &hv_msi_irq_chip; | ||
| 1257 | hbus->msi_info.ops = &hv_msi_ops; | ||
| 1258 | hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS | | ||
| 1259 | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI | | ||
| 1260 | MSI_FLAG_PCI_MSIX); | ||
| 1261 | hbus->msi_info.handler = handle_edge_irq; | ||
| 1262 | hbus->msi_info.handler_name = "edge"; | ||
| 1263 | hbus->msi_info.data = hbus; | ||
| 1264 | hbus->irq_domain = pci_msi_create_irq_domain(hbus->sysdata.fwnode, | ||
| 1265 | &hbus->msi_info, | ||
| 1266 | x86_vector_domain); | ||
| 1267 | if (!hbus->irq_domain) { | ||
| 1268 | dev_err(&hbus->hdev->device, | ||
| 1269 | "Failed to build an MSI IRQ domain\n"); | ||
| 1270 | return -ENODEV; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | return 0; | ||
| 1274 | } | ||
| 1275 | |||
| 1276 | /** | ||
| 1277 | * get_bar_size() - Get the address space consumed by a BAR | ||
| 1278 | * @bar_val: Value that a BAR returned after -1 was written | ||
| 1279 | * to it. | ||
| 1280 | * | ||
| 1281 | * This function returns the size of the BAR, rounded up to 1 | ||
| 1282 | * page. It has to be rounded up because the hypervisor's page | ||
| 1283 | * table entry that maps the BAR into the VM can't specify an | ||
| 1284 | * offset within a page. The invariant is that the hypervisor | ||
| 1285 | * must place any BARs smaller than page length at the | ||
| 1286 | * beginning of a page. | ||
| 1287 | * | ||
| 1288 | * Return: Size in bytes of the consumed MMIO space. | ||
| 1289 | */ | ||
| 1290 | static u64 get_bar_size(u64 bar_val) | ||
| 1291 | { | ||
| 1292 | return round_up((1 + ~(bar_val & PCI_BASE_ADDRESS_MEM_MASK)), | ||
| 1293 | PAGE_SIZE); | ||
| 1294 | } | ||
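A few worked read-back values, assuming 4 KiB pages; note the third case, where the page rounding actually changes the result:

/*
 * Illustrative values after writing all ones to a BAR:
 *
 *   get_bar_size(0xfffffffffffff000ULL) == 0x1000    (4 KiB BAR)
 *   get_bar_size(0xffffffffffe00000ULL) == 0x200000  (2 MiB BAR)
 *   get_bar_size(0xffffffffffffff80ULL) == 0x1000    (128-byte BAR,
 *                                                     rounded up to a page)
 */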
| 1295 | |||
| 1296 | /** | ||
| 1297 | * survey_child_resources() - Total all MMIO requirements | ||
| 1298 | * @hbus: Root PCI bus, as understood by this driver | ||
| 1299 | */ | ||
| 1300 | static void survey_child_resources(struct hv_pcibus_device *hbus) | ||
| 1301 | { | ||
| 1302 | struct hv_pci_dev *hpdev; | ||
| 1303 | resource_size_t bar_size = 0; | ||
| 1304 | unsigned long flags; | ||
| 1305 | struct completion *event; | ||
| 1306 | u64 bar_val; | ||
| 1307 | int i; | ||
| 1308 | |||
| 1309 | /* If nobody is waiting on the answer, don't compute it. */ | ||
| 1310 | event = xchg(&hbus->survey_event, NULL); | ||
| 1311 | if (!event) | ||
| 1312 | return; | ||
| 1313 | |||
| 1314 | /* If the answer has already been computed, go with it. */ | ||
| 1315 | if (hbus->low_mmio_space || hbus->high_mmio_space) { | ||
| 1316 | complete(event); | ||
| 1317 | return; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1321 | |||
| 1322 | /* | ||
| 1323 | * Due to an interesting quirk of the PCI spec, all memory regions | ||
| 1324 | * for a child device are a power of 2 in size and aligned in memory, | ||
| 1325 | * so it's sufficient to just add them up without tracking alignment. | ||
| 1326 | */ | ||
| 1327 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1328 | for (i = 0; i < 6; i++) { | ||
| 1329 | if (hpdev->probed_bar[i] & PCI_BASE_ADDRESS_SPACE_IO) | ||
| 1330 | dev_err(&hbus->hdev->device, | ||
| 1331 | "There's an I/O BAR in this list!\n"); | ||
| 1332 | |||
| 1333 | if (hpdev->probed_bar[i] != 0) { | ||
| 1334 | /* | ||
| 1335 | * A probed BAR has all the upper bits set that | ||
| 1336 | * can be changed. | ||
| 1337 | */ | ||
| 1338 | |||
| 1339 | bar_val = hpdev->probed_bar[i]; | ||
| 1340 | if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
| 1341 | bar_val |= | ||
| 1342 | ((u64)hpdev->probed_bar[++i] << 32); | ||
| 1343 | else | ||
| 1344 | bar_val |= 0xffffffff00000000ULL; | ||
| 1345 | |||
| 1346 | bar_size = get_bar_size(bar_val); | ||
| 1347 | |||
| 1348 | if (bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64) | ||
| 1349 | hbus->high_mmio_space += bar_size; | ||
| 1350 | else | ||
| 1351 | hbus->low_mmio_space += bar_size; | ||
| 1352 | } | ||
| 1353 | } | ||
| 1354 | } | ||
| 1355 | |||
| 1356 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1357 | complete(event); | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | /** | ||
| 1361 | * prepopulate_bars() - Fill in BARs with defaults | ||
| 1362 | * @hbus: Root PCI bus, as understood by this driver | ||
| 1363 | * | ||
| 1364 | * The core PCI driver code seems much, much happier if the BARs | ||
| 1365 | * for a device have values upon first scan. So fill them in. | ||
| 1366 | * The algorithm below works down from large sizes to small, | ||
| 1367 | * attempting to pack the assignments optimally. The assumption, | ||
| 1368 | * enforced in other parts of the code, is that the beginning of | ||
| 1369 | * the memory-mapped I/O space will be aligned on the largest | ||
| 1370 | * BAR size. | ||
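| | * | ||
| | * Packing largest-first keeps every assignment naturally aligned: | ||
| | * placing a 64 KiB BAR before a 4 KiB one wastes no space, while | ||
| | * the reverse order would leave a 60 KiB alignment hole. | ||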
| 1371 | */ | ||
| 1372 | static void prepopulate_bars(struct hv_pcibus_device *hbus) | ||
| 1373 | { | ||
| 1374 | resource_size_t high_size = 0; | ||
| 1375 | resource_size_t low_size = 0; | ||
| 1376 | resource_size_t high_base = 0; | ||
| 1377 | resource_size_t low_base = 0; | ||
| 1378 | resource_size_t bar_size; | ||
| 1379 | struct hv_pci_dev *hpdev; | ||
| 1380 | unsigned long flags; | ||
| 1381 | u64 bar_val; | ||
| 1382 | u32 command; | ||
| 1383 | bool high; | ||
| 1384 | int i; | ||
| 1385 | |||
| 1386 | if (hbus->low_mmio_space) { | ||
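| | /* | ||
| | * 1ULL << (63 - clz) rounds the total requirement down to the | ||
| | * nearest power of two, i.e. the largest BAR size that could | ||
| | * possibly be present. | ||
| | */ | ||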
| 1387 | low_size = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); | ||
| 1388 | low_base = hbus->low_mmio_res->start; | ||
| 1389 | } | ||
| 1390 | |||
| 1391 | if (hbus->high_mmio_space) { | ||
| 1392 | high_size = 1ULL << | ||
| 1393 | (63 - __builtin_clzll(hbus->high_mmio_space)); | ||
| 1394 | high_base = hbus->high_mmio_res->start; | ||
| 1395 | } | ||
| 1396 | |||
| 1397 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1398 | |||
| 1399 | /* Pick addresses for the BARs. */ | ||
| 1400 | do { | ||
| 1401 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1402 | for (i = 0; i < 6; i++) { | ||
| 1403 | bar_val = hpdev->probed_bar[i]; | ||
| 1404 | if (bar_val == 0) | ||
| 1405 | continue; | ||
| 1406 | high = bar_val & PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
| 1407 | if (high) { | ||
| 1408 | bar_val |= | ||
| 1409 | ((u64)hpdev->probed_bar[i + 1] | ||
| 1410 | << 32); | ||
| 1411 | } else { | ||
| 1412 | bar_val |= 0xffffffffULL << 32; | ||
| 1413 | } | ||
| 1414 | bar_size = get_bar_size(bar_val); | ||
| 1415 | if (high) { | ||
| 1416 | if (high_size != bar_size) { | ||
| 1417 | i++; | ||
| 1418 | continue; | ||
| 1419 | } | ||
| 1420 | _hv_pcifront_write_config(hpdev, | ||
| 1421 | PCI_BASE_ADDRESS_0 + (4 * i), | ||
| 1422 | 4, | ||
| 1423 | (u32)(high_base & 0xffffff00)); | ||
| 1424 | i++; | ||
| 1425 | _hv_pcifront_write_config(hpdev, | ||
| 1426 | PCI_BASE_ADDRESS_0 + (4 * i), | ||
| 1427 | 4, (u32)(high_base >> 32)); | ||
| 1428 | high_base += bar_size; | ||
| 1429 | } else { | ||
| 1430 | if (low_size != bar_size) | ||
| 1431 | continue; | ||
| 1432 | _hv_pcifront_write_config(hpdev, | ||
| 1433 | PCI_BASE_ADDRESS_0 + (4 * i), | ||
| 1434 | 4, | ||
| 1435 | (u32)(low_base & 0xffffff00)); | ||
| 1436 | low_base += bar_size; | ||
| 1437 | } | ||
| 1438 | } | ||
| 1439 | if (high_size <= 1 && low_size <= 1) { | ||
| 1440 | /* Set the memory enable bit. */ | ||
| 1441 | _hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, | ||
| 1442 | &command); | ||
| 1443 | command |= PCI_COMMAND_MEMORY; | ||
| 1444 | _hv_pcifront_write_config(hpdev, PCI_COMMAND, 2, | ||
| 1445 | command); | ||
| 1446 | break; | ||
| 1447 | } | ||
| 1448 | } | ||
| 1449 | |||
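| | /* Try the next smaller power-of-two BAR size. */ | ||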
| 1450 | high_size >>= 1; | ||
| 1451 | low_size >>= 1; | ||
| 1452 | } while (high_size || low_size); | ||
| 1453 | |||
| 1454 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | /** | ||
| 1458 | * create_root_hv_pci_bus() - Expose a new root PCI bus | ||
| 1459 | * @hbus: Root PCI bus, as understood by this driver | ||
| 1460 | * | ||
| 1461 | * Return: 0 on success, -errno on failure | ||
| 1462 | */ | ||
| 1463 | static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus) | ||
| 1464 | { | ||
| 1465 | /* Register the device */ | ||
| 1466 | hbus->pci_bus = pci_create_root_bus(&hbus->hdev->device, | ||
| 1467 | 0, /* bus number is always zero */ | ||
| 1468 | &hv_pcifront_ops, | ||
| 1469 | &hbus->sysdata, | ||
| 1470 | &hbus->resources_for_children); | ||
| 1471 | if (!hbus->pci_bus) | ||
| 1472 | return -ENODEV; | ||
| 1473 | |||
| 1474 | hbus->pci_bus->msi = &hbus->msi_chip; | ||
| 1475 | hbus->pci_bus->msi->dev = &hbus->hdev->device; | ||
| 1476 | |||
| 1477 | pci_lock_rescan_remove(); | ||
| 1478 | pci_scan_child_bus(hbus->pci_bus); | ||
| 1479 | pci_bus_assign_resources(hbus->pci_bus); | ||
| 1480 | pci_bus_add_devices(hbus->pci_bus); | ||
| 1481 | pci_unlock_rescan_remove(); | ||
| 1482 | hbus->state = hv_pcibus_installed; | ||
| 1483 | return 0; | ||
| 1484 | } | ||
| 1485 | |||
| 1486 | struct q_res_req_compl { | ||
| 1487 | struct completion host_event; | ||
| 1488 | struct hv_pci_dev *hpdev; | ||
| 1489 | }; | ||
| 1490 | |||
| 1491 | /** | ||
| 1492 | * q_resource_requirements() - Query Resource Requirements | ||
| 1493 | * @context: The completion context. | ||
| 1494 | * @resp: The response that came from the host. | ||
| 1495 | * @resp_packet_size: The size in bytes of resp. | ||
| 1496 | * | ||
| 1497 | * This function is invoked on completion of a Query Resource | ||
| 1498 | * Requirements packet. | ||
| 1499 | */ | ||
| 1500 | static void q_resource_requirements(void *context, struct pci_response *resp, | ||
| 1501 | int resp_packet_size) | ||
| 1502 | { | ||
| 1503 | struct q_res_req_compl *completion = context; | ||
| 1504 | struct pci_q_res_req_response *q_res_req = | ||
| 1505 | (struct pci_q_res_req_response *)resp; | ||
| 1506 | int i; | ||
| 1507 | |||
| 1508 | if (resp->status < 0) { | ||
| 1509 | dev_err(&completion->hpdev->hbus->hdev->device, | ||
| 1510 | "query resource requirements failed: %x\n", | ||
| 1511 | resp->status); | ||
| 1512 | } else { | ||
| 1513 | for (i = 0; i < 6; i++) { | ||
| 1514 | completion->hpdev->probed_bar[i] = | ||
| 1515 | q_res_req->probed_bar[i]; | ||
| 1516 | } | ||
| 1517 | } | ||
| 1518 | |||
| 1519 | complete(&completion->host_event); | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | /** | ||
| 1523 | * new_pcichild_device() - Create a new child device | ||
| 1524 | * @hbus: The internal struct tracking this root PCI bus. | ||
| 1525 | * @desc: The information supplied so far from the host | ||
| 1526 | * about the device. | ||
| 1527 | * | ||
| 1528 | * This function creates the tracking structure for a new child | ||
| 1529 | * device and kicks off the process of figuring out what it is. | ||
| 1530 | * | ||
| 1531 | * Return: Pointer to the new tracking struct | ||
| 1532 | */ | ||
| 1533 | static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus, | ||
| 1534 | struct pci_function_description *desc) | ||
| 1535 | { | ||
| 1536 | struct hv_pci_dev *hpdev; | ||
| 1537 | struct pci_child_message *res_req; | ||
| 1538 | struct q_res_req_compl comp_pkt; | ||
| 1539 | struct { | ||
| 1540 | struct pci_packet init_packet; | ||
| 1541 | u8 buffer[sizeof(struct pci_child_message)]; | ||
| 1542 | } pkt; | ||
| 1543 | unsigned long flags; | ||
| 1544 | int ret; | ||
| 1545 | |||
| 1546 | hpdev = kzalloc(sizeof(*hpdev), GFP_ATOMIC); | ||
| 1547 | if (!hpdev) | ||
| 1548 | return NULL; | ||
| 1549 | |||
| 1550 | hpdev->hbus = hbus; | ||
| 1551 | |||
| 1552 | memset(&pkt, 0, sizeof(pkt)); | ||
| 1553 | init_completion(&comp_pkt.host_event); | ||
| 1554 | comp_pkt.hpdev = hpdev; | ||
| 1555 | pkt.init_packet.compl_ctxt = &comp_pkt; | ||
| 1556 | pkt.init_packet.completion_func = q_resource_requirements; | ||
| 1557 | res_req = (struct pci_child_message *)&pkt.init_packet.message; | ||
| 1558 | res_req->message_type.type = PCI_QUERY_RESOURCE_REQUIREMENTS; | ||
| 1559 | res_req->wslot.slot = desc->win_slot.slot; | ||
| 1560 | |||
| 1561 | ret = vmbus_sendpacket(hbus->hdev->channel, res_req, | ||
| 1562 | sizeof(struct pci_child_message), | ||
| 1563 | (unsigned long)&pkt.init_packet, | ||
| 1564 | VM_PKT_DATA_INBAND, | ||
| 1565 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 1566 | if (ret) | ||
| 1567 | goto error; | ||
| 1568 | |||
| 1569 | if (wait_for_response(hbus->hdev, &comp_pkt.host_event)) | ||
| 1570 | goto error; | ||
| 1571 | |||
| 1572 | hpdev->desc = *desc; | ||
| 1573 | refcount_set(&hpdev->refs, 1); | ||
| 1574 | get_pcichild(hpdev); | ||
| 1575 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1576 | |||
| 1577 | list_add_tail(&hpdev->list_entry, &hbus->children); | ||
| 1578 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1579 | return hpdev; | ||
| 1580 | |||
| 1581 | error: | ||
| 1582 | kfree(hpdev); | ||
| 1583 | return NULL; | ||
| 1584 | } | ||
| 1585 | |||
| 1586 | /** | ||
| 1587 | * get_pcichild_wslot() - Find device from slot | ||
| 1588 | * @hbus: Root PCI bus, as understood by this driver | ||
| 1589 | * @wslot: Location on the bus | ||
| 1590 | * | ||
| 1591 | * This function looks up a PCI device and returns the internal | ||
| 1592 | * representation of it. It acquires a reference on it, so that | ||
| 1593 | * the device won't be deleted while somebody is using it. The | ||
| 1594 | * caller is responsible for calling put_pcichild() to release | ||
| 1595 | * this reference. | ||
| 1596 | * | ||
| 1597 | * Return: Internal representation of a PCI device | ||
| 1598 | */ | ||
| 1599 | static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus, | ||
| 1600 | u32 wslot) | ||
| 1601 | { | ||
| 1602 | unsigned long flags; | ||
| 1603 | struct hv_pci_dev *iter, *hpdev = NULL; | ||
| 1604 | |||
| 1605 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1606 | list_for_each_entry(iter, &hbus->children, list_entry) { | ||
| 1607 | if (iter->desc.win_slot.slot == wslot) { | ||
| 1608 | hpdev = iter; | ||
| 1609 | get_pcichild(hpdev); | ||
| 1610 | break; | ||
| 1611 | } | ||
| 1612 | } | ||
| 1613 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1614 | |||
| 1615 | return hpdev; | ||
| 1616 | } | ||
| 1617 | |||
| 1618 | /** | ||
| 1619 | * pci_devices_present_work() - Handle new list of child devices | ||
| 1620 | * @work: Work struct embedded in struct hv_dr_work | ||
| 1621 | * | ||
| 1622 | * "Bus Relations" is the Windows term for "children of this | ||
| 1623 | * bus." The terminology is preserved here for people trying to | ||
| 1624 | * debug the interaction between Hyper-V and Linux. This | ||
| 1625 | * function is called when the parent partition reports a list | ||
| 1626 | * of functions that should be observed under this PCI Express | ||
| 1627 | * port (bus). | ||
| 1628 | * | ||
| 1629 | * This function updates the list, and must tolerate being | ||
| 1630 | * called multiple times with the same information. The typical | ||
| 1631 | * number of child devices is one, with very atypical cases | ||
| 1632 | * involving three or four, so the algorithms used here can be | ||
| 1633 | * simple and inefficient. | ||
| 1634 | * | ||
| 1635 | * It must also treat the omission of a previously observed device as | ||
| 1636 | * notification that the device no longer exists. | ||
| 1637 | * | ||
| 1638 | * Note that this function is serialized with hv_eject_device_work(), | ||
| 1639 | * because both are pushed to the ordered workqueue hbus->wq. | ||
| 1640 | */ | ||
| 1641 | static void pci_devices_present_work(struct work_struct *work) | ||
| 1642 | { | ||
| 1643 | u32 child_no; | ||
| 1644 | bool found; | ||
| 1645 | struct pci_function_description *new_desc; | ||
| 1646 | struct hv_pci_dev *hpdev; | ||
| 1647 | struct hv_pcibus_device *hbus; | ||
| 1648 | struct list_head removed; | ||
| 1649 | struct hv_dr_work *dr_wrk; | ||
| 1650 | struct hv_dr_state *dr = NULL; | ||
| 1651 | unsigned long flags; | ||
| 1652 | |||
| 1653 | dr_wrk = container_of(work, struct hv_dr_work, wrk); | ||
| 1654 | hbus = dr_wrk->bus; | ||
| 1655 | kfree(dr_wrk); | ||
| 1656 | |||
| 1657 | INIT_LIST_HEAD(&removed); | ||
| 1658 | |||
| 1659 | /* Pull this off the queue and process it if it was the last one. */ | ||
| 1660 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1661 | while (!list_empty(&hbus->dr_list)) { | ||
| 1662 | dr = list_first_entry(&hbus->dr_list, struct hv_dr_state, | ||
| 1663 | list_entry); | ||
| 1664 | list_del(&dr->list_entry); | ||
| 1665 | |||
| 1666 | /* Throw this away if the list still has stuff in it. */ | ||
| 1667 | if (!list_empty(&hbus->dr_list)) { | ||
| 1668 | kfree(dr); | ||
| 1669 | continue; | ||
| 1670 | } | ||
| 1671 | } | ||
| 1672 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1673 | |||
| 1674 | if (!dr) { | ||
| 1675 | put_hvpcibus(hbus); | ||
| 1676 | return; | ||
| 1677 | } | ||
| 1678 | |||
| 1679 | /* First, mark all existing children as reported missing. */ | ||
| 1680 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1681 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1682 | hpdev->reported_missing = true; | ||
| 1683 | } | ||
| 1684 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1685 | |||
| 1686 | /* Next, add back any reported devices. */ | ||
| 1687 | for (child_no = 0; child_no < dr->device_count; child_no++) { | ||
| 1688 | found = false; | ||
| 1689 | new_desc = &dr->func[child_no]; | ||
| 1690 | |||
| 1691 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1692 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1693 | if ((hpdev->desc.win_slot.slot == new_desc->win_slot.slot) && | ||
| 1694 | (hpdev->desc.v_id == new_desc->v_id) && | ||
| 1695 | (hpdev->desc.d_id == new_desc->d_id) && | ||
| 1696 | (hpdev->desc.ser == new_desc->ser)) { | ||
| 1697 | hpdev->reported_missing = false; | ||
| 1698 | found = true; | ||
| 1699 | } | ||
| 1700 | } | ||
| 1701 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1702 | |||
| 1703 | if (!found) { | ||
| 1704 | hpdev = new_pcichild_device(hbus, new_desc); | ||
| 1705 | if (!hpdev) | ||
| 1706 | dev_err(&hbus->hdev->device, | ||
| 1707 | "couldn't record a child device.\n"); | ||
| 1708 | } | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | /* Move missing children to a list on the stack. */ | ||
| 1712 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1713 | do { | ||
| 1714 | found = false; | ||
| 1715 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1716 | if (hpdev->reported_missing) { | ||
| 1717 | found = true; | ||
| 1718 | put_pcichild(hpdev); | ||
| 1719 | list_move_tail(&hpdev->list_entry, &removed); | ||
| 1720 | break; | ||
| 1721 | } | ||
| 1722 | } | ||
| 1723 | } while (found); | ||
| 1724 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1725 | |||
| 1726 | /* Delete everything that should no longer exist. */ | ||
| 1727 | while (!list_empty(&removed)) { | ||
| 1728 | hpdev = list_first_entry(&removed, struct hv_pci_dev, | ||
| 1729 | list_entry); | ||
| 1730 | list_del(&hpdev->list_entry); | ||
| 1731 | put_pcichild(hpdev); | ||
| 1732 | } | ||
| 1733 | |||
| 1734 | switch (hbus->state) { | ||
| 1735 | case hv_pcibus_installed: | ||
| 1736 | /* | ||
| 1737 | * Tell the core to rescan bus | ||
| 1738 | * because there may have been changes. | ||
| 1739 | */ | ||
| 1740 | pci_lock_rescan_remove(); | ||
| 1741 | pci_scan_child_bus(hbus->pci_bus); | ||
| 1742 | pci_unlock_rescan_remove(); | ||
| 1743 | break; | ||
| 1744 | |||
| 1745 | case hv_pcibus_init: | ||
| 1746 | case hv_pcibus_probed: | ||
| 1747 | survey_child_resources(hbus); | ||
| 1748 | break; | ||
| 1749 | |||
| 1750 | default: | ||
| 1751 | break; | ||
| 1752 | } | ||
| 1753 | |||
| 1754 | put_hvpcibus(hbus); | ||
| 1755 | kfree(dr); | ||
| 1756 | } | ||
| 1757 | |||
| 1758 | /** | ||
| 1759 | * hv_pci_devices_present() - Handles list of new children | ||
| 1760 | * @hbus: Root PCI bus, as understood by this driver | ||
| 1761 | * @relations: Packet from host listing children | ||
| 1762 | * | ||
| 1763 | * This function is invoked whenever a new list of devices for | ||
| 1764 | * this bus appears. | ||
| 1765 | */ | ||
| 1766 | static void hv_pci_devices_present(struct hv_pcibus_device *hbus, | ||
| 1767 | struct pci_bus_relations *relations) | ||
| 1768 | { | ||
| 1769 | struct hv_dr_state *dr; | ||
| 1770 | struct hv_dr_work *dr_wrk; | ||
| 1771 | unsigned long flags; | ||
| 1772 | bool pending_dr; | ||
| 1773 | |||
| 1774 | dr_wrk = kzalloc(sizeof(*dr_wrk), GFP_NOWAIT); | ||
| 1775 | if (!dr_wrk) | ||
| 1776 | return; | ||
| 1777 | |||
| 1778 | dr = kzalloc(offsetof(struct hv_dr_state, func) + | ||
| 1779 | (sizeof(struct pci_function_description) * | ||
| 1780 | (relations->device_count)), GFP_NOWAIT); | ||
| 1781 | if (!dr) { | ||
| 1782 | kfree(dr_wrk); | ||
| 1783 | return; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | INIT_WORK(&dr_wrk->wrk, pci_devices_present_work); | ||
| 1787 | dr_wrk->bus = hbus; | ||
| 1788 | dr->device_count = relations->device_count; | ||
| 1789 | if (dr->device_count != 0) { | ||
| 1790 | memcpy(dr->func, relations->func, | ||
| 1791 | sizeof(struct pci_function_description) * | ||
| 1792 | dr->device_count); | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | spin_lock_irqsave(&hbus->device_list_lock, flags); | ||
| 1796 | /* | ||
| 1797 | * If pending_dr is true, we have already queued a work item, | ||
| 1798 | * which will see the new dr. Otherwise, we need to | ||
| 1799 | * queue a new work item. | ||
| 1800 | */ | ||
| 1801 | pending_dr = !list_empty(&hbus->dr_list); | ||
| 1802 | list_add_tail(&dr->list_entry, &hbus->dr_list); | ||
| 1803 | spin_unlock_irqrestore(&hbus->device_list_lock, flags); | ||
| 1804 | |||
| 1805 | if (pending_dr) { | ||
| 1806 | kfree(dr_wrk); | ||
| 1807 | } else { | ||
| 1808 | get_hvpcibus(hbus); | ||
| 1809 | queue_work(hbus->wq, &dr_wrk->wrk); | ||
| 1810 | } | ||
| 1811 | } | ||
| 1812 | |||
| 1813 | /** | ||
| 1814 | * hv_eject_device_work() - Asynchronously handles ejection | ||
| 1815 | * @work: Work struct embedded in internal device struct | ||
| 1816 | * | ||
| 1817 | * This function handles ejecting a device. Windows will | ||
| 1818 | * attempt to gracefully eject a device, waiting 60 seconds to | ||
| 1819 | * hear back from the guest OS that this completed successfully. | ||
| 1820 | * If this timer expires, the device will be forcibly removed. | ||
| 1821 | */ | ||
| 1822 | static void hv_eject_device_work(struct work_struct *work) | ||
| 1823 | { | ||
| 1824 | struct pci_eject_response *ejct_pkt; | ||
| 1825 | struct hv_pci_dev *hpdev; | ||
| 1826 | struct pci_dev *pdev; | ||
| 1827 | unsigned long flags; | ||
| 1828 | int wslot; | ||
| 1829 | struct { | ||
| 1830 | struct pci_packet pkt; | ||
| 1831 | u8 buffer[sizeof(struct pci_eject_response)]; | ||
| 1832 | } ctxt; | ||
| 1833 | |||
| 1834 | hpdev = container_of(work, struct hv_pci_dev, wrk); | ||
| 1835 | |||
| 1836 | WARN_ON(hpdev->state != hv_pcichild_ejecting); | ||
| 1837 | |||
| 1838 | /* | ||
| 1839 | * Ejection can come before or after the PCI bus has been set up, so | ||
| 1840 | * attempt to find the pci_dev and tear down its state, if it exists. This | ||
| 1841 | * must be done without constructs like pci_domain_nr(hbus->pci_bus) | ||
| 1842 | * because hbus->pci_bus may not exist yet. | ||
| 1843 | */ | ||
| 1844 | wslot = wslot_to_devfn(hpdev->desc.win_slot.slot); | ||
| 1845 | pdev = pci_get_domain_bus_and_slot(hpdev->hbus->sysdata.domain, 0, | ||
| 1846 | wslot); | ||
| 1847 | if (pdev) { | ||
| 1848 | pci_lock_rescan_remove(); | ||
| 1849 | pci_stop_and_remove_bus_device(pdev); | ||
| 1850 | pci_dev_put(pdev); | ||
| 1851 | pci_unlock_rescan_remove(); | ||
| 1852 | } | ||
| 1853 | |||
| 1854 | spin_lock_irqsave(&hpdev->hbus->device_list_lock, flags); | ||
| 1855 | list_del(&hpdev->list_entry); | ||
| 1856 | spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); | ||
| 1857 | |||
| 1858 | memset(&ctxt, 0, sizeof(ctxt)); | ||
| 1859 | ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; | ||
| 1860 | ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; | ||
| 1861 | ejct_pkt->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 1862 | vmbus_sendpacket(hpdev->hbus->hdev->channel, ejct_pkt, | ||
| 1863 | sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, | ||
| 1864 | VM_PKT_DATA_INBAND, 0); | ||
| 1865 | |||
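| | /* | ||
| | * Drop two references: the one taken in hv_pci_eject_device() | ||
| | * and the extra one taken in new_pcichild_device(), now that | ||
| | * the device has been unlinked from the children list. | ||
| | */ | ||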
| 1866 | put_pcichild(hpdev); | ||
| 1867 | put_pcichild(hpdev); | ||
| 1868 | put_hvpcibus(hpdev->hbus); | ||
| 1869 | } | ||
| 1870 | |||
| 1871 | /** | ||
| 1872 | * hv_pci_eject_device() - Handles device ejection | ||
| 1873 | * @hpdev: Internal device tracking struct | ||
| 1874 | * | ||
| 1875 | * This function is invoked when an ejection packet arrives. It | ||
| 1876 | * just schedules work so that we don't re-enter the packet | ||
| 1877 | * delivery code handling the ejection. | ||
| 1878 | */ | ||
| 1879 | static void hv_pci_eject_device(struct hv_pci_dev *hpdev) | ||
| 1880 | { | ||
| 1881 | hpdev->state = hv_pcichild_ejecting; | ||
| 1882 | get_pcichild(hpdev); | ||
| 1883 | INIT_WORK(&hpdev->wrk, hv_eject_device_work); | ||
| 1884 | get_hvpcibus(hpdev->hbus); | ||
| 1885 | queue_work(hpdev->hbus->wq, &hpdev->wrk); | ||
| 1886 | } | ||
| 1887 | |||
| 1888 | /** | ||
| 1889 | * hv_pci_onchannelcallback() - Handles incoming packets | ||
| 1890 | * @context: Internal bus tracking struct | ||
| 1891 | * | ||
| 1892 | * This function is invoked whenever the host sends a packet to | ||
| 1893 | * this channel (which is private to this root PCI bus). | ||
| 1894 | */ | ||
| 1895 | static void hv_pci_onchannelcallback(void *context) | ||
| 1896 | { | ||
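| | /* | ||
| | * 0x100 is only an initial guess at the packet size; on -ENOBUFS | ||
| | * the buffer is reallocated below at the size the host reports. | ||
| | */ | ||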
| 1897 | const int packet_size = 0x100; | ||
| 1898 | int ret; | ||
| 1899 | struct hv_pcibus_device *hbus = context; | ||
| 1900 | u32 bytes_recvd; | ||
| 1901 | u64 req_id; | ||
| 1902 | struct vmpacket_descriptor *desc; | ||
| 1903 | unsigned char *buffer; | ||
| 1904 | int bufferlen = packet_size; | ||
| 1905 | struct pci_packet *comp_packet; | ||
| 1906 | struct pci_response *response; | ||
| 1907 | struct pci_incoming_message *new_message; | ||
| 1908 | struct pci_bus_relations *bus_rel; | ||
| 1909 | struct pci_dev_incoming *dev_message; | ||
| 1910 | struct hv_pci_dev *hpdev; | ||
| 1911 | |||
| 1912 | buffer = kmalloc(bufferlen, GFP_ATOMIC); | ||
| 1913 | if (!buffer) | ||
| 1914 | return; | ||
| 1915 | |||
| 1916 | while (1) { | ||
| 1917 | ret = vmbus_recvpacket_raw(hbus->hdev->channel, buffer, | ||
| 1918 | bufferlen, &bytes_recvd, &req_id); | ||
| 1919 | |||
| 1920 | if (ret == -ENOBUFS) { | ||
| 1921 | kfree(buffer); | ||
| 1922 | /* Handle large packet */ | ||
| 1923 | bufferlen = bytes_recvd; | ||
| 1924 | buffer = kmalloc(bytes_recvd, GFP_ATOMIC); | ||
| 1925 | if (!buffer) | ||
| 1926 | return; | ||
| 1927 | continue; | ||
| 1928 | } | ||
| 1929 | |||
| 1930 | /* Zero length indicates there are no more packets. */ | ||
| 1931 | if (ret || !bytes_recvd) | ||
| 1932 | break; | ||
| 1933 | |||
| 1934 | /* | ||
| 1935 | * All incoming packets must be at least as large as a | ||
| 1936 | * response. | ||
| 1937 | */ | ||
| 1938 | if (bytes_recvd <= sizeof(struct pci_response)) | ||
| 1939 | continue; | ||
| 1940 | desc = (struct vmpacket_descriptor *)buffer; | ||
| 1941 | |||
| 1942 | switch (desc->type) { | ||
| 1943 | case VM_PKT_COMP: | ||
| 1944 | |||
| 1945 | /* | ||
| 1946 | * The host is trusted, and thus it's safe to interpret | ||
| 1947 | * this transaction ID as a pointer. | ||
| 1948 | */ | ||
| 1949 | comp_packet = (struct pci_packet *)req_id; | ||
| 1950 | response = (struct pci_response *)buffer; | ||
| 1951 | comp_packet->completion_func(comp_packet->compl_ctxt, | ||
| 1952 | response, | ||
| 1953 | bytes_recvd); | ||
| 1954 | break; | ||
| 1955 | |||
| 1956 | case VM_PKT_DATA_INBAND: | ||
| 1957 | |||
| 1958 | new_message = (struct pci_incoming_message *)buffer; | ||
| 1959 | switch (new_message->message_type.type) { | ||
| 1960 | case PCI_BUS_RELATIONS: | ||
| 1961 | |||
| 1962 | bus_rel = (struct pci_bus_relations *)buffer; | ||
| 1963 | if (bytes_recvd < | ||
| 1964 | offsetof(struct pci_bus_relations, func) + | ||
| 1965 | (sizeof(struct pci_function_description) * | ||
| 1966 | (bus_rel->device_count))) { | ||
| 1967 | dev_err(&hbus->hdev->device, | ||
| 1968 | "bus relations too small\n"); | ||
| 1969 | break; | ||
| 1970 | } | ||
| 1971 | |||
| 1972 | hv_pci_devices_present(hbus, bus_rel); | ||
| 1973 | break; | ||
| 1974 | |||
| 1975 | case PCI_EJECT: | ||
| 1976 | |||
| 1977 | dev_message = (struct pci_dev_incoming *)buffer; | ||
| 1978 | hpdev = get_pcichild_wslot(hbus, | ||
| 1979 | dev_message->wslot.slot); | ||
| 1980 | if (hpdev) { | ||
| 1981 | hv_pci_eject_device(hpdev); | ||
| 1982 | put_pcichild(hpdev); | ||
| 1983 | } | ||
| 1984 | break; | ||
| 1985 | |||
| 1986 | default: | ||
| 1987 | dev_warn(&hbus->hdev->device, | ||
| 1988 | "Unimplemented protocol message %x\n", | ||
| 1989 | new_message->message_type.type); | ||
| 1990 | break; | ||
| 1991 | } | ||
| 1992 | break; | ||
| 1993 | |||
| 1994 | default: | ||
| 1995 | dev_err(&hbus->hdev->device, | ||
| 1996 | "unhandled packet type %d, tid %llx len %d\n", | ||
| 1997 | desc->type, req_id, bytes_recvd); | ||
| 1998 | break; | ||
| 1999 | } | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | kfree(buffer); | ||
| 2003 | } | ||
| 2004 | |||
| 2005 | /** | ||
| 2006 | * hv_pci_protocol_negotiation() - Set up protocol | ||
| 2007 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2008 | * | ||
| 2009 | * This driver is intended to support running on Windows 10 | ||
| 2010 | * (server) and later versions. It will not run on earlier | ||
| 2011 | * versions, as they assume that many of the operations which | ||
| 2012 | * Linux needs accomplished with a spinlock held were done | ||
| 2013 | * through asynchronous VMBus messaging. Windows 10 increases the | ||
| 2014 | * surface area of PCI emulation so that these actions can take | ||
| 2015 | * place by suspending a virtual processor for their duration. | ||
| 2016 | * | ||
| 2017 | * This function negotiates the channel protocol version, | ||
| 2018 | * failing if the host doesn't support the necessary protocol | ||
| 2019 | * level. | ||
| 2020 | */ | ||
| 2021 | static int hv_pci_protocol_negotiation(struct hv_device *hdev) | ||
| 2022 | { | ||
| 2023 | struct pci_version_request *version_req; | ||
| 2024 | struct hv_pci_compl comp_pkt; | ||
| 2025 | struct pci_packet *pkt; | ||
| 2026 | int ret; | ||
| 2027 | int i; | ||
| 2028 | |||
| 2029 | /* | ||
| 2030 | * Initiate the handshake with the host and negotiate | ||
| 2031 | * a version that the host can support. We start with the | ||
| 2032 | * highest version number and go down if the host cannot | ||
| 2033 | * support it. | ||
| 2034 | */ | ||
| 2035 | pkt = kzalloc(sizeof(*pkt) + sizeof(*version_req), GFP_KERNEL); | ||
| 2036 | if (!pkt) | ||
| 2037 | return -ENOMEM; | ||
| 2038 | |||
| 2039 | init_completion(&comp_pkt.host_event); | ||
| 2040 | pkt->completion_func = hv_pci_generic_compl; | ||
| 2041 | pkt->compl_ctxt = &comp_pkt; | ||
| 2042 | version_req = (struct pci_version_request *)&pkt->message; | ||
| 2043 | version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; | ||
| 2044 | |||
| 2045 | for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { | ||
| 2046 | version_req->protocol_version = pci_protocol_versions[i]; | ||
| 2047 | ret = vmbus_sendpacket(hdev->channel, version_req, | ||
| 2048 | sizeof(struct pci_version_request), | ||
| 2049 | (unsigned long)pkt, VM_PKT_DATA_INBAND, | ||
| 2050 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 2051 | if (!ret) | ||
| 2052 | ret = wait_for_response(hdev, &comp_pkt.host_event); | ||
| 2053 | |||
| 2054 | if (ret) { | ||
| 2055 | dev_err(&hdev->device, | ||
| 2056 | "PCI Pass-through VSP failed to request version: %d", | ||
| 2057 | ret); | ||
| 2058 | goto exit; | ||
| 2059 | } | ||
| 2060 | |||
| 2061 | if (comp_pkt.completion_status >= 0) { | ||
| 2062 | pci_protocol_version = pci_protocol_versions[i]; | ||
| 2063 | dev_info(&hdev->device, | ||
| 2064 | "PCI VMBus probing: Using version %#x\n", | ||
| 2065 | pci_protocol_version); | ||
| 2066 | goto exit; | ||
| 2067 | } | ||
| 2068 | |||
| 2069 | if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { | ||
| 2070 | dev_err(&hdev->device, | ||
| 2071 | "PCI Pass-through VSP failed version request: %#x", | ||
| 2072 | comp_pkt.completion_status); | ||
| 2073 | ret = -EPROTO; | ||
| 2074 | goto exit; | ||
| 2075 | } | ||
| 2076 | |||
| 2077 | reinit_completion(&comp_pkt.host_event); | ||
| 2078 | } | ||
| 2079 | |||
| 2080 | dev_err(&hdev->device, | ||
| 2081 | "PCI pass-through VSP failed to find supported version"); | ||
| 2082 | ret = -EPROTO; | ||
| 2083 | |||
| 2084 | exit: | ||
| 2085 | kfree(pkt); | ||
| 2086 | return ret; | ||
| 2087 | } | ||
| 2088 | |||
| 2089 | /** | ||
| 2090 | * hv_pci_free_bridge_windows() - Release memory regions for the | ||
| 2091 | * bus | ||
| 2092 | * @hbus: Root PCI bus, as understood by this driver | ||
| 2093 | */ | ||
| 2094 | static void hv_pci_free_bridge_windows(struct hv_pcibus_device *hbus) | ||
| 2095 | { | ||
| 2096 | /* | ||
| 2097 | * Set the resources back to the way they looked when they | ||
| 2098 | * were allocated by setting IORESOURCE_BUSY again. | ||
| 2099 | */ | ||
| 2100 | |||
| 2101 | if (hbus->low_mmio_space && hbus->low_mmio_res) { | ||
| 2102 | hbus->low_mmio_res->flags |= IORESOURCE_BUSY; | ||
| 2103 | vmbus_free_mmio(hbus->low_mmio_res->start, | ||
| 2104 | resource_size(hbus->low_mmio_res)); | ||
| 2105 | } | ||
| 2106 | |||
| 2107 | if (hbus->high_mmio_space && hbus->high_mmio_res) { | ||
| 2108 | hbus->high_mmio_res->flags |= IORESOURCE_BUSY; | ||
| 2109 | vmbus_free_mmio(hbus->high_mmio_res->start, | ||
| 2110 | resource_size(hbus->high_mmio_res)); | ||
| 2111 | } | ||
| 2112 | } | ||
| 2113 | |||
| 2114 | /** | ||
| 2115 | * hv_pci_allocate_bridge_windows() - Allocate memory regions | ||
| 2116 | * for the bus | ||
| 2117 | * @hbus: Root PCI bus, as understood by this driver | ||
| 2118 | * | ||
| 2119 | * This function calls vmbus_allocate_mmio(), which is itself a | ||
| 2120 | * bit of a compromise. Ideally, we might change the pnp layer | ||
| 2121 | * in the kernel so that it comprehends PCI devices which are | ||
| 2122 | * "grandchildren of ACPI," with some intermediate bus node (in | ||
| 2123 | * this case, VMBus), or so that it understands VMBus directly. | ||
| 2124 | * The pnp layer, however, has been declared deprecated, and is | ||
| 2125 | * not subject to change. | ||
| 2126 | * | ||
| 2127 | * The workaround, implemented here, is to ask VMBus to allocate | ||
| 2128 | * MMIO space for this bus. VMBus itself knows which ranges are | ||
| 2129 | * appropriate by looking at its own ACPI objects. Then, after | ||
| 2130 | * these ranges are claimed, they're modified to look like they | ||
| 2131 | * would have looked if the ACPI and pnp code had allocated | ||
| 2132 | * bridge windows. These descriptors have to exist in this form | ||
| 2133 | * in order to satisfy the code which will get invoked when the | ||
| 2134 | * endpoint PCI function driver calls request_mem_region() or | ||
| 2135 | * request_mem_region_exclusive(). | ||
| 2136 | * | ||
| 2137 | * Return: 0 on success, -errno on failure | ||
| 2138 | */ | ||
| 2139 | static int hv_pci_allocate_bridge_windows(struct hv_pcibus_device *hbus) | ||
| 2140 | { | ||
| 2141 | resource_size_t align; | ||
| 2142 | int ret; | ||
| 2143 | |||
| 2144 | if (hbus->low_mmio_space) { | ||
| 2145 | align = 1ULL << (63 - __builtin_clzll(hbus->low_mmio_space)); | ||
| 2146 | ret = vmbus_allocate_mmio(&hbus->low_mmio_res, hbus->hdev, 0, | ||
| 2147 | (u64)(u32)0xffffffff, | ||
| 2148 | hbus->low_mmio_space, | ||
| 2149 | align, false); | ||
| 2150 | if (ret) { | ||
| 2151 | dev_err(&hbus->hdev->device, | ||
| 2152 | "Need %#llx of low MMIO space. Consider reconfiguring the VM.\n", | ||
| 2153 | hbus->low_mmio_space); | ||
| 2154 | return ret; | ||
| 2155 | } | ||
| 2156 | |||
| 2157 | /* Modify this resource to become a bridge window. */ | ||
| 2158 | hbus->low_mmio_res->flags |= IORESOURCE_WINDOW; | ||
| 2159 | hbus->low_mmio_res->flags &= ~IORESOURCE_BUSY; | ||
| 2160 | pci_add_resource(&hbus->resources_for_children, | ||
| 2161 | hbus->low_mmio_res); | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | if (hbus->high_mmio_space) { | ||
| 2165 | align = 1ULL << (63 - __builtin_clzll(hbus->high_mmio_space)); | ||
| 2166 | ret = vmbus_allocate_mmio(&hbus->high_mmio_res, hbus->hdev, | ||
| 2167 | 0x100000000, -1, | ||
| 2168 | hbus->high_mmio_space, align, | ||
| 2169 | false); | ||
| 2170 | if (ret) { | ||
| 2171 | dev_err(&hbus->hdev->device, | ||
| 2172 | "Need %#llx of high MMIO space. Consider reconfiguring the VM.\n", | ||
| 2173 | hbus->high_mmio_space); | ||
| 2174 | goto release_low_mmio; | ||
| 2175 | } | ||
| 2176 | |||
| 2177 | /* Modify this resource to become a bridge window. */ | ||
| 2178 | hbus->high_mmio_res->flags |= IORESOURCE_WINDOW; | ||
| 2179 | hbus->high_mmio_res->flags &= ~IORESOURCE_BUSY; | ||
| 2180 | pci_add_resource(&hbus->resources_for_children, | ||
| 2181 | hbus->high_mmio_res); | ||
| 2182 | } | ||
| 2183 | |||
| 2184 | return 0; | ||
| 2185 | |||
| 2186 | release_low_mmio: | ||
| 2187 | if (hbus->low_mmio_res) { | ||
| 2188 | vmbus_free_mmio(hbus->low_mmio_res->start, | ||
| 2189 | resource_size(hbus->low_mmio_res)); | ||
| 2190 | } | ||
| 2191 | |||
| 2192 | return ret; | ||
| 2193 | } | ||
| 2194 | |||
| 2195 | /** | ||
| 2196 | * hv_allocate_config_window() - Find MMIO space for PCI Config | ||
| 2197 | * @hbus: Root PCI bus, as understood by this driver | ||
| 2198 | * | ||
| 2199 | * This function claims memory-mapped I/O space for accessing | ||
| 2200 | * configuration space for the functions on this bus. | ||
| 2201 | * | ||
| 2202 | * Return: 0 on success, -errno on failure | ||
| 2203 | */ | ||
| 2204 | static int hv_allocate_config_window(struct hv_pcibus_device *hbus) | ||
| 2205 | { | ||
| 2206 | int ret; | ||
| 2207 | |||
| 2208 | /* | ||
| 2209 | * Set up a region of MMIO space to use for accessing configuration | ||
| 2210 | * space. | ||
| 2211 | */ | ||
| 2212 | ret = vmbus_allocate_mmio(&hbus->mem_config, hbus->hdev, 0, -1, | ||
| 2213 | PCI_CONFIG_MMIO_LENGTH, 0x1000, false); | ||
| 2214 | if (ret) | ||
| 2215 | return ret; | ||
| 2216 | |||
| 2217 | /* | ||
| 2218 | * vmbus_allocate_mmio() gets used for allocating both device endpoint | ||
| 2219 | * resource claims (those which cannot be overlapped) and the ranges | ||
| 2220 | * which are valid for the children of this bus, which are intended | ||
| 2221 | * to be overlapped by those children. Set the flag on this claim | ||
| 2222 | * to mark that this region can't be overlapped. | ||
| 2223 | */ | ||
| 2224 | |||
| 2225 | hbus->mem_config->flags |= IORESOURCE_BUSY; | ||
| 2226 | |||
| 2227 | return 0; | ||
| 2228 | } | ||
| 2229 | |||
| 2230 | static void hv_free_config_window(struct hv_pcibus_device *hbus) | ||
| 2231 | { | ||
| 2232 | vmbus_free_mmio(hbus->mem_config->start, PCI_CONFIG_MMIO_LENGTH); | ||
| 2233 | } | ||
| 2234 | |||
| 2235 | /** | ||
| 2236 | * hv_pci_enter_d0() - Bring the "bus" into the D0 power state | ||
| 2237 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2238 | * | ||
| 2239 | * Return: 0 on success, -errno on failure | ||
| 2240 | */ | ||
| 2241 | static int hv_pci_enter_d0(struct hv_device *hdev) | ||
| 2242 | { | ||
| 2243 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | ||
| 2244 | struct pci_bus_d0_entry *d0_entry; | ||
| 2245 | struct hv_pci_compl comp_pkt; | ||
| 2246 | struct pci_packet *pkt; | ||
| 2247 | int ret; | ||
| 2248 | |||
| 2249 | /* | ||
| 2250 | * Tell the host that the bus is ready to use, and moved into the | ||
| 2251 | * powered-on state. This includes telling the host which region | ||
| 2252 | * of memory-mapped I/O space has been chosen for configuration space | ||
| 2253 | * access. | ||
| 2254 | */ | ||
| 2255 | pkt = kzalloc(sizeof(*pkt) + sizeof(*d0_entry), GFP_KERNEL); | ||
| 2256 | if (!pkt) | ||
| 2257 | return -ENOMEM; | ||
| 2258 | |||
| 2259 | init_completion(&comp_pkt.host_event); | ||
| 2260 | pkt->completion_func = hv_pci_generic_compl; | ||
| 2261 | pkt->compl_ctxt = &comp_pkt; | ||
| 2262 | d0_entry = (struct pci_bus_d0_entry *)&pkt->message; | ||
| 2263 | d0_entry->message_type.type = PCI_BUS_D0ENTRY; | ||
| 2264 | d0_entry->mmio_base = hbus->mem_config->start; | ||
| 2265 | |||
| 2266 | ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry), | ||
| 2267 | (unsigned long)pkt, VM_PKT_DATA_INBAND, | ||
| 2268 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 2269 | if (!ret) | ||
| 2270 | ret = wait_for_response(hdev, &comp_pkt.host_event); | ||
| 2271 | |||
| 2272 | if (ret) | ||
| 2273 | goto exit; | ||
| 2274 | |||
| 2275 | if (comp_pkt.completion_status < 0) { | ||
| 2276 | dev_err(&hdev->device, | ||
| 2277 | "PCI Pass-through VSP failed D0 Entry with status %x\n", | ||
| 2278 | comp_pkt.completion_status); | ||
| 2279 | ret = -EPROTO; | ||
| 2280 | goto exit; | ||
| 2281 | } | ||
| 2282 | |||
| 2283 | ret = 0; | ||
| 2284 | |||
| 2285 | exit: | ||
| 2286 | kfree(pkt); | ||
| 2287 | return ret; | ||
| 2288 | } | ||
| 2289 | |||
| 2290 | /** | ||
| 2291 | * hv_pci_query_relations() - Ask host to send list of child | ||
| 2292 | * devices | ||
| 2293 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2294 | * | ||
| 2295 | * Return: 0 on success, -errno on failure | ||
| 2296 | */ | ||
| 2297 | static int hv_pci_query_relations(struct hv_device *hdev) | ||
| 2298 | { | ||
| 2299 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | ||
| 2300 | struct pci_message message; | ||
| 2301 | struct completion comp; | ||
| 2302 | int ret; | ||
| 2303 | |||
| 2304 | /* Ask the host to send along the list of child devices */ | ||
| 2305 | init_completion(&comp); | ||
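| | /* | ||
| | * Only one survey may be outstanding at a time; | ||
| | * survey_child_resources() consumes this pointer with xchg(). | ||
| | */ | ||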
| 2306 | if (cmpxchg(&hbus->survey_event, NULL, &comp)) | ||
| 2307 | return -ENOTEMPTY; | ||
| 2308 | |||
| 2309 | memset(&message, 0, sizeof(message)); | ||
| 2310 | message.type = PCI_QUERY_BUS_RELATIONS; | ||
| 2311 | |||
| 2312 | ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message), | ||
| 2313 | 0, VM_PKT_DATA_INBAND, 0); | ||
| 2314 | if (!ret) | ||
| 2315 | ret = wait_for_response(hdev, &comp); | ||
| 2316 | |||
| 2317 | return ret; | ||
| 2318 | } | ||
| 2319 | |||
| 2320 | /** | ||
| 2321 | * hv_send_resources_allocated() - Report local resource choices | ||
| 2322 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2323 | * | ||
| 2324 | * The host OS is expecting to be sent a request as a message | ||
| 2325 | * which contains all the resources that the device will use. | ||
| 2326 | * The response contains those same resources, "translated," | ||
| 2327 | * which is to say, the values that the hardware should use | ||
| 2328 | * when it delivers an interrupt. (MMIO resources are | ||
| 2329 | * used in local terms.) This is nice for Windows, and lines up | ||
| 2330 | * with the FDO/PDO split, which doesn't exist in Linux. Linux | ||
| 2331 | * is deeply expecting to scan an emulated PCI configuration | ||
| 2332 | * space. So this message is sent here only to drive the state | ||
| 2333 | * machine on the host forward. | ||
| 2334 | * | ||
| 2335 | * Return: 0 on success, -errno on failure | ||
| 2336 | */ | ||
| 2337 | static int hv_send_resources_allocated(struct hv_device *hdev) | ||
| 2338 | { | ||
| 2339 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | ||
| 2340 | struct pci_resources_assigned *res_assigned; | ||
| 2341 | struct pci_resources_assigned2 *res_assigned2; | ||
| 2342 | struct hv_pci_compl comp_pkt; | ||
| 2343 | struct hv_pci_dev *hpdev; | ||
| 2344 | struct pci_packet *pkt; | ||
| 2345 | size_t size_res; | ||
| 2346 | u32 wslot; | ||
| 2347 | int ret; | ||
| 2348 | |||
| 2349 | size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) | ||
| 2350 | ? sizeof(*res_assigned) : sizeof(*res_assigned2); | ||
| 2351 | |||
| 2352 | pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); | ||
| 2353 | if (!pkt) | ||
| 2354 | return -ENOMEM; | ||
| 2355 | |||
| 2356 | ret = 0; | ||
| 2357 | |||
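| | /* A PCI bus addresses at most 32 devices * 8 functions = 256 slots. */ | ||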
| 2358 | for (wslot = 0; wslot < 256; wslot++) { | ||
| 2359 | hpdev = get_pcichild_wslot(hbus, wslot); | ||
| 2360 | if (!hpdev) | ||
| 2361 | continue; | ||
| 2362 | |||
| 2363 | memset(pkt, 0, sizeof(*pkt) + size_res); | ||
| 2364 | init_completion(&comp_pkt.host_event); | ||
| 2365 | pkt->completion_func = hv_pci_generic_compl; | ||
| 2366 | pkt->compl_ctxt = &comp_pkt; | ||
| 2367 | |||
| 2368 | if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { | ||
| 2369 | res_assigned = | ||
| 2370 | (struct pci_resources_assigned *)&pkt->message; | ||
| 2371 | res_assigned->message_type.type = | ||
| 2372 | PCI_RESOURCES_ASSIGNED; | ||
| 2373 | res_assigned->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2374 | } else { | ||
| 2375 | res_assigned2 = | ||
| 2376 | (struct pci_resources_assigned2 *)&pkt->message; | ||
| 2377 | res_assigned2->message_type.type = | ||
| 2378 | PCI_RESOURCES_ASSIGNED2; | ||
| 2379 | res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2380 | } | ||
| 2381 | put_pcichild(hpdev); | ||
| 2382 | |||
| 2383 | ret = vmbus_sendpacket(hdev->channel, &pkt->message, | ||
| 2384 | size_res, (unsigned long)pkt, | ||
| 2385 | VM_PKT_DATA_INBAND, | ||
| 2386 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 2387 | if (!ret) | ||
| 2388 | ret = wait_for_response(hdev, &comp_pkt.host_event); | ||
| 2389 | if (ret) | ||
| 2390 | break; | ||
| 2391 | |||
| 2392 | if (comp_pkt.completion_status < 0) { | ||
| 2393 | ret = -EPROTO; | ||
| 2394 | dev_err(&hdev->device, | ||
| 2395 | "resource allocated returned 0x%x", | ||
| 2396 | comp_pkt.completion_status); | ||
| 2397 | break; | ||
| 2398 | } | ||
| 2399 | } | ||
| 2400 | |||
| 2401 | kfree(pkt); | ||
| 2402 | return ret; | ||
| 2403 | } | ||
| 2404 | |||
| 2405 | /** | ||
| 2406 | * hv_send_resources_released() - Report local resources | ||
| 2407 | * released | ||
| 2408 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2409 | * | ||
| 2410 | * Return: 0 on success, -errno on failure | ||
| 2411 | */ | ||
| 2412 | static int hv_send_resources_released(struct hv_device *hdev) | ||
| 2413 | { | ||
| 2414 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | ||
| 2415 | struct pci_child_message pkt; | ||
| 2416 | struct hv_pci_dev *hpdev; | ||
| 2417 | u32 wslot; | ||
| 2418 | int ret; | ||
| 2419 | |||
| 2420 | for (wslot = 0; wslot < 256; wslot++) { | ||
| 2421 | hpdev = get_pcichild_wslot(hbus, wslot); | ||
| 2422 | if (!hpdev) | ||
| 2423 | continue; | ||
| 2424 | |||
| 2425 | memset(&pkt, 0, sizeof(pkt)); | ||
| 2426 | pkt.message_type.type = PCI_RESOURCES_RELEASED; | ||
| 2427 | pkt.wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2428 | |||
| 2429 | put_pcichild(hpdev); | ||
| 2430 | |||
| 2431 | ret = vmbus_sendpacket(hdev->channel, &pkt, sizeof(pkt), 0, | ||
| 2432 | VM_PKT_DATA_INBAND, 0); | ||
| 2433 | if (ret) | ||
| 2434 | return ret; | ||
| 2435 | } | ||
| 2436 | |||
| 2437 | return 0; | ||
| 2438 | } | ||
| 2439 | |||
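| | /* | ||
| | * Despite its name, remove_lock is a reference count, not a lock: | ||
| | * hv_pci_remove() drops the initial reference and then blocks on | ||
| | * remove_event until every outstanding work item has dropped its | ||
| | * reference via put_hvpcibus(). | ||
| | */ | ||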
| 2440 | static void get_hvpcibus(struct hv_pcibus_device *hbus) | ||
| 2441 | { | ||
| 2442 | refcount_inc(&hbus->remove_lock); | ||
| 2443 | } | ||
| 2444 | |||
| 2445 | static void put_hvpcibus(struct hv_pcibus_device *hbus) | ||
| 2446 | { | ||
| 2447 | if (refcount_dec_and_test(&hbus->remove_lock)) | ||
| 2448 | complete(&hbus->remove_event); | ||
| 2449 | } | ||
| 2450 | |||
| 2451 | /** | ||
| 2452 | * hv_pci_probe() - New VMBus channel probe, for a root PCI bus | ||
| 2453 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2454 | * @dev_id: Identifies the device itself | ||
| 2455 | * | ||
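| | * Bring-up proceeds in order: negotiate the channel protocol, | ||
| | * claim and map a config-space window, build the MSI IRQ domain, | ||
| | * ask the host for the child list, enter D0, allocate bridge | ||
| | * windows, report resources back to the host, prepopulate the | ||
| | * BARs and finally create and scan the root bus. | ||
| | * | ||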
| 2456 | * Return: 0 on success, -errno on failure | ||
| 2457 | */ | ||
| 2458 | static int hv_pci_probe(struct hv_device *hdev, | ||
| 2459 | const struct hv_vmbus_device_id *dev_id) | ||
| 2460 | { | ||
| 2461 | struct hv_pcibus_device *hbus; | ||
| 2462 | int ret; | ||
| 2463 | |||
| 2464 | /* | ||
| 2465 | * hv_pcibus_device contains the hypercall arguments for retargeting in | ||
| 2466 | * hv_irq_unmask(). Those must not cross a page boundary. | ||
| 2467 | */ | ||
| 2468 | BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); | ||
| 2469 | |||
| 2470 | hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); | ||
| 2471 | if (!hbus) | ||
| 2472 | return -ENOMEM; | ||
| 2473 | hbus->state = hv_pcibus_init; | ||
| 2474 | |||
| 2475 | /* | ||
| 2476 | * The PCI bus "domain" is what is called "segment" in ACPI and | ||
| 2477 | * other specs. Pull it from the instance ID, to get something | ||
| 2478 | * unique. Bytes 8 and 9 are what is used in Windows guests, so | ||
| 2479 | * do the same thing for consistency. Note that, since this code | ||
| 2480 | * only runs in a Hyper-V VM, Hyper-V can (and does) guarantee | ||
| 2481 | * that (1) the only domain in use for something that looks like | ||
| 2482 | * a physical PCI bus (which is actually emulated by the | ||
| 2483 | * hypervisor) is domain 0 and (2) there will be no overlap | ||
| 2484 | * between domains derived from these instance IDs in the same | ||
| 2485 | * VM. | ||
| 2486 | */ | ||
| 2487 | hbus->sysdata.domain = hdev->dev_instance.b[9] | | ||
| 2488 | hdev->dev_instance.b[8] << 8; | ||
| 2489 | |||
| 2490 | hbus->hdev = hdev; | ||
| 2491 | refcount_set(&hbus->remove_lock, 1); | ||
| 2492 | INIT_LIST_HEAD(&hbus->children); | ||
| 2493 | INIT_LIST_HEAD(&hbus->dr_list); | ||
| 2494 | INIT_LIST_HEAD(&hbus->resources_for_children); | ||
| 2495 | spin_lock_init(&hbus->config_lock); | ||
| 2496 | spin_lock_init(&hbus->device_list_lock); | ||
| 2497 | spin_lock_init(&hbus->retarget_msi_interrupt_lock); | ||
| 2498 | init_completion(&hbus->remove_event); | ||
| 2499 | hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, | ||
| 2500 | hbus->sysdata.domain); | ||
| 2501 | if (!hbus->wq) { | ||
| 2502 | ret = -ENOMEM; | ||
| 2503 | goto free_bus; | ||
| 2504 | } | ||
| 2505 | |||
| 2506 | ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0, | ||
| 2507 | hv_pci_onchannelcallback, hbus); | ||
| 2508 | if (ret) | ||
| 2509 | goto destroy_wq; | ||
| 2510 | |||
| 2511 | hv_set_drvdata(hdev, hbus); | ||
| 2512 | |||
| 2513 | ret = hv_pci_protocol_negotiation(hdev); | ||
| 2514 | if (ret) | ||
| 2515 | goto close; | ||
| 2516 | |||
| 2517 | ret = hv_allocate_config_window(hbus); | ||
| 2518 | if (ret) | ||
| 2519 | goto close; | ||
| 2520 | |||
| 2521 | hbus->cfg_addr = ioremap(hbus->mem_config->start, | ||
| 2522 | PCI_CONFIG_MMIO_LENGTH); | ||
| 2523 | if (!hbus->cfg_addr) { | ||
| 2524 | dev_err(&hdev->device, | ||
| 2525 | "Unable to map a virtual address for config space\n"); | ||
| 2526 | ret = -ENOMEM; | ||
| 2527 | goto free_config; | ||
| 2528 | } | ||
| 2529 | |||
| 2530 | hbus->sysdata.fwnode = irq_domain_alloc_fwnode(hbus); | ||
| 2531 | if (!hbus->sysdata.fwnode) { | ||
| 2532 | ret = -ENOMEM; | ||
| 2533 | goto unmap; | ||
| 2534 | } | ||
| 2535 | |||
| 2536 | ret = hv_pcie_init_irq_domain(hbus); | ||
| 2537 | if (ret) | ||
| 2538 | goto free_fwnode; | ||
| 2539 | |||
| 2540 | ret = hv_pci_query_relations(hdev); | ||
| 2541 | if (ret) | ||
| 2542 | goto free_irq_domain; | ||
| 2543 | |||
| 2544 | ret = hv_pci_enter_d0(hdev); | ||
| 2545 | if (ret) | ||
| 2546 | goto free_irq_domain; | ||
| 2547 | |||
| 2548 | ret = hv_pci_allocate_bridge_windows(hbus); | ||
| 2549 | if (ret) | ||
| 2550 | goto free_irq_domain; | ||
| 2551 | |||
| 2552 | ret = hv_send_resources_allocated(hdev); | ||
| 2553 | if (ret) | ||
| 2554 | goto free_windows; | ||
| 2555 | |||
| 2556 | prepopulate_bars(hbus); | ||
| 2557 | |||
| 2558 | hbus->state = hv_pcibus_probed; | ||
| 2559 | |||
| 2560 | ret = create_root_hv_pci_bus(hbus); | ||
| 2561 | if (ret) | ||
| 2562 | goto free_windows; | ||
| 2563 | |||
| 2564 | return 0; | ||
| 2565 | |||
| 2566 | free_windows: | ||
| 2567 | hv_pci_free_bridge_windows(hbus); | ||
| 2568 | free_irq_domain: | ||
| 2569 | irq_domain_remove(hbus->irq_domain); | ||
| 2570 | free_fwnode: | ||
| 2571 | irq_domain_free_fwnode(hbus->sysdata.fwnode); | ||
| 2572 | unmap: | ||
| 2573 | iounmap(hbus->cfg_addr); | ||
| 2574 | free_config: | ||
| 2575 | hv_free_config_window(hbus); | ||
| 2576 | close: | ||
| 2577 | vmbus_close(hdev->channel); | ||
| 2578 | destroy_wq: | ||
| 2579 | destroy_workqueue(hbus->wq); | ||
| 2580 | free_bus: | ||
| 2581 | free_page((unsigned long)hbus); | ||
| 2582 | return ret; | ||
| 2583 | } | ||
| 2584 | |||
| 2585 | static void hv_pci_bus_exit(struct hv_device *hdev) | ||
| 2586 | { | ||
| 2587 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | ||
| 2588 | struct { | ||
| 2589 | struct pci_packet teardown_packet; | ||
| 2590 | u8 buffer[sizeof(struct pci_message)]; | ||
| 2591 | } pkt; | ||
| 2592 | struct pci_bus_relations relations; | ||
| 2593 | struct hv_pci_compl comp_pkt; | ||
| 2594 | int ret; | ||
| 2595 | |||
| 2596 | /* | ||
| 2597 | * After the host sends the RESCIND_CHANNEL message, it doesn't | ||
| 2598 | * access the per-channel ringbuffer any longer. | ||
| 2599 | */ | ||
| 2600 | if (hdev->channel->rescind) | ||
| 2601 | return; | ||
| 2602 | |||
| 2603 | /* Delete any children which might still exist. */ | ||
| 2604 | memset(&relations, 0, sizeof(relations)); | ||
| 2605 | hv_pci_devices_present(hbus, &relations); | ||
| 2606 | |||
| 2607 | ret = hv_send_resources_released(hdev); | ||
| 2608 | if (ret) | ||
| 2609 | dev_err(&hdev->device, | ||
| 2610 | "Couldn't send resources released packet(s)\n"); | ||
| 2611 | |||
| 2612 | memset(&pkt.teardown_packet, 0, sizeof(pkt.teardown_packet)); | ||
| 2613 | init_completion(&comp_pkt.host_event); | ||
| 2614 | pkt.teardown_packet.completion_func = hv_pci_generic_compl; | ||
| 2615 | pkt.teardown_packet.compl_ctxt = &comp_pkt; | ||
| 2616 | pkt.teardown_packet.message[0].type = PCI_BUS_D0EXIT; | ||
| 2617 | |||
| 2618 | ret = vmbus_sendpacket(hdev->channel, &pkt.teardown_packet.message, | ||
| 2619 | sizeof(struct pci_message), | ||
| 2620 | (unsigned long)&pkt.teardown_packet, | ||
| 2621 | VM_PKT_DATA_INBAND, | ||
| 2622 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 2623 | if (!ret) | ||
| 2624 | wait_for_completion_timeout(&comp_pkt.host_event, 10 * HZ); | ||
| 2625 | } | ||
| 2626 | |||
| 2627 | /** | ||
| 2628 | * hv_pci_remove() - Remove routine for this VMBus channel | ||
| 2629 | * @hdev: VMBus's tracking struct for this root PCI bus | ||
| 2630 | * | ||
| 2631 | * Return: 0 on success, -errno on failure | ||
| 2632 | */ | ||
| 2633 | static int hv_pci_remove(struct hv_device *hdev) | ||
| 2634 | { | ||
| 2635 | struct hv_pcibus_device *hbus; | ||
| 2636 | |||
| 2637 | hbus = hv_get_drvdata(hdev); | ||
| 2638 | if (hbus->state == hv_pcibus_installed) { | ||
| 2639 | /* Remove the bus from PCI's point of view. */ | ||
| 2640 | pci_lock_rescan_remove(); | ||
| 2641 | pci_stop_root_bus(hbus->pci_bus); | ||
| 2642 | pci_remove_root_bus(hbus->pci_bus); | ||
| 2643 | pci_unlock_rescan_remove(); | ||
| 2644 | hbus->state = hv_pcibus_removed; | ||
| 2645 | } | ||
| 2646 | |||
| 2647 | hv_pci_bus_exit(hdev); | ||
| 2648 | |||
| 2649 | vmbus_close(hdev->channel); | ||
| 2650 | |||
| 2651 | iounmap(hbus->cfg_addr); | ||
| 2652 | hv_free_config_window(hbus); | ||
| 2653 | pci_free_resource_list(&hbus->resources_for_children); | ||
| 2654 | hv_pci_free_bridge_windows(hbus); | ||
| 2655 | irq_domain_remove(hbus->irq_domain); | ||
| 2656 | irq_domain_free_fwnode(hbus->sysdata.fwnode); | ||
| 2657 | put_hvpcibus(hbus); | ||
| 2658 | wait_for_completion(&hbus->remove_event); | ||
| 2659 | destroy_workqueue(hbus->wq); | ||
| 2660 | free_page((unsigned long)hbus); | ||
| 2661 | return 0; | ||
| 2662 | } | ||
| 2663 | |||
| 2664 | static const struct hv_vmbus_device_id hv_pci_id_table[] = { | ||
| 2665 | /* PCI Pass-through Class ID */ | ||
| 2666 | /* 44C4F61D-4444-4400-9D52-802E27EDE19F */ | ||
| 2667 | { HV_PCIE_GUID, }, | ||
| 2668 | { }, | ||
| 2669 | }; | ||
| 2670 | |||
| 2671 | MODULE_DEVICE_TABLE(vmbus, hv_pci_id_table); | ||
| 2672 | |||
| 2673 | static struct hv_driver hv_pci_drv = { | ||
| 2674 | .name = "hv_pci", | ||
| 2675 | .id_table = hv_pci_id_table, | ||
| 2676 | .probe = hv_pci_probe, | ||
| 2677 | .remove = hv_pci_remove, | ||
| 2678 | }; | ||
| 2679 | |||
| 2680 | static void __exit exit_hv_pci_drv(void) | ||
| 2681 | { | ||
| 2682 | vmbus_driver_unregister(&hv_pci_drv); | ||
| 2683 | } | ||
| 2684 | |||
| 2685 | static int __init init_hv_pci_drv(void) | ||
| 2686 | { | ||
| 2687 | return vmbus_driver_register(&hv_pci_drv); | ||
| 2688 | } | ||
| 2689 | |||
| 2690 | module_init(init_hv_pci_drv); | ||
| 2691 | module_exit(exit_hv_pci_drv); | ||
| 2692 | |||
| 2693 | MODULE_DESCRIPTION("Hyper-V PCI"); | ||
| 2694 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c new file mode 100644 index 000000000000..23e270839e6a --- /dev/null +++ b/drivers/pci/controller/pci-mvebu.c | |||
| @@ -0,0 +1,1313 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe driver for Marvell Armada 370 and Armada XP SoCs | ||
| 4 | * | ||
| 5 | * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
| 6 | */ | ||
| 7 | |||
| 8 | #include <linux/kernel.h> | ||
| 9 | #include <linux/pci.h> | ||
| 10 | #include <linux/clk.h> | ||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/gpio.h> | ||
| 13 | #include <linux/init.h> | ||
| 14 | #include <linux/mbus.h> | ||
| 15 | #include <linux/msi.h> | ||
| 16 | #include <linux/slab.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | #include <linux/of_gpio.h> | ||
| 21 | #include <linux/of_pci.h> | ||
| 22 | #include <linux/of_platform.h> | ||
| 23 | |||
| 24 | #include "../pci.h" | ||
| 25 | |||
| 26 | /* | ||
| 27 | * PCIe unit register offsets. | ||
| 28 | */ | ||
| 29 | #define PCIE_DEV_ID_OFF 0x0000 | ||
| 30 | #define PCIE_CMD_OFF 0x0004 | ||
| 31 | #define PCIE_DEV_REV_OFF 0x0008 | ||
| 32 | #define PCIE_BAR_LO_OFF(n) (0x0010 + ((n) << 3)) | ||
| 33 | #define PCIE_BAR_HI_OFF(n) (0x0014 + ((n) << 3)) | ||
| 34 | #define PCIE_CAP_PCIEXP 0x0060 | ||
| 35 | #define PCIE_HEADER_LOG_4_OFF 0x0128 | ||
| 36 | #define PCIE_BAR_CTRL_OFF(n) (0x1804 + (((n) - 1) * 4)) | ||
| 37 | #define PCIE_WIN04_CTRL_OFF(n) (0x1820 + ((n) << 4)) | ||
| 38 | #define PCIE_WIN04_BASE_OFF(n) (0x1824 + ((n) << 4)) | ||
| 39 | #define PCIE_WIN04_REMAP_OFF(n) (0x182c + ((n) << 4)) | ||
| 40 | #define PCIE_WIN5_CTRL_OFF 0x1880 | ||
| 41 | #define PCIE_WIN5_BASE_OFF 0x1884 | ||
| 42 | #define PCIE_WIN5_REMAP_OFF 0x188c | ||
| 43 | #define PCIE_CONF_ADDR_OFF 0x18f8 | ||
| 44 | #define PCIE_CONF_ADDR_EN 0x80000000 | ||
| 45 | #define PCIE_CONF_REG(r) ((((r) & 0xf00) << 16) | ((r) & 0xfc)) | ||
| 46 | #define PCIE_CONF_BUS(b) (((b) & 0xff) << 16) | ||
| 47 | #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 11) | ||
| 48 | #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 8) | ||
| 49 | #define PCIE_CONF_ADDR(bus, devfn, where) \ | ||
| 50 | (PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \ | ||
| 51 | PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \ | ||
| 52 | PCIE_CONF_ADDR_EN) | ||
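| | /* | ||
| | * For example, PCIE_CONF_ADDR(1, PCI_DEVFN(2, 0), 0x10) | ||
| | * = 0x80010000 | (2 << 11) | 0x10 = 0x80011010. | ||
| | */ | ||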
| 53 | #define PCIE_CONF_DATA_OFF 0x18fc | ||
| 54 | #define PCIE_MASK_OFF 0x1910 | ||
| 55 | #define PCIE_MASK_ENABLE_INTS 0x0f000000 | ||
| 56 | #define PCIE_CTRL_OFF 0x1a00 | ||
| 57 | #define PCIE_CTRL_X1_MODE 0x0001 | ||
| 58 | #define PCIE_STAT_OFF 0x1a04 | ||
| 59 | #define PCIE_STAT_BUS 0xff00 | ||
| 60 | #define PCIE_STAT_DEV 0x1f0000 | ||
| 61 | #define PCIE_STAT_LINK_DOWN BIT(0) | ||
| 62 | #define PCIE_RC_RTSTA 0x1a14 | ||
| 63 | #define PCIE_DEBUG_CTRL 0x1a60 | ||
| 64 | #define PCIE_DEBUG_SOFT_RESET BIT(20) | ||
| 65 | |||
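| | /* | ||
| | * Offsets of the emulated PCI Express capability registers within | ||
| | * the software bridge's configuration space; the capability block | ||
| | * sits immediately after the standard type 1 header. | ||
| | */ | ||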
| 66 | enum { | ||
| 67 | PCISWCAP = PCI_BRIDGE_CONTROL + 2, | ||
| 68 | PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID, | ||
| 69 | PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP, | ||
| 70 | PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL, | ||
| 71 | PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP, | ||
| 72 | PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL, | ||
| 73 | PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP, | ||
| 74 | PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL, | ||
| 75 | PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL, | ||
| 76 | PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA, | ||
| 77 | PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2, | ||
| 78 | PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2, | ||
| 79 | PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2, | ||
| 80 | PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2, | ||
| 81 | PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2, | ||
| 82 | PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2, | ||
| 83 | }; | ||
| 84 | |||
| 85 | /* PCI configuration space of a PCI-to-PCI bridge */ | ||
| 86 | struct mvebu_sw_pci_bridge { | ||
| 87 | u16 vendor; | ||
| 88 | u16 device; | ||
| 89 | u16 command; | ||
| 90 | u16 status; | ||
| 91 | u16 class; | ||
| 92 | u8 interface; | ||
| 93 | u8 revision; | ||
| 94 | u8 bist; | ||
| 95 | u8 header_type; | ||
| 96 | u8 latency_timer; | ||
| 97 | u8 cache_line_size; | ||
| 98 | u32 bar[2]; | ||
| 99 | u8 primary_bus; | ||
| 100 | u8 secondary_bus; | ||
| 101 | u8 subordinate_bus; | ||
| 102 | u8 secondary_latency_timer; | ||
| 103 | u8 iobase; | ||
| 104 | u8 iolimit; | ||
| 105 | u16 secondary_status; | ||
| 106 | u16 membase; | ||
| 107 | u16 memlimit; | ||
| 108 | u16 iobaseupper; | ||
| 109 | u16 iolimitupper; | ||
| 110 | u32 romaddr; | ||
| 111 | u8 intline; | ||
| 112 | u8 intpin; | ||
| 113 | u16 bridgectrl; | ||
| 114 | |||
| 115 | /* PCI express capability */ | ||
| 116 | u32 pcie_sltcap; | ||
| 117 | u16 pcie_devctl; | ||
| 118 | u16 pcie_rtctl; | ||
| 119 | }; | ||
| 120 | |||
| 121 | struct mvebu_pcie_port; | ||
| 122 | |||
| 123 | /* Structure representing all PCIe interfaces */ | ||
| 124 | struct mvebu_pcie { | ||
| 125 | struct platform_device *pdev; | ||
| 126 | struct mvebu_pcie_port *ports; | ||
| 127 | struct msi_controller *msi; | ||
| 128 | struct resource io; | ||
| 129 | struct resource realio; | ||
| 130 | struct resource mem; | ||
| 131 | struct resource busn; | ||
| 132 | int nports; | ||
| 133 | }; | ||
| 134 | |||
| 135 | struct mvebu_pcie_window { | ||
| 136 | phys_addr_t base; | ||
| 137 | phys_addr_t remap; | ||
| 138 | size_t size; | ||
| 139 | }; | ||
| 140 | |||
| 141 | /* Structure representing one PCIe interface */ | ||
| 142 | struct mvebu_pcie_port { | ||
| 143 | char *name; | ||
| 144 | void __iomem *base; | ||
| 145 | u32 port; | ||
| 146 | u32 lane; | ||
| 147 | int devfn; | ||
| 148 | unsigned int mem_target; | ||
| 149 | unsigned int mem_attr; | ||
| 150 | unsigned int io_target; | ||
| 151 | unsigned int io_attr; | ||
| 152 | struct clk *clk; | ||
| 153 | struct gpio_desc *reset_gpio; | ||
| 154 | char *reset_name; | ||
| 155 | struct mvebu_sw_pci_bridge bridge; | ||
| 156 | struct device_node *dn; | ||
| 157 | struct mvebu_pcie *pcie; | ||
| 158 | struct mvebu_pcie_window memwin; | ||
| 159 | struct mvebu_pcie_window iowin; | ||
| 160 | u32 saved_pcie_stat; | ||
| 161 | }; | ||
| 162 | |||
| 163 | static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) | ||
| 164 | { | ||
| 165 | writel(val, port->base + reg); | ||
| 166 | } | ||
| 167 | |||
| 168 | static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg) | ||
| 169 | { | ||
| 170 | return readl(port->base + reg); | ||
| 171 | } | ||
| 172 | |||
| 173 | static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port) | ||
| 174 | { | ||
| 175 | return port->io_target != -1 && port->io_attr != -1; | ||
| 176 | } | ||
| 177 | |||
| 178 | static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port) | ||
| 179 | { | ||
| 180 | return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN); | ||
| 181 | } | ||
| 182 | |||
| 183 | static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr) | ||
| 184 | { | ||
| 185 | u32 stat; | ||
| 186 | |||
| 187 | stat = mvebu_readl(port, PCIE_STAT_OFF); | ||
| 188 | stat &= ~PCIE_STAT_BUS; | ||
| 189 | stat |= nr << 8; | ||
| 190 | mvebu_writel(port, stat, PCIE_STAT_OFF); | ||
| 191 | } | ||
| 192 | |||
| 193 | static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) | ||
| 194 | { | ||
| 195 | u32 stat; | ||
| 196 | |||
| 197 | stat = mvebu_readl(port, PCIE_STAT_OFF); | ||
| 198 | stat &= ~PCIE_STAT_DEV; | ||
| 199 | stat |= nr << 16; | ||
| 200 | mvebu_writel(port, stat, PCIE_STAT_OFF); | ||
| 201 | } | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Set up PCIe BARs and address decode windows: | ||
| 205 | * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks | ||
| 206 | * WIN[0-3] -> DRAM bank[0-3] | ||
| 207 | */ | ||
| 208 | static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) | ||
| 209 | { | ||
| 210 | const struct mbus_dram_target_info *dram; | ||
| 211 | u32 size; | ||
| 212 | int i; | ||
| 213 | |||
| 214 | dram = mv_mbus_dram_info(); | ||
| 215 | |||
| 216 | /* First, disable and clear BARs and windows. */ | ||
| 217 | for (i = 1; i < 3; i++) { | ||
| 218 | mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i)); | ||
| 219 | mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i)); | ||
| 220 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i)); | ||
| 221 | } | ||
| 222 | |||
| 223 | for (i = 0; i < 5; i++) { | ||
| 224 | mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i)); | ||
| 225 | mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i)); | ||
| 226 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); | ||
| 227 | } | ||
| 228 | |||
| 229 | mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF); | ||
| 230 | mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF); | ||
| 231 | mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF); | ||
| 232 | |||
| 233 | /* Setup windows for DDR banks. Count total DDR size on the fly. */ | ||
| 234 | size = 0; | ||
| 235 | for (i = 0; i < dram->num_cs; i++) { | ||
| 236 | const struct mbus_dram_window *cs = dram->cs + i; | ||
| 237 | |||
| 238 | mvebu_writel(port, cs->base & 0xffff0000, | ||
| 239 | PCIE_WIN04_BASE_OFF(i)); | ||
| 240 | mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i)); | ||
| 241 | mvebu_writel(port, | ||
| 242 | ((cs->size - 1) & 0xffff0000) | | ||
| 243 | (cs->mbus_attr << 8) | | ||
| 244 | (dram->mbus_dram_target_id << 4) | 1, | ||
| 245 | PCIE_WIN04_CTRL_OFF(i)); | ||
| 246 | |||
| 247 | size += cs->size; | ||
| 248 | } | ||
| 249 | |||
| 250 | /* Round 'size' up to the next power of two. */ | ||
| 251 | if ((size & (size - 1)) != 0) | ||
| 252 | size = 1 << fls(size); | ||
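| | /* | ||
| | * e.g. DRAM banks of 512 MB + 256 MB give size = 0x30000000, which | ||
| | * rounds up to 1 << fls(0x30000000) = 1 GB (illustrative values). | ||
| | */ | ||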
| 253 | |||
| 254 | /* Set up BAR[1] to cover all DRAM banks. */ | ||
| 255 | mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1)); | ||
| 256 | mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); | ||
| 257 | mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, | ||
| 258 | PCIE_BAR_CTRL_OFF(1)); | ||
| 259 | } | ||
| 260 | |||
| 261 | static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) | ||
| 262 | { | ||
| 263 | u32 cmd, mask; | ||
| 264 | |||
| 265 | /* Point PCIe unit MBUS decode windows to DRAM space. */ | ||
| 266 | mvebu_pcie_setup_wins(port); | ||
| 267 | |||
| 268 | /* Master + slave enable. */ | ||
| 269 | cmd = mvebu_readl(port, PCIE_CMD_OFF); | ||
| 270 | cmd |= PCI_COMMAND_IO; | ||
| 271 | cmd |= PCI_COMMAND_MEMORY; | ||
| 272 | cmd |= PCI_COMMAND_MASTER; | ||
| 273 | mvebu_writel(port, cmd, PCIE_CMD_OFF); | ||
| 274 | |||
| 275 | /* Enable interrupt lines A-D. */ | ||
| 276 | mask = mvebu_readl(port, PCIE_MASK_OFF); | ||
| 277 | mask |= PCIE_MASK_ENABLE_INTS; | ||
| 278 | mvebu_writel(port, mask, PCIE_MASK_OFF); | ||
| 279 | } | ||
| 280 | |||
| 281 | static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port, | ||
| 282 | struct pci_bus *bus, | ||
| 283 | u32 devfn, int where, int size, u32 *val) | ||
| 284 | { | ||
| 285 | void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; | ||
| 286 | |||
| 287 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), | ||
| 288 | PCIE_CONF_ADDR_OFF); | ||
| 289 | |||
| 290 | switch (size) { | ||
| 291 | case 1: | ||
| 292 | *val = readb_relaxed(conf_data + (where & 3)); | ||
| 293 | break; | ||
| 294 | case 2: | ||
| 295 | *val = readw_relaxed(conf_data + (where & 2)); | ||
| 296 | break; | ||
| 297 | case 4: | ||
| 298 | *val = readl_relaxed(conf_data); | ||
| 299 | break; | ||
| 300 | } | ||
| 301 | |||
| 302 | return PCIBIOS_SUCCESSFUL; | ||
| 303 | } | ||
| 304 | |||
| 305 | static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port, | ||
| 306 | struct pci_bus *bus, | ||
| 307 | u32 devfn, int where, int size, u32 val) | ||
| 308 | { | ||
| 309 | void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF; | ||
| 310 | |||
| 311 | mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where), | ||
| 312 | PCIE_CONF_ADDR_OFF); | ||
| 313 | |||
| 314 | switch (size) { | ||
| 315 | case 1: | ||
| 316 | writeb(val, conf_data + (where & 3)); | ||
| 317 | break; | ||
| 318 | case 2: | ||
| 319 | writew(val, conf_data + (where & 2)); | ||
| 320 | break; | ||
| 321 | case 4: | ||
| 322 | writel(val, conf_data); | ||
| 323 | break; | ||
| 324 | default: | ||
| 325 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 326 | } | ||
| 327 | |||
| 328 | return PCIBIOS_SUCCESSFUL; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* | ||
| 332 | * Remove windows, starting from the largest ones to the smallest | ||
| 333 | * ones. | ||
| 334 | */ | ||
| 335 | static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port, | ||
| 336 | phys_addr_t base, size_t size) | ||
| 337 | { | ||
| 338 | while (size) { | ||
| 339 | size_t sz = 1 << (fls(size) - 1); | ||
| 340 | |||
| 341 | mvebu_mbus_del_window(base, sz); | ||
| 342 | base += sz; | ||
| 343 | size -= sz; | ||
| 344 | } | ||
| 345 | } | ||
| 346 | |||
| 347 | /* | ||
| 348 | * MBus windows can only have a power of two size, but PCI BARs do not | ||
| 349 | * have this constraint. Therefore, we have to split the PCI BAR into | ||
| 350 | * areas each having a power of two size. We start from the largest | ||
| 351 | * one (i.e. the highest-order bit set in the size). | ||
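| | * | ||
| | * Illustrative: a 10 MB (0xa00000) area is thus covered by an | ||
| | * 8 MB window followed by a 2 MB window. | ||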
| 352 | */ | ||
| 353 | static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port, | ||
| 354 | unsigned int target, unsigned int attribute, | ||
| 355 | phys_addr_t base, size_t size, | ||
| 356 | phys_addr_t remap) | ||
| 357 | { | ||
| 358 | size_t size_mapped = 0; | ||
| 359 | |||
| 360 | while (size) { | ||
| 361 | size_t sz = 1 << (fls(size) - 1); | ||
| 362 | int ret; | ||
| 363 | |||
| 364 | ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base, | ||
| 365 | sz, remap); | ||
| 366 | if (ret) { | ||
| 367 | phys_addr_t end = base + sz - 1; | ||
| 368 | |||
| 369 | dev_err(&port->pcie->pdev->dev, | ||
| 370 | "Could not create MBus window at [mem %pa-%pa]: %d\n", | ||
| 371 | &base, &end, ret); | ||
| 372 | mvebu_pcie_del_windows(port, base - size_mapped, | ||
| 373 | size_mapped); | ||
| 374 | return; | ||
| 375 | } | ||
| 376 | |||
| 377 | size -= sz; | ||
| 378 | size_mapped += sz; | ||
| 379 | base += sz; | ||
| 380 | if (remap != MVEBU_MBUS_NO_REMAP) | ||
| 381 | remap += sz; | ||
| 382 | } | ||
| 383 | } | ||
| 384 | |||
| 385 | static void mvebu_pcie_set_window(struct mvebu_pcie_port *port, | ||
| 386 | unsigned int target, unsigned int attribute, | ||
| 387 | const struct mvebu_pcie_window *desired, | ||
| 388 | struct mvebu_pcie_window *cur) | ||
| 389 | { | ||
| 390 | if (desired->base == cur->base && desired->remap == cur->remap && | ||
| 391 | desired->size == cur->size) | ||
| 392 | return; | ||
| 393 | |||
| 394 | if (cur->size != 0) { | ||
| 395 | mvebu_pcie_del_windows(port, cur->base, cur->size); | ||
| 396 | cur->size = 0; | ||
| 397 | cur->base = 0; | ||
| 398 | |||
| 399 | /* | ||
| 400 | * If something tries to change the window while it is enabled | ||
| 401 | * the change will not be done atomically. That would be | ||
| 402 | * difficult to do in the general case. | ||
| 403 | */ | ||
| 404 | } | ||
| 405 | |||
| 406 | if (desired->size == 0) | ||
| 407 | return; | ||
| 408 | |||
| 409 | mvebu_pcie_add_windows(port, target, attribute, desired->base, | ||
| 410 | desired->size, desired->remap); | ||
| 411 | *cur = *desired; | ||
| 412 | } | ||
| 413 | |||
| 414 | static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port) | ||
| 415 | { | ||
| 416 | struct mvebu_pcie_window desired = {}; | ||
| 417 | |||
| 418 | /* Are the new iobase/iolimit values invalid? */ | ||
| 419 | if (port->bridge.iolimit < port->bridge.iobase || | ||
| 420 | port->bridge.iolimitupper < port->bridge.iobaseupper || | ||
| 421 | !(port->bridge.command & PCI_COMMAND_IO)) { | ||
| 422 | mvebu_pcie_set_window(port, port->io_target, port->io_attr, | ||
| 423 | &desired, &port->iowin); | ||
| 424 | return; | ||
| 425 | } | ||
| 426 | |||
| 427 | if (!mvebu_has_ioport(port)) { | ||
| 428 | dev_WARN(&port->pcie->pdev->dev, | ||
| 429 | "Attempt to set IO when IO is disabled\n"); | ||
| 430 | return; | ||
| 431 | } | ||
| 432 | |||
| 433 | /* | ||
| 434 | * We read the PCI-to-PCI bridge emulated registers, and | ||
| 435 | * calculate the base address and size of the address decoding | ||
| 436 | * window to setup, according to the PCI-to-PCI bridge | ||
| 437 | * specifications. iobase is the bus address, port->iowin_base | ||
| 438 | * is the CPU address. | ||
| 439 | */ | ||
| 440 | desired.remap = ((port->bridge.iobase & 0xF0) << 8) | | ||
| 441 | (port->bridge.iobaseupper << 16); | ||
| 442 | desired.base = port->pcie->io.start + desired.remap; | ||
| 443 | desired.size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) | | ||
| 444 | (port->bridge.iolimitupper << 16)) - | ||
| 445 | desired.remap) + | ||
| 446 | 1; | ||
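| | /* | ||
| | * Illustrative: iobase = 0x21, iobaseupper = 0x0001, iolimit = 0x31 | ||
| | * and iolimitupper = 0x0001 decode to remap = 0x12000 and | ||
| | * size = (0x13fff - 0x12000) + 1 = 0x2000. | ||
| | */ | ||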
| 447 | |||
| 448 | mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired, | ||
| 449 | &port->iowin); | ||
| 450 | } | ||
| 451 | |||
| 452 | static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port) | ||
| 453 | { | ||
| 454 | struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP}; | ||
| 455 | |||
| 456 | /* Are the new membase/memlimit values invalid? */ | ||
| 457 | if (port->bridge.memlimit < port->bridge.membase || | ||
| 458 | !(port->bridge.command & PCI_COMMAND_MEMORY)) { | ||
| 459 | mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, | ||
| 460 | &desired, &port->memwin); | ||
| 461 | return; | ||
| 462 | } | ||
| 463 | |||
| 464 | /* | ||
| 465 | * We read the PCI-to-PCI bridge emulated registers, and | ||
| 466 | * calculate the base address and size of the address decoding | ||
| 467 | * window to setup, according to the PCI-to-PCI bridge | ||
| 468 | * specifications. | ||
| 469 | */ | ||
| 470 | desired.base = ((port->bridge.membase & 0xFFF0) << 16); | ||
| 471 | desired.size = (((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) - | ||
| 472 | desired.base + 1; | ||
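| | /* | ||
| | * Illustrative: membase = 0xe000 and memlimit = 0xe7f0 decode to | ||
| | * base = 0xe0000000 and size = 0xe7ffffff - 0xe0000000 + 1 = 128 MB. | ||
| | */ | ||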
| 473 | |||
| 474 | mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired, | ||
| 475 | &port->memwin); | ||
| 476 | } | ||
| 477 | |||
| 478 | /* | ||
| 479 | * Initialize the configuration space of the PCI-to-PCI bridge | ||
| 480 | * associated with the given PCIe interface. | ||
| 481 | */ | ||
| 482 | static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port) | ||
| 483 | { | ||
| 484 | struct mvebu_sw_pci_bridge *bridge = &port->bridge; | ||
| 485 | |||
| 486 | memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge)); | ||
| 487 | |||
| 488 | bridge->class = PCI_CLASS_BRIDGE_PCI; | ||
| 489 | bridge->vendor = PCI_VENDOR_ID_MARVELL; | ||
| 490 | bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16; | ||
| 491 | bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff; | ||
| 492 | bridge->header_type = PCI_HEADER_TYPE_BRIDGE; | ||
| 493 | bridge->cache_line_size = 0x10; | ||
| 494 | |||
| 495 | /* We support 32-bit I/O addressing */ | ||
| 496 | bridge->iobase = PCI_IO_RANGE_TYPE_32; | ||
| 497 | bridge->iolimit = PCI_IO_RANGE_TYPE_32; | ||
| 498 | |||
| 499 | /* Add capabilities */ | ||
| 500 | bridge->status = PCI_STATUS_CAP_LIST; | ||
| 501 | } | ||
| 502 | |||
| 503 | /* | ||
| 504 | * Read the configuration space of the PCI-to-PCI bridge associated with | ||
| 505 | * the given PCIe interface. | ||
| 506 | */ | ||
| 507 | static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port, | ||
| 508 | unsigned int where, int size, u32 *value) | ||
| 509 | { | ||
| 510 | struct mvebu_sw_pci_bridge *bridge = &port->bridge; | ||
| 511 | |||
| 512 | switch (where & ~3) { | ||
| 513 | case PCI_VENDOR_ID: | ||
| 514 | *value = bridge->device << 16 | bridge->vendor; | ||
| 515 | break; | ||
| 516 | |||
| 517 | case PCI_COMMAND: | ||
| 518 | *value = bridge->command | bridge->status << 16; | ||
| 519 | break; | ||
| 520 | |||
| 521 | case PCI_CLASS_REVISION: | ||
| 522 | *value = bridge->class << 16 | bridge->interface << 8 | | ||
| 523 | bridge->revision; | ||
| 524 | break; | ||
| 525 | |||
| 526 | case PCI_CACHE_LINE_SIZE: | ||
| 527 | *value = bridge->bist << 24 | bridge->header_type << 16 | | ||
| 528 | bridge->latency_timer << 8 | bridge->cache_line_size; | ||
| 529 | break; | ||
| 530 | |||
| 531 | case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: | ||
| 532 | *value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4]; | ||
| 533 | break; | ||
| 534 | |||
| 535 | case PCI_PRIMARY_BUS: | ||
| 536 | *value = (bridge->secondary_latency_timer << 24 | | ||
| 537 | bridge->subordinate_bus << 16 | | ||
| 538 | bridge->secondary_bus << 8 | | ||
| 539 | bridge->primary_bus); | ||
| 540 | break; | ||
| 541 | |||
| 542 | case PCI_IO_BASE: | ||
| 543 | if (!mvebu_has_ioport(port)) | ||
| 544 | *value = bridge->secondary_status << 16; | ||
| 545 | else | ||
| 546 | *value = (bridge->secondary_status << 16 | | ||
| 547 | bridge->iolimit << 8 | | ||
| 548 | bridge->iobase); | ||
| 549 | break; | ||
| 550 | |||
| 551 | case PCI_MEMORY_BASE: | ||
| 552 | *value = (bridge->memlimit << 16 | bridge->membase); | ||
| 553 | break; | ||
| 554 | |||
| 555 | case PCI_PREF_MEMORY_BASE: | ||
| 556 | *value = 0; | ||
| 557 | break; | ||
| 558 | |||
| 559 | case PCI_IO_BASE_UPPER16: | ||
| 560 | *value = (bridge->iolimitupper << 16 | bridge->iobaseupper); | ||
| 561 | break; | ||
| 562 | |||
| 563 | case PCI_CAPABILITY_LIST: | ||
| 564 | *value = PCISWCAP; | ||
| 565 | break; | ||
| 566 | |||
| 567 | case PCI_ROM_ADDRESS1: | ||
| 568 | *value = 0; | ||
| 569 | break; | ||
| 570 | |||
| 571 | case PCI_INTERRUPT_LINE: | ||
| 572 | /* LINE PIN MIN_GNT MAX_LAT */ | ||
| 573 | *value = 0; | ||
| 574 | break; | ||
| 575 | |||
| 576 | case PCISWCAP_EXP_LIST_ID: | ||
| 577 | /* Set PCIe v2, root port, slot support */ | ||
| 578 | *value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 | | ||
| 579 | PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP; | ||
| 580 | break; | ||
| 581 | |||
| 582 | case PCISWCAP_EXP_DEVCAP: | ||
| 583 | *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP); | ||
| 584 | break; | ||
| 585 | |||
| 586 | case PCISWCAP_EXP_DEVCTL: | ||
| 587 | *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) & | ||
| 588 | ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | | ||
| 589 | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); | ||
| 590 | *value |= bridge->pcie_devctl; | ||
| 591 | break; | ||
| 592 | |||
| 593 | case PCISWCAP_EXP_LNKCAP: | ||
| 594 | /* | ||
| 595 | * PCIe requires the clock power management capability to be | ||
| 596 | * hard-wired to zero for downstream ports | ||
| 597 | */ | ||
| 598 | *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) & | ||
| 599 | ~PCI_EXP_LNKCAP_CLKPM; | ||
| 600 | break; | ||
| 601 | |||
| 602 | case PCISWCAP_EXP_LNKCTL: | ||
| 603 | *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); | ||
| 604 | break; | ||
| 605 | |||
| 606 | case PCISWCAP_EXP_SLTCAP: | ||
| 607 | *value = bridge->pcie_sltcap; | ||
| 608 | break; | ||
| 609 | |||
| 610 | case PCISWCAP_EXP_SLTCTL: | ||
| 611 | *value = PCI_EXP_SLTSTA_PDS << 16; | ||
| 612 | break; | ||
| 613 | |||
| 614 | case PCISWCAP_EXP_RTCTL: | ||
| 615 | *value = bridge->pcie_rtctl; | ||
| 616 | break; | ||
| 617 | |||
| 618 | case PCISWCAP_EXP_RTSTA: | ||
| 619 | *value = mvebu_readl(port, PCIE_RC_RTSTA); | ||
| 620 | break; | ||
| 621 | |||
| 622 | /* PCIe requires the v2 fields to be hard-wired to zero */ | ||
| 623 | case PCISWCAP_EXP_DEVCAP2: | ||
| 624 | case PCISWCAP_EXP_DEVCTL2: | ||
| 625 | case PCISWCAP_EXP_LNKCAP2: | ||
| 626 | case PCISWCAP_EXP_LNKCTL2: | ||
| 627 | case PCISWCAP_EXP_SLTCAP2: | ||
| 628 | case PCISWCAP_EXP_SLTCTL2: | ||
| 629 | default: | ||
| 630 | /* | ||
| 631 | * PCI defines configuration read accesses to reserved or | ||
| 632 | * unimplemented registers to read as zero and complete | ||
| 633 | * normally. | ||
| 634 | */ | ||
| 635 | *value = 0; | ||
| 636 | return PCIBIOS_SUCCESSFUL; | ||
| 637 | } | ||
| 638 | |||
| 639 | if (size == 2) | ||
| 640 | *value = (*value >> (8 * (where & 3))) & 0xffff; | ||
| 641 | else if (size == 1) | ||
| 642 | *value = (*value >> (8 * (where & 3))) & 0xff; | ||
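| | /* | ||
| | * e.g. a one-byte read of the header type at where = 0x0e is served | ||
| | * from the 32-bit value assembled for offset 0x0c, shifted right by | ||
| | * 16 bits (illustrative). | ||
| | */ | ||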
| 643 | |||
| 644 | return PCIBIOS_SUCCESSFUL; | ||
| 645 | } | ||
| 646 | |||
| 647 | /* Write to the PCI-to-PCI bridge configuration space */ | ||
| 648 | static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port, | ||
| 649 | unsigned int where, int size, u32 value) | ||
| 650 | { | ||
| 651 | struct mvebu_sw_pci_bridge *bridge = &port->bridge; | ||
| 652 | u32 mask, reg; | ||
| 653 | int err; | ||
| 654 | |||
| 655 | if (size == 4) | ||
| 656 | mask = 0x0; | ||
| 657 | else if (size == 2) | ||
| 658 | mask = ~(0xffff << ((where & 3) * 8)); | ||
| 659 | else if (size == 1) | ||
| 660 | mask = ~(0xff << ((where & 3) * 8)); | ||
| 661 | else | ||
| 662 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 663 | |||
| 664 | err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, ®); | ||
| 665 | if (err) | ||
| 666 | return err; | ||
| 667 | |||
| 668 | value = (reg & mask) | value << ((where & 3) * 8); | ||
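| | /* | ||
| | * Illustrative: a one-byte write of 0xf0 to where = 0x1d (the I/O | ||
| | * limit) reads the full dword at 0x1c, clears bits [15:8] with | ||
| | * mask = ~(0xff << 8), merges in 0xf0 << 8 and then dispatches on | ||
| | * the aligned offset below. | ||
| | */ | ||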
| 669 | |||
| 670 | switch (where & ~3) { | ||
| 671 | case PCI_COMMAND: | ||
| 672 | { | ||
| 673 | u32 old = bridge->command; | ||
| 674 | |||
| 675 | if (!mvebu_has_ioport(port)) | ||
| 676 | value &= ~PCI_COMMAND_IO; | ||
| 677 | |||
| 678 | bridge->command = value & 0xffff; | ||
| 679 | if ((old ^ bridge->command) & PCI_COMMAND_IO) | ||
| 680 | mvebu_pcie_handle_iobase_change(port); | ||
| 681 | if ((old ^ bridge->command) & PCI_COMMAND_MEMORY) | ||
| 682 | mvebu_pcie_handle_membase_change(port); | ||
| 683 | break; | ||
| 684 | } | ||
| 685 | |||
| 686 | case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1: | ||
| 687 | bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value; | ||
| 688 | break; | ||
| 689 | |||
| 690 | case PCI_IO_BASE: | ||
| 691 | /* | ||
| 692 | * Also keep the read-only I/O range type bits | ||
| 693 | * (PCI_IO_RANGE_TYPE_32) set; they indicate that we | ||
| 694 | * support 32-bit I/O addressing. | ||
| 695 | */ | ||
| 696 | bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32; | ||
| 697 | bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32; | ||
| 698 | mvebu_pcie_handle_iobase_change(port); | ||
| 699 | break; | ||
| 700 | |||
| 701 | case PCI_MEMORY_BASE: | ||
| 702 | bridge->membase = value & 0xffff; | ||
| 703 | bridge->memlimit = value >> 16; | ||
| 704 | mvebu_pcie_handle_membase_change(port); | ||
| 705 | break; | ||
| 706 | |||
| 707 | case PCI_IO_BASE_UPPER16: | ||
| 708 | bridge->iobaseupper = value & 0xffff; | ||
| 709 | bridge->iolimitupper = value >> 16; | ||
| 710 | mvebu_pcie_handle_iobase_change(port); | ||
| 711 | break; | ||
| 712 | |||
| 713 | case PCI_PRIMARY_BUS: | ||
| 714 | bridge->primary_bus = value & 0xff; | ||
| 715 | bridge->secondary_bus = (value >> 8) & 0xff; | ||
| 716 | bridge->subordinate_bus = (value >> 16) & 0xff; | ||
| 717 | bridge->secondary_latency_timer = (value >> 24) & 0xff; | ||
| 718 | mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus); | ||
| 719 | break; | ||
| 720 | |||
| 721 | case PCISWCAP_EXP_DEVCTL: | ||
| 722 | /* | ||
| 723 | * The Armada 370 documentation says these bits must always | ||
| 724 | * be zero when in root complex mode. | ||
| 725 | */ | ||
| 726 | value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE | | ||
| 727 | PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE); | ||
| 728 | |||
| 729 | /* | ||
| 730 | * If the mask is 0xffff0000, then we only want to write | ||
| 731 | * the device control register, rather than clearing the | ||
| 732 | * RW1C bits in the device status register. Mask out the | ||
| 733 | * status register bits. | ||
| 734 | */ | ||
| 735 | if (mask == 0xffff0000) | ||
| 736 | value &= 0xffff; | ||
| 737 | |||
| 738 | mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL); | ||
| 739 | break; | ||
| 740 | |||
| 741 | case PCISWCAP_EXP_LNKCTL: | ||
| 742 | /* | ||
| 743 | * If we don't support CLKREQ, we must ensure that the | ||
| 744 | * CLKREQ enable bit always reads zero. Since we haven't | ||
| 745 | * had this capability, and it's dependent on board wiring, | ||
| 746 | * disable it for the time being. | ||
| 747 | */ | ||
| 748 | value &= ~PCI_EXP_LNKCTL_CLKREQ_EN; | ||
| 749 | |||
| 750 | /* | ||
| 751 | * If the mask is 0xffff0000, then we only want to write | ||
| 752 | * the link control register, rather than clearing the | ||
| 753 | * RW1C bits in the link status register. Mask out the | ||
| 754 | * RW1C status register bits. | ||
| 755 | */ | ||
| 756 | if (mask == 0xffff0000) | ||
| 757 | value &= ~((PCI_EXP_LNKSTA_LABS | | ||
| 758 | PCI_EXP_LNKSTA_LBMS) << 16); | ||
| 759 | |||
| 760 | mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL); | ||
| 761 | break; | ||
| 762 | |||
| 763 | case PCISWCAP_EXP_RTSTA: | ||
| 764 | mvebu_writel(port, value, PCIE_RC_RTSTA); | ||
| 765 | break; | ||
| 766 | |||
| 767 | default: | ||
| 768 | break; | ||
| 769 | } | ||
| 770 | |||
| 771 | return PCIBIOS_SUCCESSFUL; | ||
| 772 | } | ||
| 773 | |||
| 774 | static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys) | ||
| 775 | { | ||
| 776 | return sys->private_data; | ||
| 777 | } | ||
| 778 | |||
| 779 | static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie, | ||
| 780 | struct pci_bus *bus, | ||
| 781 | int devfn) | ||
| 782 | { | ||
| 783 | int i; | ||
| 784 | |||
| 785 | for (i = 0; i < pcie->nports; i++) { | ||
| 786 | struct mvebu_pcie_port *port = &pcie->ports[i]; | ||
| 787 | |||
| 788 | if (bus->number == 0 && port->devfn == devfn) | ||
| 789 | return port; | ||
| 790 | if (bus->number != 0 && | ||
| 791 | bus->number >= port->bridge.secondary_bus && | ||
| 792 | bus->number <= port->bridge.subordinate_bus) | ||
| 793 | return port; | ||
| 794 | } | ||
| 795 | |||
| 796 | return NULL; | ||
| 797 | } | ||
| 798 | |||
| 799 | /* PCI configuration space write function */ | ||
| 800 | static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
| 801 | int where, int size, u32 val) | ||
| 802 | { | ||
| 803 | struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); | ||
| 804 | struct mvebu_pcie_port *port; | ||
| 805 | int ret; | ||
| 806 | |||
| 807 | port = mvebu_pcie_find_port(pcie, bus, devfn); | ||
| 808 | if (!port) | ||
| 809 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 810 | |||
| 811 | /* Access the emulated PCI-to-PCI bridge */ | ||
| 812 | if (bus->number == 0) | ||
| 813 | return mvebu_sw_pci_bridge_write(port, where, size, val); | ||
| 814 | |||
| 815 | if (!mvebu_pcie_link_up(port)) | ||
| 816 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 817 | |||
| 818 | /* Access the real PCIe interface */ | ||
| 819 | ret = mvebu_pcie_hw_wr_conf(port, bus, devfn, | ||
| 820 | where, size, val); | ||
| 821 | |||
| 822 | return ret; | ||
| 823 | } | ||
| 824 | |||
| 825 | /* PCI configuration space read function */ | ||
| 826 | static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
| 827 | int size, u32 *val) | ||
| 828 | { | ||
| 829 | struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata); | ||
| 830 | struct mvebu_pcie_port *port; | ||
| 831 | int ret; | ||
| 832 | |||
| 833 | port = mvebu_pcie_find_port(pcie, bus, devfn); | ||
| 834 | if (!port) { | ||
| 835 | *val = 0xffffffff; | ||
| 836 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 837 | } | ||
| 838 | |||
| 839 | /* Access the emulated PCI-to-PCI bridge */ | ||
| 840 | if (bus->number == 0) | ||
| 841 | return mvebu_sw_pci_bridge_read(port, where, size, val); | ||
| 842 | |||
| 843 | if (!mvebu_pcie_link_up(port)) { | ||
| 844 | *val = 0xffffffff; | ||
| 845 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 846 | } | ||
| 847 | |||
| 848 | /* Access the real PCIe interface */ | ||
| 849 | ret = mvebu_pcie_hw_rd_conf(port, bus, devfn, | ||
| 850 | where, size, val); | ||
| 851 | |||
| 852 | return ret; | ||
| 853 | } | ||
| 854 | |||
| 855 | static struct pci_ops mvebu_pcie_ops = { | ||
| 856 | .read = mvebu_pcie_rd_conf, | ||
| 857 | .write = mvebu_pcie_wr_conf, | ||
| 858 | }; | ||
| 859 | |||
| 860 | static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) | ||
| 861 | { | ||
| 862 | struct mvebu_pcie *pcie = sys_to_pcie(sys); | ||
| 863 | int err, i; | ||
| 864 | |||
| 865 | pcie->mem.name = "PCI MEM"; | ||
| 866 | pcie->realio.name = "PCI I/O"; | ||
| 867 | |||
| 868 | if (resource_size(&pcie->realio) != 0) | ||
| 869 | pci_add_resource_offset(&sys->resources, &pcie->realio, | ||
| 870 | sys->io_offset); | ||
| 871 | |||
| 872 | pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); | ||
| 873 | pci_add_resource(&sys->resources, &pcie->busn); | ||
| 874 | |||
| 875 | err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources); | ||
| 876 | if (err) | ||
| 877 | return 0; | ||
| 878 | |||
| 879 | for (i = 0; i < pcie->nports; i++) { | ||
| 880 | struct mvebu_pcie_port *port = &pcie->ports[i]; | ||
| 881 | |||
| 882 | if (!port->base) | ||
| 883 | continue; | ||
| 884 | mvebu_pcie_setup_hw(port); | ||
| 885 | } | ||
| 886 | |||
| 887 | return 1; | ||
| 888 | } | ||
| 889 | |||
| 890 | static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev, | ||
| 891 | const struct resource *res, | ||
| 892 | resource_size_t start, | ||
| 893 | resource_size_t size, | ||
| 894 | resource_size_t align) | ||
| 895 | { | ||
| 896 | if (dev->bus->number != 0) | ||
| 897 | return start; | ||
| 898 | |||
| 899 | /* | ||
| 900 | * On the PCI-to-PCI bridge side, the I/O windows must have at | ||
| 901 | * least a 64 KB size and the memory windows must have at | ||
| 902 | * least a 1 MB size. Moreover, MBus windows need to have a | ||
| 903 | * base address aligned to their size, and their size must be | ||
| 904 | * a power of two. This means that if the BAR doesn't have a | ||
| 905 | * power of two size, several MBus windows will actually be | ||
| 906 | * created. We need to ensure that the biggest MBus window | ||
| 907 | * (which will be the first one) is aligned to its size, which | ||
| 908 | * explains the rounddown_pow_of_two() being done here. | ||
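| | * | ||
| | * Illustrative: a 1.5 MB (0x180000) memory BAR is aligned to 1 MB, | ||
| | * so its first (1 MB) MBus window is size-aligned and the trailing | ||
| | * 512 KB window simply follows it. | ||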
| 909 | */ | ||
| 910 | if (res->flags & IORESOURCE_IO) | ||
| 911 | return round_up(start, max_t(resource_size_t, SZ_64K, | ||
| 912 | rounddown_pow_of_two(size))); | ||
| 913 | else if (res->flags & IORESOURCE_MEM) | ||
| 914 | return round_up(start, max_t(resource_size_t, SZ_1M, | ||
| 915 | rounddown_pow_of_two(size))); | ||
| 916 | else | ||
| 917 | return start; | ||
| 918 | } | ||
| 919 | |||
| 920 | static void mvebu_pcie_enable(struct mvebu_pcie *pcie) | ||
| 921 | { | ||
| 922 | struct hw_pci hw; | ||
| 923 | |||
| 924 | memset(&hw, 0, sizeof(hw)); | ||
| 925 | |||
| 926 | #ifdef CONFIG_PCI_MSI | ||
| 927 | hw.msi_ctrl = pcie->msi; | ||
| 928 | #endif | ||
| 929 | |||
| 930 | hw.nr_controllers = 1; | ||
| 931 | hw.private_data = (void **)&pcie; | ||
| 932 | hw.setup = mvebu_pcie_setup; | ||
| 933 | hw.map_irq = of_irq_parse_and_map_pci; | ||
| 934 | hw.ops = &mvebu_pcie_ops; | ||
| 935 | hw.align_resource = mvebu_pcie_align_resource; | ||
| 936 | |||
| 937 | pci_common_init_dev(&pcie->pdev->dev, &hw); | ||
| 938 | } | ||
| 939 | |||
| 940 | /* | ||
| 941 | * Map the PCIe unit registers described by the first entry of the | ||
| 942 | * port node's reg = <...> property; the caller passes the node | ||
| 943 | * matching the given port/lane. | ||
| 944 | */ | ||
| 945 | static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, | ||
| 946 | struct device_node *np, | ||
| 947 | struct mvebu_pcie_port *port) | ||
| 948 | { | ||
| 949 | struct resource regs; | ||
| 950 | int ret = 0; | ||
| 951 | |||
| 952 | ret = of_address_to_resource(np, 0, ®s); | ||
| 953 | if (ret) | ||
| 954 | return ERR_PTR(ret); | ||
| 955 | |||
| 956 | return devm_ioremap_resource(&pdev->dev, ®s); | ||
| 957 | } | ||
| 958 | |||
| 959 | #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) | ||
| 960 | #define DT_TYPE_IO 0x1 | ||
| 961 | #define DT_TYPE_MEM32 0x2 | ||
| 962 | #define DT_CPUADDR_TO_TARGET(cpuaddr) (((cpuaddr) >> 56) & 0xFF) | ||
| 963 | #define DT_CPUADDR_TO_ATTR(cpuaddr) (((cpuaddr) >> 48) & 0xFF) | ||
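| | /* | ||
| | * Illustrative: for a ranges entry whose parent address high cell is | ||
| | * MBUS_ID(0x04, 0xe8) = 0x04e80000, cpuaddr is 0x04e80000 << 32, so | ||
| | * the target decodes to 0x04 and the attribute to 0xe8. | ||
| | */ | ||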
| 964 | |||
| 965 | static int mvebu_get_tgt_attr(struct device_node *np, int devfn, | ||
| 966 | unsigned long type, | ||
| 967 | unsigned int *tgt, | ||
| 968 | unsigned int *attr) | ||
| 969 | { | ||
| 970 | const int na = 3, ns = 2; | ||
| 971 | const __be32 *range; | ||
| 972 | int rlen, nranges, rangesz, pna, i; | ||
| 973 | |||
| 974 | *tgt = -1; | ||
| 975 | *attr = -1; | ||
| 976 | |||
| 977 | range = of_get_property(np, "ranges", &rlen); | ||
| 978 | if (!range) | ||
| 979 | return -EINVAL; | ||
| 980 | |||
| 981 | pna = of_n_addr_cells(np); | ||
| 982 | rangesz = pna + na + ns; | ||
| 983 | nranges = rlen / sizeof(__be32) / rangesz; | ||
| 984 | |||
| 985 | for (i = 0; i < nranges; i++, range += rangesz) { | ||
| 986 | u32 flags = of_read_number(range, 1); | ||
| 987 | u32 slot = of_read_number(range + 1, 1); | ||
| 988 | u64 cpuaddr = of_read_number(range + na, pna); | ||
| 989 | unsigned long rtype; | ||
| 990 | |||
| 991 | if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO) | ||
| 992 | rtype = IORESOURCE_IO; | ||
| 993 | else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32) | ||
| 994 | rtype = IORESOURCE_MEM; | ||
| 995 | else | ||
| 996 | continue; | ||
| 997 | |||
| 998 | if (slot == PCI_SLOT(devfn) && type == rtype) { | ||
| 999 | *tgt = DT_CPUADDR_TO_TARGET(cpuaddr); | ||
| 1000 | *attr = DT_CPUADDR_TO_ATTR(cpuaddr); | ||
| 1001 | return 0; | ||
| 1002 | } | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | return -ENOENT; | ||
| 1006 | } | ||
| 1007 | |||
| 1008 | #ifdef CONFIG_PM_SLEEP | ||
| 1009 | static int mvebu_pcie_suspend(struct device *dev) | ||
| 1010 | { | ||
| 1011 | struct mvebu_pcie *pcie; | ||
| 1012 | int i; | ||
| 1013 | |||
| 1014 | pcie = dev_get_drvdata(dev); | ||
| 1015 | for (i = 0; i < pcie->nports; i++) { | ||
| 1016 | struct mvebu_pcie_port *port = pcie->ports + i; | ||
| 1017 | port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF); | ||
| 1018 | } | ||
| 1019 | |||
| 1020 | return 0; | ||
| 1021 | } | ||
| 1022 | |||
| 1023 | static int mvebu_pcie_resume(struct device *dev) | ||
| 1024 | { | ||
| 1025 | struct mvebu_pcie *pcie; | ||
| 1026 | int i; | ||
| 1027 | |||
| 1028 | pcie = dev_get_drvdata(dev); | ||
| 1029 | for (i = 0; i < pcie->nports; i++) { | ||
| 1030 | struct mvebu_pcie_port *port = pcie->ports + i; | ||
| 1031 | mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF); | ||
| 1032 | mvebu_pcie_setup_hw(port); | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | return 0; | ||
| 1036 | } | ||
| 1037 | #endif | ||
| 1038 | |||
| 1039 | static void mvebu_pcie_port_clk_put(void *data) | ||
| 1040 | { | ||
| 1041 | struct mvebu_pcie_port *port = data; | ||
| 1042 | |||
| 1043 | clk_put(port->clk); | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie, | ||
| 1047 | struct mvebu_pcie_port *port, struct device_node *child) | ||
| 1048 | { | ||
| 1049 | struct device *dev = &pcie->pdev->dev; | ||
| 1050 | enum of_gpio_flags flags; | ||
| 1051 | int reset_gpio, ret; | ||
| 1052 | |||
| 1053 | port->pcie = pcie; | ||
| 1054 | |||
| 1055 | if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) { | ||
| 1056 | dev_warn(dev, "ignoring %pOF, missing pcie-port property\n", | ||
| 1057 | child); | ||
| 1058 | goto skip; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane)) | ||
| 1062 | port->lane = 0; | ||
| 1063 | |||
| 1064 | port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port, | ||
| 1065 | port->lane); | ||
| 1066 | if (!port->name) { | ||
| 1067 | ret = -ENOMEM; | ||
| 1068 | goto err; | ||
| 1069 | } | ||
| 1070 | |||
| 1071 | port->devfn = of_pci_get_devfn(child); | ||
| 1072 | if (port->devfn < 0) | ||
| 1073 | goto skip; | ||
| 1074 | |||
| 1075 | ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM, | ||
| 1076 | &port->mem_target, &port->mem_attr); | ||
| 1077 | if (ret < 0) { | ||
| 1078 | dev_err(dev, "%s: cannot get tgt/attr for mem window\n", | ||
| 1079 | port->name); | ||
| 1080 | goto skip; | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | if (resource_size(&pcie->io) != 0) { | ||
| 1084 | mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO, | ||
| 1085 | &port->io_target, &port->io_attr); | ||
| 1086 | } else { | ||
| 1087 | port->io_target = -1; | ||
| 1088 | port->io_attr = -1; | ||
| 1089 | } | ||
| 1090 | |||
| 1091 | reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags); | ||
| 1092 | if (reset_gpio == -EPROBE_DEFER) { | ||
| 1093 | ret = reset_gpio; | ||
| 1094 | goto err; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | if (gpio_is_valid(reset_gpio)) { | ||
| 1098 | unsigned long gpio_flags; | ||
| 1099 | |||
| 1100 | port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset", | ||
| 1101 | port->name); | ||
| 1102 | if (!port->reset_name) { | ||
| 1103 | ret = -ENOMEM; | ||
| 1104 | goto err; | ||
| 1105 | } | ||
| 1106 | |||
| 1107 | if (flags & OF_GPIO_ACTIVE_LOW) { | ||
| 1108 | dev_info(dev, "%pOF: reset gpio is active low\n", | ||
| 1109 | child); | ||
| 1110 | gpio_flags = GPIOF_ACTIVE_LOW | | ||
| 1111 | GPIOF_OUT_INIT_LOW; | ||
| 1112 | } else { | ||
| 1113 | gpio_flags = GPIOF_OUT_INIT_HIGH; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags, | ||
| 1117 | port->reset_name); | ||
| 1118 | if (ret) { | ||
| 1119 | if (ret == -EPROBE_DEFER) | ||
| 1120 | goto err; | ||
| 1121 | goto skip; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | port->reset_gpio = gpio_to_desc(reset_gpio); | ||
| 1125 | } | ||
| 1126 | |||
| 1127 | port->clk = of_clk_get_by_name(child, NULL); | ||
| 1128 | if (IS_ERR(port->clk)) { | ||
| 1129 | dev_err(dev, "%s: cannot get clock\n", port->name); | ||
| 1130 | goto skip; | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port); | ||
| 1134 | if (ret < 0) { | ||
| 1135 | clk_put(port->clk); | ||
| 1136 | goto err; | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | return 1; | ||
| 1140 | |||
| 1141 | skip: | ||
| 1142 | ret = 0; | ||
| 1143 | |||
| 1144 | /* In the case of skipping, we need to free these */ | ||
| 1145 | devm_kfree(dev, port->reset_name); | ||
| 1146 | port->reset_name = NULL; | ||
| 1147 | devm_kfree(dev, port->name); | ||
| 1148 | port->name = NULL; | ||
| 1149 | |||
| 1150 | err: | ||
| 1151 | return ret; | ||
| 1152 | } | ||
| 1153 | |||
| 1154 | /* | ||
| 1155 | * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs | ||
| 1156 | * prior to releasing PERST#. See table 2-4 in section 2.6.2 AC Specifications | ||
| 1157 | * of the PCI Express Card Electromechanical Specification, 1.1. | ||
| 1158 | */ | ||
| 1159 | static int mvebu_pcie_powerup(struct mvebu_pcie_port *port) | ||
| 1160 | { | ||
| 1161 | int ret; | ||
| 1162 | |||
| 1163 | ret = clk_prepare_enable(port->clk); | ||
| 1164 | if (ret < 0) | ||
| 1165 | return ret; | ||
| 1166 | |||
| 1167 | if (port->reset_gpio) { | ||
| 1168 | u32 reset_udelay = PCI_PM_D3COLD_WAIT * 1000; | ||
| 1169 | |||
| 1170 | of_property_read_u32(port->dn, "reset-delay-us", | ||
| 1171 | &reset_udelay); | ||
| 1172 | |||
| 1173 | udelay(100); | ||
| 1174 | |||
| 1175 | gpiod_set_value_cansleep(port->reset_gpio, 0); | ||
| 1176 | msleep(reset_udelay / 1000); | ||
| 1177 | } | ||
| 1178 | |||
| 1179 | return 0; | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | /* | ||
| 1183 | * Power down a PCIe port. Strictly, PCIe requires us to place the card | ||
| 1184 | * in D3hot state before asserting PERST#. | ||
| 1185 | */ | ||
| 1186 | static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port) | ||
| 1187 | { | ||
| 1188 | gpiod_set_value_cansleep(port->reset_gpio, 1); | ||
| 1189 | |||
| 1190 | clk_disable_unprepare(port->clk); | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | static int mvebu_pcie_probe(struct platform_device *pdev) | ||
| 1194 | { | ||
| 1195 | struct device *dev = &pdev->dev; | ||
| 1196 | struct mvebu_pcie *pcie; | ||
| 1197 | struct device_node *np = dev->of_node; | ||
| 1198 | struct device_node *child; | ||
| 1199 | int num, i, ret; | ||
| 1200 | |||
| 1201 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 1202 | if (!pcie) | ||
| 1203 | return -ENOMEM; | ||
| 1204 | |||
| 1205 | pcie->pdev = pdev; | ||
| 1206 | platform_set_drvdata(pdev, pcie); | ||
| 1207 | |||
| 1208 | /* Get the PCIe memory and I/O aperture */ | ||
| 1209 | mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); | ||
| 1210 | if (resource_size(&pcie->mem) == 0) { | ||
| 1211 | dev_err(dev, "invalid memory aperture size\n"); | ||
| 1212 | return -EINVAL; | ||
| 1213 | } | ||
| 1214 | |||
| 1215 | mvebu_mbus_get_pcie_io_aperture(&pcie->io); | ||
| 1216 | |||
| 1217 | if (resource_size(&pcie->io) != 0) { | ||
| 1218 | pcie->realio.flags = pcie->io.flags; | ||
| 1219 | pcie->realio.start = PCIBIOS_MIN_IO; | ||
| 1220 | pcie->realio.end = min_t(resource_size_t, | ||
| 1221 | IO_SPACE_LIMIT, | ||
| 1222 | resource_size(&pcie->io)); | ||
| 1223 | } else | ||
| 1224 | pcie->realio = pcie->io; | ||
| 1225 | |||
| 1226 | /* Get the bus range */ | ||
| 1227 | ret = of_pci_parse_bus_range(np, &pcie->busn); | ||
| 1228 | if (ret) { | ||
| 1229 | dev_err(dev, "failed to parse bus-range property: %d\n", ret); | ||
| 1230 | return ret; | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | num = of_get_available_child_count(np); | ||
| 1234 | |||
| 1235 | pcie->ports = devm_kcalloc(dev, num, sizeof(*pcie->ports), GFP_KERNEL); | ||
| 1236 | if (!pcie->ports) | ||
| 1237 | return -ENOMEM; | ||
| 1238 | |||
| 1239 | i = 0; | ||
| 1240 | for_each_available_child_of_node(np, child) { | ||
| 1241 | struct mvebu_pcie_port *port = &pcie->ports[i]; | ||
| 1242 | |||
| 1243 | ret = mvebu_pcie_parse_port(pcie, port, child); | ||
| 1244 | if (ret < 0) { | ||
| 1245 | of_node_put(child); | ||
| 1246 | return ret; | ||
| 1247 | } else if (ret == 0) { | ||
| 1248 | continue; | ||
| 1249 | } | ||
| 1250 | |||
| 1251 | port->dn = child; | ||
| 1252 | i++; | ||
| 1253 | } | ||
| 1254 | pcie->nports = i; | ||
| 1255 | |||
| 1256 | for (i = 0; i < pcie->nports; i++) { | ||
| 1257 | struct mvebu_pcie_port *port = &pcie->ports[i]; | ||
| 1258 | |||
| 1259 | child = port->dn; | ||
| 1260 | if (!child) | ||
| 1261 | continue; | ||
| 1262 | |||
| 1263 | ret = mvebu_pcie_powerup(port); | ||
| 1264 | if (ret < 0) | ||
| 1265 | continue; | ||
| 1266 | |||
| 1267 | port->base = mvebu_pcie_map_registers(pdev, child, port); | ||
| 1268 | if (IS_ERR(port->base)) { | ||
| 1269 | dev_err(dev, "%s: cannot map registers\n", port->name); | ||
| 1270 | port->base = NULL; | ||
| 1271 | mvebu_pcie_powerdown(port); | ||
| 1272 | continue; | ||
| 1273 | } | ||
| 1274 | |||
| 1275 | mvebu_pcie_set_local_dev_nr(port, 1); | ||
| 1276 | mvebu_sw_pci_bridge_init(port); | ||
| 1277 | } | ||
| 1278 | |||
| 1279 | pcie->nports = i; | ||
| 1280 | |||
| 1281 | for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K) | ||
| 1282 | pci_ioremap_io(i, pcie->io.start + i); | ||
| 1283 | |||
| 1284 | mvebu_pcie_enable(pcie); | ||
| 1285 | |||
| 1286 | platform_set_drvdata(pdev, pcie); | ||
| 1287 | |||
| 1288 | return 0; | ||
| 1289 | } | ||
| 1290 | |||
| 1291 | static const struct of_device_id mvebu_pcie_of_match_table[] = { | ||
| 1292 | { .compatible = "marvell,armada-xp-pcie", }, | ||
| 1293 | { .compatible = "marvell,armada-370-pcie", }, | ||
| 1294 | { .compatible = "marvell,dove-pcie", }, | ||
| 1295 | { .compatible = "marvell,kirkwood-pcie", }, | ||
| 1296 | {}, | ||
| 1297 | }; | ||
| 1298 | |||
| 1299 | static const struct dev_pm_ops mvebu_pcie_pm_ops = { | ||
| 1300 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume) | ||
| 1301 | }; | ||
| 1302 | |||
| 1303 | static struct platform_driver mvebu_pcie_driver = { | ||
| 1304 | .driver = { | ||
| 1305 | .name = "mvebu-pcie", | ||
| 1306 | .of_match_table = mvebu_pcie_of_match_table, | ||
| 1307 | /* driver unloading/unbinding currently not supported */ | ||
| 1308 | .suppress_bind_attrs = true, | ||
| 1309 | .pm = &mvebu_pcie_pm_ops, | ||
| 1310 | }, | ||
| 1311 | .probe = mvebu_pcie_probe, | ||
| 1312 | }; | ||
| 1313 | builtin_platform_driver(mvebu_pcie_driver); | ||
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c new file mode 100644 index 000000000000..326171cb1a97 --- /dev/null +++ b/drivers/pci/controller/pci-rcar-gen2.c | |||
| @@ -0,0 +1,428 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * pci-rcar-gen2: internal PCI bus support | ||
| 4 | * | ||
| 5 | * Copyright (C) 2013 Renesas Solutions Corp. | ||
| 6 | * Copyright (C) 2013 Cogent Embedded, Inc. | ||
| 7 | * | ||
| 8 | * Author: Valentine Barshak <valentine.barshak@cogentembedded.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/io.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/of_address.h> | ||
| 17 | #include <linux/of_pci.h> | ||
| 18 | #include <linux/pci.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/pm_runtime.h> | ||
| 21 | #include <linux/sizes.h> | ||
| 22 | #include <linux/slab.h> | ||
| 23 | |||
| 24 | #include "../pci.h" | ||
| 25 | |||
| 26 | /* AHB-PCI Bridge PCI communication registers */ | ||
| 27 | #define RCAR_AHBPCI_PCICOM_OFFSET 0x800 | ||
| 28 | |||
| 29 | #define RCAR_PCIAHB_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x00) | ||
| 30 | #define RCAR_PCIAHB_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x04) | ||
| 31 | #define RCAR_PCIAHB_PREFETCH0 0x0 | ||
| 32 | #define RCAR_PCIAHB_PREFETCH4 0x1 | ||
| 33 | #define RCAR_PCIAHB_PREFETCH8 0x2 | ||
| 34 | #define RCAR_PCIAHB_PREFETCH16 0x3 | ||
| 35 | |||
| 36 | #define RCAR_AHBPCI_WIN1_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x10) | ||
| 37 | #define RCAR_AHBPCI_WIN2_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x14) | ||
| 38 | #define RCAR_AHBPCI_WIN_CTR_MEM (3 << 1) | ||
| 39 | #define RCAR_AHBPCI_WIN_CTR_CFG (5 << 1) | ||
| 40 | #define RCAR_AHBPCI_WIN1_HOST (1 << 30) | ||
| 41 | #define RCAR_AHBPCI_WIN1_DEVICE (1 << 31) | ||
| 42 | |||
| 43 | #define RCAR_PCI_INT_ENABLE_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x20) | ||
| 44 | #define RCAR_PCI_INT_STATUS_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x24) | ||
| 45 | #define RCAR_PCI_INT_SIGTABORT (1 << 0) | ||
| 46 | #define RCAR_PCI_INT_SIGRETABORT (1 << 1) | ||
| 47 | #define RCAR_PCI_INT_REMABORT (1 << 2) | ||
| 48 | #define RCAR_PCI_INT_PERR (1 << 3) | ||
| 49 | #define RCAR_PCI_INT_SIGSERR (1 << 4) | ||
| 50 | #define RCAR_PCI_INT_RESERR (1 << 5) | ||
| 51 | #define RCAR_PCI_INT_WIN1ERR (1 << 12) | ||
| 52 | #define RCAR_PCI_INT_WIN2ERR (1 << 13) | ||
| 53 | #define RCAR_PCI_INT_A (1 << 16) | ||
| 54 | #define RCAR_PCI_INT_B (1 << 17) | ||
| 55 | #define RCAR_PCI_INT_PME (1 << 19) | ||
| 56 | #define RCAR_PCI_INT_ALLERRORS (RCAR_PCI_INT_SIGTABORT | \ | ||
| 57 | RCAR_PCI_INT_SIGRETABORT | \ | ||
| 58 | RCAR_PCI_INT_REMABORT | \ | ||
| 59 | RCAR_PCI_INT_PERR | \ | ||
| 60 | RCAR_PCI_INT_SIGSERR | \ | ||
| 61 | RCAR_PCI_INT_RESERR | \ | ||
| 62 | RCAR_PCI_INT_WIN1ERR | \ | ||
| 63 | RCAR_PCI_INT_WIN2ERR) | ||
| 64 | |||
| 65 | #define RCAR_AHB_BUS_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x30) | ||
| 66 | #define RCAR_AHB_BUS_MMODE_HTRANS (1 << 0) | ||
| 67 | #define RCAR_AHB_BUS_MMODE_BYTE_BURST (1 << 1) | ||
| 68 | #define RCAR_AHB_BUS_MMODE_WR_INCR (1 << 2) | ||
| 69 | #define RCAR_AHB_BUS_MMODE_HBUS_REQ (1 << 7) | ||
| 70 | #define RCAR_AHB_BUS_SMODE_READYCTR (1 << 17) | ||
| 71 | #define RCAR_AHB_BUS_MODE (RCAR_AHB_BUS_MMODE_HTRANS | \ | ||
| 72 | RCAR_AHB_BUS_MMODE_BYTE_BURST | \ | ||
| 73 | RCAR_AHB_BUS_MMODE_WR_INCR | \ | ||
| 74 | RCAR_AHB_BUS_MMODE_HBUS_REQ | \ | ||
| 75 | RCAR_AHB_BUS_SMODE_READYCTR) | ||
| 76 | |||
| 77 | #define RCAR_USBCTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x34) | ||
| 78 | #define RCAR_USBCTR_USBH_RST (1 << 0) | ||
| 79 | #define RCAR_USBCTR_PCICLK_MASK (1 << 1) | ||
| 80 | #define RCAR_USBCTR_PLL_RST (1 << 2) | ||
| 81 | #define RCAR_USBCTR_DIRPD (1 << 8) | ||
| 82 | #define RCAR_USBCTR_PCIAHB_WIN2_EN (1 << 9) | ||
| 83 | #define RCAR_USBCTR_PCIAHB_WIN1_256M (0 << 10) | ||
| 84 | #define RCAR_USBCTR_PCIAHB_WIN1_512M (1 << 10) | ||
| 85 | #define RCAR_USBCTR_PCIAHB_WIN1_1G (2 << 10) | ||
| 86 | #define RCAR_USBCTR_PCIAHB_WIN1_2G (3 << 10) | ||
| 87 | #define RCAR_USBCTR_PCIAHB_WIN1_MASK (3 << 10) | ||
| 88 | |||
| 89 | #define RCAR_PCI_ARBITER_CTR_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x40) | ||
| 90 | #define RCAR_PCI_ARBITER_PCIREQ0 (1 << 0) | ||
| 91 | #define RCAR_PCI_ARBITER_PCIREQ1 (1 << 1) | ||
| 92 | #define RCAR_PCI_ARBITER_PCIBP_MODE (1 << 12) | ||
| 93 | |||
| 94 | #define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48) | ||
| 95 | |||
| 96 | struct rcar_pci_priv { | ||
| 97 | struct device *dev; | ||
| 98 | void __iomem *reg; | ||
| 99 | struct resource mem_res; | ||
| 100 | struct resource *cfg_res; | ||
| 101 | unsigned busnr; | ||
| 102 | int irq; | ||
| 103 | unsigned long window_size; | ||
| 104 | unsigned long window_addr; | ||
| 105 | unsigned long window_pci; | ||
| 106 | }; | ||
| 107 | |||
| 108 | /* PCI configuration space operations */ | ||
| 109 | static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, | ||
| 110 | int where) | ||
| 111 | { | ||
| 112 | struct pci_sys_data *sys = bus->sysdata; | ||
| 113 | struct rcar_pci_priv *priv = sys->private_data; | ||
| 114 | int slot, val; | ||
| 115 | |||
| 116 | if (sys->busnr != bus->number || PCI_FUNC(devfn)) | ||
| 117 | return NULL; | ||
| 118 | |||
| 119 | /* Only the bridge and the built-in OHCI/EHCI devices exist (slots 0-2) */ | ||
| 120 | slot = PCI_SLOT(devfn); | ||
| 121 | if (slot > 2) | ||
| 122 | return NULL; | ||
| 123 | |||
| 124 | /* The bridge logic only has registers up to 0x40 */ | ||
| 125 | if (slot == 0x0 && where >= 0x40) | ||
| 126 | return NULL; | ||
| 127 | |||
| 128 | val = slot ? RCAR_AHBPCI_WIN1_DEVICE | RCAR_AHBPCI_WIN_CTR_CFG : | ||
| 129 | RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG; | ||
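| | /* | ||
| | * Slot 0 addresses the AHB-PCI bridge itself; slots 1 and 2 reach | ||
| | * the built-in devices, whose config spaces appear at reg + 0x000 | ||
| | * and reg + 0x100 respectively ((slot >> 1) * 0x100). | ||
| | */ | ||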
| 130 | |||
| 131 | iowrite32(val, priv->reg + RCAR_AHBPCI_WIN1_CTR_REG); | ||
| 132 | return priv->reg + (slot >> 1) * 0x100 + where; | ||
| 133 | } | ||
| 134 | |||
| 135 | /* PCI interrupt mapping */ | ||
| 136 | static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
| 137 | { | ||
| 138 | struct pci_sys_data *sys = dev->bus->sysdata; | ||
| 139 | struct rcar_pci_priv *priv = sys->private_data; | ||
| 140 | int irq; | ||
| 141 | |||
| 142 | irq = of_irq_parse_and_map_pci(dev, slot, pin); | ||
| 143 | if (!irq) | ||
| 144 | irq = priv->irq; | ||
| 145 | |||
| 146 | return irq; | ||
| 147 | } | ||
| 148 | |||
| 149 | #ifdef CONFIG_PCI_DEBUG | ||
| 150 | /* if debug enabled, then attach an error handler irq to the bridge */ | ||
| 151 | |||
| 152 | static irqreturn_t rcar_pci_err_irq(int irq, void *pw) | ||
| 153 | { | ||
| 154 | struct rcar_pci_priv *priv = pw; | ||
| 155 | struct device *dev = priv->dev; | ||
| 156 | u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG); | ||
| 157 | |||
| 158 | if (status & RCAR_PCI_INT_ALLERRORS) { | ||
| 159 | dev_err(dev, "error irq: status %08x\n", status); | ||
| 160 | |||
| 161 | /* clear the error(s) */ | ||
| 162 | iowrite32(status & RCAR_PCI_INT_ALLERRORS, | ||
| 163 | priv->reg + RCAR_PCI_INT_STATUS_REG); | ||
| 164 | return IRQ_HANDLED; | ||
| 165 | } | ||
| 166 | |||
| 167 | return IRQ_NONE; | ||
| 168 | } | ||
| 169 | |||
| 170 | static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) | ||
| 171 | { | ||
| 172 | struct device *dev = priv->dev; | ||
| 173 | int ret; | ||
| 174 | u32 val; | ||
| 175 | |||
| 176 | ret = devm_request_irq(dev, priv->irq, rcar_pci_err_irq, | ||
| 177 | IRQF_SHARED, "error irq", priv); | ||
| 178 | if (ret) { | ||
| 179 | dev_err(dev, "cannot claim IRQ for error handling\n"); | ||
| 180 | return; | ||
| 181 | } | ||
| 182 | |||
| 183 | val = ioread32(priv->reg + RCAR_PCI_INT_ENABLE_REG); | ||
| 184 | val |= RCAR_PCI_INT_ALLERRORS; | ||
| 185 | iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG); | ||
| 186 | } | ||
| 187 | #else | ||
| 188 | static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } | ||
| 189 | #endif | ||
| 190 | |||
| 191 | /* PCI host controller setup */ | ||
| 192 | static int rcar_pci_setup(int nr, struct pci_sys_data *sys) | ||
| 193 | { | ||
| 194 | struct rcar_pci_priv *priv = sys->private_data; | ||
| 195 | struct device *dev = priv->dev; | ||
| 196 | void __iomem *reg = priv->reg; | ||
| 197 | u32 val; | ||
| 198 | int ret; | ||
| 199 | |||
| 200 | pm_runtime_enable(dev); | ||
| 201 | pm_runtime_get_sync(dev); | ||
| 202 | |||
| 203 | val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); | ||
| 204 | dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val); | ||
| 205 | |||
| 206 | /* Disable Direct Power Down State and assert reset */ | ||
| 207 | val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; | ||
| 208 | val |= RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST; | ||
| 209 | iowrite32(val, reg + RCAR_USBCTR_REG); | ||
| 210 | udelay(4); | ||
| 211 | |||
| 212 | /* De-assert reset and reset PCIAHB window1 size */ | ||
| 213 | val &= ~(RCAR_USBCTR_PCIAHB_WIN1_MASK | RCAR_USBCTR_PCICLK_MASK | | ||
| 214 | RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); | ||
| 215 | |||
| 216 | /* Setup PCIAHB window1 size */ | ||
| 217 | switch (priv->window_size) { | ||
| 218 | case SZ_2G: | ||
| 219 | val |= RCAR_USBCTR_PCIAHB_WIN1_2G; | ||
| 220 | break; | ||
| 221 | case SZ_1G: | ||
| 222 | val |= RCAR_USBCTR_PCIAHB_WIN1_1G; | ||
| 223 | break; | ||
| 224 | case SZ_512M: | ||
| 225 | val |= RCAR_USBCTR_PCIAHB_WIN1_512M; | ||
| 226 | break; | ||
| 227 | default: | ||
| 228 | pr_warn("unknown window size %ld - defaulting to 256M\n", | ||
| 229 | priv->window_size); | ||
| 230 | priv->window_size = SZ_256M; | ||
| 231 | /* fall-through */ | ||
| 232 | case SZ_256M: | ||
| 233 | val |= RCAR_USBCTR_PCIAHB_WIN1_256M; | ||
| 234 | break; | ||
| 235 | } | ||
| 236 | iowrite32(val, reg + RCAR_USBCTR_REG); | ||
| 237 | |||
| 238 | /* Configure AHB master and slave modes */ | ||
| 239 | iowrite32(RCAR_AHB_BUS_MODE, reg + RCAR_AHB_BUS_CTR_REG); | ||
| 240 | |||
| 241 | /* Configure PCI arbiter */ | ||
| 242 | val = ioread32(reg + RCAR_PCI_ARBITER_CTR_REG); | ||
| 243 | val |= RCAR_PCI_ARBITER_PCIREQ0 | RCAR_PCI_ARBITER_PCIREQ1 | | ||
| 244 | RCAR_PCI_ARBITER_PCIBP_MODE; | ||
| 245 | iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); | ||
| 246 | |||
| 247 | /* PCI-AHB mapping */ | ||
| 248 | iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, | ||
| 249 | reg + RCAR_PCIAHB_WIN1_CTR_REG); | ||
| 250 | |||
| 251 | /* AHB-PCI mapping: OHCI/EHCI registers */ | ||
| 252 | val = priv->mem_res.start | RCAR_AHBPCI_WIN_CTR_MEM; | ||
| 253 | iowrite32(val, reg + RCAR_AHBPCI_WIN2_CTR_REG); | ||
| 254 | |||
| 255 | /* Enable AHB-PCI bridge PCI configuration access */ | ||
| 256 | iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, | ||
| 257 | reg + RCAR_AHBPCI_WIN1_CTR_REG); | ||
| 258 | /* Set PCI-AHB Window1 address */ | ||
| 259 | iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, | ||
| 260 | reg + PCI_BASE_ADDRESS_1); | ||
| 261 | /* Set AHB-PCI bridge PCI communication area address */ | ||
| 262 | val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; | ||
| 263 | iowrite32(val, reg + PCI_BASE_ADDRESS_0); | ||
| 264 | |||
| 265 | val = ioread32(reg + PCI_COMMAND); | ||
| 266 | val |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | | ||
| 267 | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
| 268 | iowrite32(val, reg + PCI_COMMAND); | ||
| 269 | |||
| 270 | /* Enable PCI interrupts */ | ||
| 271 | iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, | ||
| 272 | reg + RCAR_PCI_INT_ENABLE_REG); | ||
| 273 | |||
| 274 | if (priv->irq > 0) | ||
| 275 | rcar_pci_setup_errirq(priv); | ||
| 276 | |||
| 277 | /* Add PCI resources */ | ||
| 278 | pci_add_resource(&sys->resources, &priv->mem_res); | ||
| 279 | ret = devm_request_pci_bus_resources(dev, &sys->resources); | ||
| 280 | if (ret < 0) | ||
| 281 | return ret; | ||
| 282 | |||
| 283 | /* Setup bus number based on platform device id / of bus-range */ | ||
| 284 | sys->busnr = priv->busnr; | ||
| 285 | return 1; | ||
| 286 | } | ||
| 287 | |||
| 288 | static struct pci_ops rcar_pci_ops = { | ||
| 289 | .map_bus = rcar_pci_cfg_base, | ||
| 290 | .read = pci_generic_config_read, | ||
| 291 | .write = pci_generic_config_write, | ||
| 292 | }; | ||
| 293 | |||
| 294 | static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, | ||
| 295 | struct device_node *np) | ||
| 296 | { | ||
| 297 | struct device *dev = pci->dev; | ||
| 298 | struct of_pci_range range; | ||
| 299 | struct of_pci_range_parser parser; | ||
| 300 | int index = 0; | ||
| 301 | |||
| 302 | /* Failure to parse is OK, as we fall back to defaults */ | ||
| 303 | if (of_pci_dma_range_parser_init(&parser, np)) | ||
| 304 | return 0; | ||
| 305 | |||
| 306 | /* Get the dma-ranges from DT */ | ||
| 307 | for_each_of_pci_range(&parser, &range) { | ||
| 308 | /* Hardware only allows one inbound 32-bit range */ | ||
| 309 | if (index) | ||
| 310 | return -EINVAL; | ||
| 311 | |||
| 312 | pci->window_addr = (unsigned long)range.cpu_addr; | ||
| 313 | pci->window_pci = (unsigned long)range.pci_addr; | ||
| 314 | pci->window_size = (unsigned long)range.size; | ||
| 315 | |||
| 316 | /* Catch HW limitations */ | ||
| 317 | if (!(range.flags & IORESOURCE_PREFETCH)) { | ||
| 318 | dev_err(dev, "window must be prefetchable\n"); | ||
| 319 | return -EINVAL; | ||
| 320 | } | ||
| 321 | if (pci->window_addr) { | ||
| 322 | u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); | ||
| 323 | |||
| 324 | if (lowaddr < pci->window_size) { | ||
| 325 | dev_err(dev, "invalid window size/addr\n"); | ||
| 326 | return -EINVAL; | ||
| 327 | } | ||
| 328 | } | ||
| 329 | index++; | ||
| 330 | } | ||
| 331 | |||
| 332 | return 0; | ||
| 333 | } | ||
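| | |||
| | /* | ||
| | * Editor's sketch (not part of the driver): the alignment test above is | ||
| | * easy to misread. 1 << (ffs(addr) - 1) isolates the lowest set bit of | ||
| | * the window base; if that bit is smaller than the window size, the base | ||
| | * is not aligned to the size and the single inbound window cannot map | ||
| | * the range. A minimal userspace rendering of the same check, with | ||
| | * illustrative values: | ||
| | */ | ||
| | #include <stdio.h> | ||
| | #include <strings.h>	/* ffs() */ | ||
| | |||
| | static int window_is_aligned(unsigned long addr, unsigned long size) | ||
| | { | ||
| | 	if (!addr)	/* a zero base is trivially aligned */ | ||
| | 		return 1; | ||
| | 	return (1UL << (ffs(addr) - 1)) >= size; | ||
| | } | ||
| | |||
| | int main(void) | ||
| | { | ||
| | 	/* lowest set bit of 0x40000000 is 1 GiB: fits a 1 GiB window */ | ||
| | 	printf("%d\n", window_is_aligned(0x40000000, 0x40000000)); | ||
| | 	/* lowest set bit of 0x48000000 is 128 MiB: too small for 1 GiB */ | ||
| | 	printf("%d\n", window_is_aligned(0x48000000, 0x40000000)); | ||
| | 	return 0; | ||
| | } | ||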
| 334 | |||
| 335 | static int rcar_pci_probe(struct platform_device *pdev) | ||
| 336 | { | ||
| 337 | struct device *dev = &pdev->dev; | ||
| 338 | struct resource *cfg_res, *mem_res; | ||
| 339 | struct rcar_pci_priv *priv; | ||
| 340 | void __iomem *reg; | ||
| 341 | struct hw_pci hw; | ||
| 342 | void *hw_private[1]; | ||
| 343 | |||
| 344 | cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 345 | reg = devm_ioremap_resource(dev, cfg_res); | ||
| 346 | if (IS_ERR(reg)) | ||
| 347 | return PTR_ERR(reg); | ||
| 348 | |||
| 349 | mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 350 | if (!mem_res || !mem_res->start) | ||
| 351 | return -ENODEV; | ||
| 352 | |||
| 353 | if (mem_res->start & 0xFFFF) | ||
| 354 | return -EINVAL; | ||
| 355 | |||
| 356 | priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); | ||
| 357 | if (!priv) | ||
| 358 | return -ENOMEM; | ||
| 359 | |||
| 360 | priv->mem_res = *mem_res; | ||
| 361 | priv->cfg_res = cfg_res; | ||
| 362 | |||
| 363 | priv->irq = platform_get_irq(pdev, 0); | ||
| 364 | priv->reg = reg; | ||
| 365 | priv->dev = dev; | ||
| 366 | |||
| 367 | if (priv->irq < 0) { | ||
| 368 | dev_err(dev, "no valid irq found\n"); | ||
| 369 | return priv->irq; | ||
| 370 | } | ||
| 371 | |||
| 372 | /* default window addr and size if not specified in DT */ | ||
| 373 | priv->window_addr = 0x40000000; | ||
| 374 | priv->window_pci = 0x40000000; | ||
| 375 | priv->window_size = SZ_1G; | ||
| 376 | |||
| 377 | if (dev->of_node) { | ||
| 378 | struct resource busnr; | ||
| 379 | int ret; | ||
| 380 | |||
| 381 | ret = of_pci_parse_bus_range(dev->of_node, &busnr); | ||
| 382 | if (ret < 0) { | ||
| 383 | dev_err(dev, "failed to parse bus-range\n"); | ||
| 384 | return ret; | ||
| 385 | } | ||
| 386 | |||
| 387 | priv->busnr = busnr.start; | ||
| 388 | if (busnr.end != busnr.start) | ||
| 389 | dev_warn(dev, "only one bus number supported\n"); | ||
| 390 | |||
| 391 | ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); | ||
| 392 | if (ret < 0) { | ||
| 393 | dev_err(dev, "failed to parse dma-range\n"); | ||
| 394 | return ret; | ||
| 395 | } | ||
| 396 | } else { | ||
| 397 | priv->busnr = pdev->id; | ||
| 398 | } | ||
| 399 | |||
| 400 | hw_private[0] = priv; | ||
| 401 | memset(&hw, 0, sizeof(hw)); | ||
| 402 | hw.nr_controllers = ARRAY_SIZE(hw_private); | ||
| 403 | hw.io_optional = 1; | ||
| 404 | hw.private_data = hw_private; | ||
| 405 | hw.map_irq = rcar_pci_map_irq; | ||
| 406 | hw.ops = &rcar_pci_ops; | ||
| 407 | hw.setup = rcar_pci_setup; | ||
| 408 | pci_common_init_dev(dev, &hw); | ||
| 409 | return 0; | ||
| 410 | } | ||
| 411 | |||
| 412 | static const struct of_device_id rcar_pci_of_match[] = { | ||
| 413 | { .compatible = "renesas,pci-r8a7790", }, | ||
| 414 | { .compatible = "renesas,pci-r8a7791", }, | ||
| 415 | { .compatible = "renesas,pci-r8a7794", }, | ||
| 416 | { .compatible = "renesas,pci-rcar-gen2", }, | ||
| 417 | { }, | ||
| 418 | }; | ||
| 419 | |||
| 420 | static struct platform_driver rcar_pci_driver = { | ||
| 421 | .driver = { | ||
| 422 | .name = "pci-rcar-gen2", | ||
| 423 | .suppress_bind_attrs = true, | ||
| 424 | .of_match_table = rcar_pci_of_match, | ||
| 425 | }, | ||
| 426 | .probe = rcar_pci_probe, | ||
| 427 | }; | ||
| 428 | builtin_platform_driver(rcar_pci_driver); | ||
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c new file mode 100644 index 000000000000..f4f53d092e00 --- /dev/null +++ b/drivers/pci/controller/pci-tegra.c | |||
| @@ -0,0 +1,2531 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Tegra SoCs | ||
| 4 | * | ||
| 5 | * Copyright (c) 2010, CompuLab, Ltd. | ||
| 6 | * Author: Mike Rapoport <mike@compulab.co.il> | ||
| 7 | * | ||
| 8 | * Based on NVIDIA PCIe driver | ||
| 9 | * Copyright (c) 2008-2009, NVIDIA Corporation. | ||
| 10 | * | ||
| 11 | * Bits taken from arch/arm/mach-dove/pcie.c | ||
| 12 | * | ||
| 13 | * Author: Thierry Reding <treding@nvidia.com> | ||
| 14 | */ | ||
| 15 | |||
| 16 | #include <linux/clk.h> | ||
| 17 | #include <linux/debugfs.h> | ||
| 18 | #include <linux/delay.h> | ||
| 19 | #include <linux/export.h> | ||
| 20 | #include <linux/interrupt.h> | ||
| 21 | #include <linux/iopoll.h> | ||
| 22 | #include <linux/irq.h> | ||
| 23 | #include <linux/irqdomain.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/init.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/msi.h> | ||
| 28 | #include <linux/of_address.h> | ||
| 29 | #include <linux/of_pci.h> | ||
| 30 | #include <linux/of_platform.h> | ||
| 31 | #include <linux/pci.h> | ||
| 32 | #include <linux/phy/phy.h> | ||
| 33 | #include <linux/platform_device.h> | ||
| 34 | #include <linux/reset.h> | ||
| 35 | #include <linux/sizes.h> | ||
| 36 | #include <linux/slab.h> | ||
| 37 | #include <linux/vmalloc.h> | ||
| 38 | #include <linux/regulator/consumer.h> | ||
| 39 | |||
| 40 | #include <soc/tegra/cpuidle.h> | ||
| 41 | #include <soc/tegra/pmc.h> | ||
| 42 | |||
| 43 | #include "../pci.h" | ||
| 44 | |||
| 45 | #define INT_PCI_MSI_NR (8 * 32) | ||
| 46 | |||
| 47 | /* register definitions */ | ||
| 48 | |||
| 49 | #define AFI_AXI_BAR0_SZ 0x00 | ||
| 50 | #define AFI_AXI_BAR1_SZ 0x04 | ||
| 51 | #define AFI_AXI_BAR2_SZ 0x08 | ||
| 52 | #define AFI_AXI_BAR3_SZ 0x0c | ||
| 53 | #define AFI_AXI_BAR4_SZ 0x10 | ||
| 54 | #define AFI_AXI_BAR5_SZ 0x14 | ||
| 55 | |||
| 56 | #define AFI_AXI_BAR0_START 0x18 | ||
| 57 | #define AFI_AXI_BAR1_START 0x1c | ||
| 58 | #define AFI_AXI_BAR2_START 0x20 | ||
| 59 | #define AFI_AXI_BAR3_START 0x24 | ||
| 60 | #define AFI_AXI_BAR4_START 0x28 | ||
| 61 | #define AFI_AXI_BAR5_START 0x2c | ||
| 62 | |||
| 63 | #define AFI_FPCI_BAR0 0x30 | ||
| 64 | #define AFI_FPCI_BAR1 0x34 | ||
| 65 | #define AFI_FPCI_BAR2 0x38 | ||
| 66 | #define AFI_FPCI_BAR3 0x3c | ||
| 67 | #define AFI_FPCI_BAR4 0x40 | ||
| 68 | #define AFI_FPCI_BAR5 0x44 | ||
| 69 | |||
| 70 | #define AFI_CACHE_BAR0_SZ 0x48 | ||
| 71 | #define AFI_CACHE_BAR0_ST 0x4c | ||
| 72 | #define AFI_CACHE_BAR1_SZ 0x50 | ||
| 73 | #define AFI_CACHE_BAR1_ST 0x54 | ||
| 74 | |||
| 75 | #define AFI_MSI_BAR_SZ 0x60 | ||
| 76 | #define AFI_MSI_FPCI_BAR_ST 0x64 | ||
| 77 | #define AFI_MSI_AXI_BAR_ST 0x68 | ||
| 78 | |||
| 79 | #define AFI_MSI_VEC0 0x6c | ||
| 80 | #define AFI_MSI_VEC1 0x70 | ||
| 81 | #define AFI_MSI_VEC2 0x74 | ||
| 82 | #define AFI_MSI_VEC3 0x78 | ||
| 83 | #define AFI_MSI_VEC4 0x7c | ||
| 84 | #define AFI_MSI_VEC5 0x80 | ||
| 85 | #define AFI_MSI_VEC6 0x84 | ||
| 86 | #define AFI_MSI_VEC7 0x88 | ||
| 87 | |||
| 88 | #define AFI_MSI_EN_VEC0 0x8c | ||
| 89 | #define AFI_MSI_EN_VEC1 0x90 | ||
| 90 | #define AFI_MSI_EN_VEC2 0x94 | ||
| 91 | #define AFI_MSI_EN_VEC3 0x98 | ||
| 92 | #define AFI_MSI_EN_VEC4 0x9c | ||
| 93 | #define AFI_MSI_EN_VEC5 0xa0 | ||
| 94 | #define AFI_MSI_EN_VEC6 0xa4 | ||
| 95 | #define AFI_MSI_EN_VEC7 0xa8 | ||
| 96 | |||
| 97 | #define AFI_CONFIGURATION 0xac | ||
| 98 | #define AFI_CONFIGURATION_EN_FPCI (1 << 0) | ||
| 99 | |||
| 100 | #define AFI_FPCI_ERROR_MASKS 0xb0 | ||
| 101 | |||
| 102 | #define AFI_INTR_MASK 0xb4 | ||
| 103 | #define AFI_INTR_MASK_INT_MASK (1 << 0) | ||
| 104 | #define AFI_INTR_MASK_MSI_MASK (1 << 8) | ||
| 105 | |||
| 106 | #define AFI_INTR_CODE 0xb8 | ||
| 107 | #define AFI_INTR_CODE_MASK 0xf | ||
| 108 | #define AFI_INTR_INI_SLAVE_ERROR 1 | ||
| 109 | #define AFI_INTR_INI_DECODE_ERROR 2 | ||
| 110 | #define AFI_INTR_TARGET_ABORT 3 | ||
| 111 | #define AFI_INTR_MASTER_ABORT 4 | ||
| 112 | #define AFI_INTR_INVALID_WRITE 5 | ||
| 113 | #define AFI_INTR_LEGACY 6 | ||
| 114 | #define AFI_INTR_FPCI_DECODE_ERROR 7 | ||
| 115 | #define AFI_INTR_AXI_DECODE_ERROR 8 | ||
| 116 | #define AFI_INTR_FPCI_TIMEOUT 9 | ||
| 117 | #define AFI_INTR_PE_PRSNT_SENSE 10 | ||
| 118 | #define AFI_INTR_PE_CLKREQ_SENSE 11 | ||
| 119 | #define AFI_INTR_CLKCLAMP_SENSE 12 | ||
| 120 | #define AFI_INTR_RDY4PD_SENSE 13 | ||
| 121 | #define AFI_INTR_P2P_ERROR 14 | ||
| 122 | |||
| 123 | #define AFI_INTR_SIGNATURE 0xbc | ||
| 124 | #define AFI_UPPER_FPCI_ADDRESS 0xc0 | ||
| 125 | #define AFI_SM_INTR_ENABLE 0xc4 | ||
| 126 | #define AFI_SM_INTR_INTA_ASSERT (1 << 0) | ||
| 127 | #define AFI_SM_INTR_INTB_ASSERT (1 << 1) | ||
| 128 | #define AFI_SM_INTR_INTC_ASSERT (1 << 2) | ||
| 129 | #define AFI_SM_INTR_INTD_ASSERT (1 << 3) | ||
| 130 | #define AFI_SM_INTR_INTA_DEASSERT (1 << 4) | ||
| 131 | #define AFI_SM_INTR_INTB_DEASSERT (1 << 5) | ||
| 132 | #define AFI_SM_INTR_INTC_DEASSERT (1 << 6) | ||
| 133 | #define AFI_SM_INTR_INTD_DEASSERT (1 << 7) | ||
| 134 | |||
| 135 | #define AFI_AFI_INTR_ENABLE 0xc8 | ||
| 136 | #define AFI_INTR_EN_INI_SLVERR (1 << 0) | ||
| 137 | #define AFI_INTR_EN_INI_DECERR (1 << 1) | ||
| 138 | #define AFI_INTR_EN_TGT_SLVERR (1 << 2) | ||
| 139 | #define AFI_INTR_EN_TGT_DECERR (1 << 3) | ||
| 140 | #define AFI_INTR_EN_TGT_WRERR (1 << 4) | ||
| 141 | #define AFI_INTR_EN_DFPCI_DECERR (1 << 5) | ||
| 142 | #define AFI_INTR_EN_AXI_DECERR (1 << 6) | ||
| 143 | #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7) | ||
| 144 | #define AFI_INTR_EN_PRSNT_SENSE (1 << 8) | ||
| 145 | |||
| 146 | #define AFI_PCIE_PME 0xf0 | ||
| 147 | |||
| 148 | #define AFI_PCIE_CONFIG 0x0f8 | ||
| 149 | #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1)) | ||
| 150 | #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe | ||
| 151 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20) | ||
| 152 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20) | ||
| 153 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20) | ||
| 154 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20) | ||
| 155 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20) | ||
| 156 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20) | ||
| 157 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20) | ||
| 158 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20) | ||
| 159 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20) | ||
| 160 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20) | ||
| 161 | #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20) | ||
| 162 | |||
| 163 | #define AFI_FUSE 0x104 | ||
| 164 | #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2) | ||
| 165 | |||
| 166 | #define AFI_PEX0_CTRL 0x110 | ||
| 167 | #define AFI_PEX1_CTRL 0x118 | ||
| 168 | #define AFI_PEX2_CTRL 0x128 | ||
| 169 | #define AFI_PEX_CTRL_RST (1 << 0) | ||
| 170 | #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1) | ||
| 171 | #define AFI_PEX_CTRL_REFCLK_EN (1 << 3) | ||
| 172 | #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4) | ||
| 173 | |||
| 174 | #define AFI_PLLE_CONTROL 0x160 | ||
| 175 | #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9) | ||
| 176 | #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1) | ||
| 177 | |||
| 178 | #define AFI_PEXBIAS_CTRL_0 0x168 | ||
| 179 | |||
| 180 | #define RP_VEND_XP 0x00000f00 | ||
| 181 | #define RP_VEND_XP_DL_UP (1 << 30) | ||
| 182 | |||
| 183 | #define RP_VEND_CTL2 0x00000fa8 | ||
| 184 | #define RP_VEND_CTL2_PCA_ENABLE (1 << 7) | ||
| 185 | |||
| 186 | #define RP_PRIV_MISC 0x00000fe0 | ||
| 187 | #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0) | ||
| 188 | #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0) | ||
| 189 | |||
| 190 | #define RP_LINK_CONTROL_STATUS 0x00000090 | ||
| 191 | #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000 | ||
| 192 | #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000 | ||
| 193 | |||
| 194 | #define PADS_CTL_SEL 0x0000009c | ||
| 195 | |||
| 196 | #define PADS_CTL 0x000000a0 | ||
| 197 | #define PADS_CTL_IDDQ_1L (1 << 0) | ||
| 198 | #define PADS_CTL_TX_DATA_EN_1L (1 << 6) | ||
| 199 | #define PADS_CTL_RX_DATA_EN_1L (1 << 10) | ||
| 200 | |||
| 201 | #define PADS_PLL_CTL_TEGRA20 0x000000b8 | ||
| 202 | #define PADS_PLL_CTL_TEGRA30 0x000000b4 | ||
| 203 | #define PADS_PLL_CTL_RST_B4SM (1 << 1) | ||
| 204 | #define PADS_PLL_CTL_LOCKDET (1 << 8) | ||
| 205 | #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16) | ||
| 206 | #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16) | ||
| 207 | #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16) | ||
| 208 | #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16) | ||
| 209 | #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20) | ||
| 210 | #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20) | ||
| 211 | #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20) | ||
| 212 | #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22) | ||
| 213 | |||
| 214 | #define PADS_REFCLK_CFG0 0x000000c8 | ||
| 215 | #define PADS_REFCLK_CFG1 0x000000cc | ||
| 216 | #define PADS_REFCLK_BIAS 0x000000d0 | ||
| 217 | |||
| 218 | /* | ||
| 219 | * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit | ||
| 220 | * entries, one entry per PCIe port. These field definitions and desired | ||
| 221 | * values aren't in the TRM, but do come from NVIDIA. | ||
| 222 | */ | ||
| 223 | #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */ | ||
| 224 | #define PADS_REFCLK_CFG_E_TERM_SHIFT 7 | ||
| 225 | #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */ | ||
| 226 | #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */ | ||
| 227 | |||
| 228 | #define PME_ACK_TIMEOUT 10000 | ||
| 229 | |||
| 230 | struct tegra_msi { | ||
| 231 | struct msi_controller chip; | ||
| 232 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | ||
| 233 | struct irq_domain *domain; | ||
| 234 | unsigned long pages; | ||
| 235 | struct mutex lock; | ||
| 236 | u64 phys; | ||
| 237 | int irq; | ||
| 238 | }; | ||
| 239 | |||
| 240 | /* used to differentiate between Tegra SoC generations */ | ||
| 241 | struct tegra_pcie_port_soc { | ||
| 242 | struct { | ||
| 243 | u8 turnoff_bit; | ||
| 244 | u8 ack_bit; | ||
| 245 | } pme; | ||
| 246 | }; | ||
| 247 | |||
| 248 | struct tegra_pcie_soc { | ||
| 249 | unsigned int num_ports; | ||
| 250 | const struct tegra_pcie_port_soc *ports; | ||
| 251 | unsigned int msi_base_shift; | ||
| 252 | u32 pads_pll_ctl; | ||
| 253 | u32 tx_ref_sel; | ||
| 254 | u32 pads_refclk_cfg0; | ||
| 255 | u32 pads_refclk_cfg1; | ||
| 256 | bool has_pex_clkreq_en; | ||
| 257 | bool has_pex_bias_ctrl; | ||
| 258 | bool has_intr_prsnt_sense; | ||
| 259 | bool has_cml_clk; | ||
| 260 | bool has_gen2; | ||
| 261 | bool force_pca_enable; | ||
| 262 | bool program_uphy; | ||
| 263 | }; | ||
| 264 | |||
| 265 | static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) | ||
| 266 | { | ||
| 267 | return container_of(chip, struct tegra_msi, chip); | ||
| 268 | } | ||
| 269 | |||
| 270 | struct tegra_pcie { | ||
| 271 | struct device *dev; | ||
| 272 | |||
| 273 | void __iomem *pads; | ||
| 274 | void __iomem *afi; | ||
| 275 | void __iomem *cfg; | ||
| 276 | int irq; | ||
| 277 | |||
| 278 | struct resource cs; | ||
| 279 | struct resource io; | ||
| 280 | struct resource pio; | ||
| 281 | struct resource mem; | ||
| 282 | struct resource prefetch; | ||
| 283 | struct resource busn; | ||
| 284 | |||
| 285 | struct { | ||
| 286 | resource_size_t mem; | ||
| 287 | resource_size_t io; | ||
| 288 | } offset; | ||
| 289 | |||
| 290 | struct clk *pex_clk; | ||
| 291 | struct clk *afi_clk; | ||
| 292 | struct clk *pll_e; | ||
| 293 | struct clk *cml_clk; | ||
| 294 | |||
| 295 | struct reset_control *pex_rst; | ||
| 296 | struct reset_control *afi_rst; | ||
| 297 | struct reset_control *pcie_xrst; | ||
| 298 | |||
| 299 | bool legacy_phy; | ||
| 300 | struct phy *phy; | ||
| 301 | |||
| 302 | struct tegra_msi msi; | ||
| 303 | |||
| 304 | struct list_head ports; | ||
| 305 | u32 xbar_config; | ||
| 306 | |||
| 307 | struct regulator_bulk_data *supplies; | ||
| 308 | unsigned int num_supplies; | ||
| 309 | |||
| 310 | const struct tegra_pcie_soc *soc; | ||
| 311 | struct dentry *debugfs; | ||
| 312 | }; | ||
| 313 | |||
| 314 | struct tegra_pcie_port { | ||
| 315 | struct tegra_pcie *pcie; | ||
| 316 | struct device_node *np; | ||
| 317 | struct list_head list; | ||
| 318 | struct resource regs; | ||
| 319 | void __iomem *base; | ||
| 320 | unsigned int index; | ||
| 321 | unsigned int lanes; | ||
| 322 | |||
| 323 | struct phy **phys; | ||
| 324 | }; | ||
| 325 | |||
| 326 | struct tegra_pcie_bus { | ||
| 327 | struct list_head list; | ||
| 328 | unsigned int nr; | ||
| 329 | }; | ||
| 330 | |||
| 331 | static inline void afi_writel(struct tegra_pcie *pcie, u32 value, | ||
| 332 | unsigned long offset) | ||
| 333 | { | ||
| 334 | writel(value, pcie->afi + offset); | ||
| 335 | } | ||
| 336 | |||
| 337 | static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset) | ||
| 338 | { | ||
| 339 | return readl(pcie->afi + offset); | ||
| 340 | } | ||
| 341 | |||
| 342 | static inline void pads_writel(struct tegra_pcie *pcie, u32 value, | ||
| 343 | unsigned long offset) | ||
| 344 | { | ||
| 345 | writel(value, pcie->pads + offset); | ||
| 346 | } | ||
| 347 | |||
| 348 | static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) | ||
| 349 | { | ||
| 350 | return readl(pcie->pads + offset); | ||
| 351 | } | ||
| 352 | |||
| 353 | /* | ||
| 354 | * The configuration space mapping on Tegra is somewhat similar to the ECAM | ||
| 355 | * defined by PCIe. However, it deviates a bit in how the 4 bits for extended | ||
| 356 | * register accesses are mapped: | ||
| 357 | * | ||
| 358 | * [27:24] extended register number | ||
| 359 | * [23:16] bus number | ||
| 360 | * [15:11] device number | ||
| 361 | * [10: 8] function number | ||
| 362 | * [ 7: 0] register number | ||
| 363 | * | ||
| 364 | * Mapping the whole extended configuration space would require 256 MiB of | ||
| 365 | * virtual address space, only a small part of which will actually be used. | ||
| 366 | * | ||
| 367 | * To work around this, a single 4 KiB region is reused to generate the | ||
| 368 | * required configuration transactions with the relevant B:D:F and register | ||
| 369 | * offset values. This is achieved by dynamically reprogramming the base | ||
| 370 | * address of the AFI_AXI_BAR used for endpoint config space mapping, so | ||
| 371 | * that the address whose access generates the desired config transaction | ||
| 372 | * always falls within this 4 KiB region. | ||
| 373 | */ | ||
| 374 | static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, | ||
| 375 | unsigned int where) | ||
| 376 | { | ||
| 377 | return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | | ||
| 378 | (PCI_FUNC(devfn) << 8) | (where & 0xff); | ||
| 379 | } | ||
| 380 | |||
| 381 | static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, | ||
| 382 | unsigned int devfn, | ||
| 383 | int where) | ||
| 384 | { | ||
| 385 | struct tegra_pcie *pcie = bus->sysdata; | ||
| 386 | void __iomem *addr = NULL; | ||
| 387 | |||
| 388 | if (bus->number == 0) { | ||
| 389 | unsigned int slot = PCI_SLOT(devfn); | ||
| 390 | struct tegra_pcie_port *port; | ||
| 391 | |||
| 392 | list_for_each_entry(port, &pcie->ports, list) { | ||
| 393 | if (port->index + 1 == slot) { | ||
| 394 | addr = port->base + (where & ~3); | ||
| 395 | break; | ||
| 396 | } | ||
| 397 | } | ||
| 398 | } else { | ||
| 399 | unsigned int offset; | ||
| 400 | u32 base; | ||
| 401 | |||
| 402 | offset = tegra_pcie_conf_offset(bus->number, devfn, where); | ||
| 403 | |||
| 404 | /* move 4 KiB window to offset within the FPCI region */ | ||
| 405 | base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); | ||
| 406 | afi_writel(pcie, base, AFI_FPCI_BAR0); | ||
| 407 | |||
| 408 | /* move to correct offset within the 4 KiB page */ | ||
| 409 | addr = pcie->cfg + (offset & (SZ_4K - 1)); | ||
| 410 | } | ||
| 411 | |||
| 412 | return addr; | ||
| 413 | } | ||
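| | |||
| | /* | ||
| | * Editor's worked example for the two functions above (values are | ||
| | * illustrative): a config read of register 0x104 on bus 2, devfn 0 gives | ||
| | * | ||
| | *   offset = ((0x104 & 0xf00) << 16) | (2 << 16) | (0 << 11) | (0 << 8) | ||
| | *            | (0x104 & 0xff) | ||
| | *          = 0x01000000 | 0x00020000 | 0x00000004 = 0x01020004 | ||
| | * | ||
| | * AFI_FPCI_BAR0 is then programmed to 0xfe100000 + (0x01020000 >> 8) = | ||
| | * 0xfe110200, which matches FPCI address 0xfe11020000 >> 8; the BAR | ||
| | * registers appear to hold FPCI address bits [39:8] (an inference from | ||
| | * the FPCI map comment further below, not from a TRM). The access then | ||
| | * lands at pcie->cfg + (offset & 0xfff) = pcie->cfg + 0x004. | ||
| | */ | ||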
| 414 | |||
| 415 | static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 416 | int where, int size, u32 *value) | ||
| 417 | { | ||
| 418 | if (bus->number == 0) | ||
| 419 | return pci_generic_config_read32(bus, devfn, where, size, | ||
| 420 | value); | ||
| 421 | |||
| 422 | return pci_generic_config_read(bus, devfn, where, size, value); | ||
| 423 | } | ||
| 424 | |||
| 425 | static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 426 | int where, int size, u32 value) | ||
| 427 | { | ||
| 428 | if (bus->number == 0) | ||
| 429 | return pci_generic_config_write32(bus, devfn, where, size, | ||
| 430 | value); | ||
| 431 | |||
| 432 | return pci_generic_config_write(bus, devfn, where, size, value); | ||
| 433 | } | ||
| 434 | |||
| 435 | static struct pci_ops tegra_pcie_ops = { | ||
| 436 | .map_bus = tegra_pcie_map_bus, | ||
| 437 | .read = tegra_pcie_config_read, | ||
| 438 | .write = tegra_pcie_config_write, | ||
| 439 | }; | ||
| 440 | |||
| 441 | static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port) | ||
| 442 | { | ||
| 443 | unsigned long ret = 0; | ||
| 444 | |||
| 445 | switch (port->index) { | ||
| 446 | case 0: | ||
| 447 | ret = AFI_PEX0_CTRL; | ||
| 448 | break; | ||
| 449 | |||
| 450 | case 1: | ||
| 451 | ret = AFI_PEX1_CTRL; | ||
| 452 | break; | ||
| 453 | |||
| 454 | case 2: | ||
| 455 | ret = AFI_PEX2_CTRL; | ||
| 456 | break; | ||
| 457 | } | ||
| 458 | |||
| 459 | return ret; | ||
| 460 | } | ||
| 461 | |||
| 462 | static void tegra_pcie_port_reset(struct tegra_pcie_port *port) | ||
| 463 | { | ||
| 464 | unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); | ||
| 465 | unsigned long value; | ||
| 466 | |||
| 467 | /* pulse reset signal */ | ||
| 468 | value = afi_readl(port->pcie, ctrl); | ||
| 469 | value &= ~AFI_PEX_CTRL_RST; | ||
| 470 | afi_writel(port->pcie, value, ctrl); | ||
| 471 | |||
| 472 | usleep_range(1000, 2000); | ||
| 473 | |||
| 474 | value = afi_readl(port->pcie, ctrl); | ||
| 475 | value |= AFI_PEX_CTRL_RST; | ||
| 476 | afi_writel(port->pcie, value, ctrl); | ||
| 477 | } | ||
| 478 | |||
| 479 | static void tegra_pcie_port_enable(struct tegra_pcie_port *port) | ||
| 480 | { | ||
| 481 | unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); | ||
| 482 | const struct tegra_pcie_soc *soc = port->pcie->soc; | ||
| 483 | unsigned long value; | ||
| 484 | |||
| 485 | /* enable reference clock */ | ||
| 486 | value = afi_readl(port->pcie, ctrl); | ||
| 487 | value |= AFI_PEX_CTRL_REFCLK_EN; | ||
| 488 | |||
| 489 | if (soc->has_pex_clkreq_en) | ||
| 490 | value |= AFI_PEX_CTRL_CLKREQ_EN; | ||
| 491 | |||
| 492 | value |= AFI_PEX_CTRL_OVERRIDE_EN; | ||
| 493 | |||
| 494 | afi_writel(port->pcie, value, ctrl); | ||
| 495 | |||
| 496 | tegra_pcie_port_reset(port); | ||
| 497 | |||
| 498 | if (soc->force_pca_enable) { | ||
| 499 | value = readl(port->base + RP_VEND_CTL2); | ||
| 500 | value |= RP_VEND_CTL2_PCA_ENABLE; | ||
| 501 | writel(value, port->base + RP_VEND_CTL2); | ||
| 502 | } | ||
| 503 | } | ||
| 504 | |||
| 505 | static void tegra_pcie_port_disable(struct tegra_pcie_port *port) | ||
| 506 | { | ||
| 507 | unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port); | ||
| 508 | const struct tegra_pcie_soc *soc = port->pcie->soc; | ||
| 509 | unsigned long value; | ||
| 510 | |||
| 511 | /* assert port reset */ | ||
| 512 | value = afi_readl(port->pcie, ctrl); | ||
| 513 | value &= ~AFI_PEX_CTRL_RST; | ||
| 514 | afi_writel(port->pcie, value, ctrl); | ||
| 515 | |||
| 516 | /* disable reference clock */ | ||
| 517 | value = afi_readl(port->pcie, ctrl); | ||
| 518 | |||
| 519 | if (soc->has_pex_clkreq_en) | ||
| 520 | value &= ~AFI_PEX_CTRL_CLKREQ_EN; | ||
| 521 | |||
| 522 | value &= ~AFI_PEX_CTRL_REFCLK_EN; | ||
| 523 | afi_writel(port->pcie, value, ctrl); | ||
| 524 | } | ||
| 525 | |||
| 526 | static void tegra_pcie_port_free(struct tegra_pcie_port *port) | ||
| 527 | { | ||
| 528 | struct tegra_pcie *pcie = port->pcie; | ||
| 529 | struct device *dev = pcie->dev; | ||
| 530 | |||
| 531 | devm_iounmap(dev, port->base); | ||
| 532 | devm_release_mem_region(dev, port->regs.start, | ||
| 533 | resource_size(&port->regs)); | ||
| 534 | list_del(&port->list); | ||
| 535 | devm_kfree(dev, port); | ||
| 536 | } | ||
| 537 | |||
| 538 | /* Tegra PCIe root complex wrongly reports device class */ | ||
| 539 | static void tegra_pcie_fixup_class(struct pci_dev *dev) | ||
| 540 | { | ||
| 541 | dev->class = PCI_CLASS_BRIDGE_PCI << 8; | ||
| 542 | } | ||
| 543 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class); | ||
| 544 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class); | ||
| 545 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class); | ||
| 546 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class); | ||
| 547 | |||
| 548 | /* Tegra PCIe requires relaxed ordering */ | ||
| 549 | static void tegra_pcie_relax_enable(struct pci_dev *dev) | ||
| 550 | { | ||
| 551 | pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN); | ||
| 552 | } | ||
| 553 | DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable); | ||
| 554 | |||
| 555 | static int tegra_pcie_request_resources(struct tegra_pcie *pcie) | ||
| 556 | { | ||
| 557 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 558 | struct list_head *windows = &host->windows; | ||
| 559 | struct device *dev = pcie->dev; | ||
| 560 | int err; | ||
| 561 | |||
| 562 | pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); | ||
| 563 | pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); | ||
| 564 | pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem); | ||
| 565 | pci_add_resource(windows, &pcie->busn); | ||
| 566 | |||
| 567 | err = devm_request_pci_bus_resources(dev, windows); | ||
| 568 | if (err < 0) { | ||
| 569 | pci_free_resource_list(windows); | ||
| 570 | return err; | ||
| 571 | } | ||
| 572 | |||
| 573 | pci_remap_iospace(&pcie->pio, pcie->io.start); | ||
| 574 | |||
| 575 | return 0; | ||
| 576 | } | ||
| 577 | |||
| 578 | static void tegra_pcie_free_resources(struct tegra_pcie *pcie) | ||
| 579 | { | ||
| 580 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 581 | struct list_head *windows = &host->windows; | ||
| 582 | |||
| 583 | pci_unmap_iospace(&pcie->pio); | ||
| 584 | pci_free_resource_list(windows); | ||
| 585 | } | ||
| 586 | |||
| 587 | static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) | ||
| 588 | { | ||
| 589 | struct tegra_pcie *pcie = pdev->bus->sysdata; | ||
| 590 | int irq; | ||
| 591 | |||
| 592 | tegra_cpuidle_pcie_irqs_in_use(); | ||
| 593 | |||
| 594 | irq = of_irq_parse_and_map_pci(pdev, slot, pin); | ||
| 595 | if (!irq) | ||
| 596 | irq = pcie->irq; | ||
| 597 | |||
| 598 | return irq; | ||
| 599 | } | ||
| 600 | |||
| 601 | static irqreturn_t tegra_pcie_isr(int irq, void *arg) | ||
| 602 | { | ||
| 603 | static const char * const err_msg[] = { | ||
| 604 | "Unknown", | ||
| 605 | "AXI slave error", | ||
| 606 | "AXI decode error", | ||
| 607 | "Target abort", | ||
| 608 | "Master abort", | ||
| 609 | "Invalid write", | ||
| 610 | "Legacy interrupt", | ||
| 611 | "Response decoding error", | ||
| 612 | "AXI response decoding error", | ||
| 613 | "Transaction timeout", | ||
| 614 | "Slot present pin change", | ||
| 615 | "Slot clock request change", | ||
| 616 | "TMS clock ramp change", | ||
| 617 | "TMS ready for power down", | ||
| 618 | "Peer2Peer error", | ||
| 619 | }; | ||
| 620 | struct tegra_pcie *pcie = arg; | ||
| 621 | struct device *dev = pcie->dev; | ||
| 622 | u32 code, signature; | ||
| 623 | |||
| 624 | code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK; | ||
| 625 | signature = afi_readl(pcie, AFI_INTR_SIGNATURE); | ||
| 626 | afi_writel(pcie, 0, AFI_INTR_CODE); | ||
| 627 | |||
| 628 | if (code == AFI_INTR_LEGACY) | ||
| 629 | return IRQ_NONE; | ||
| 630 | |||
| 631 | if (code >= ARRAY_SIZE(err_msg)) | ||
| 632 | code = 0; | ||
| 633 | |||
| 634 | /* | ||
| 635 | * do not pollute kernel log with master abort reports since they | ||
| 636 | * happen a lot during enumeration | ||
| 637 | */ | ||
| 638 | if (code == AFI_INTR_MASTER_ABORT) | ||
| 639 | dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature); | ||
| 640 | else | ||
| 641 | dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature); | ||
| 642 | |||
| 643 | if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT || | ||
| 644 | code == AFI_INTR_FPCI_DECODE_ERROR) { | ||
| 645 | u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff; | ||
| 646 | u64 address = (u64)fpci << 32 | (signature & 0xfffffffc); | ||
| 647 | |||
| 648 | if (code == AFI_INTR_MASTER_ABORT) | ||
| 649 | dev_dbg(dev, " FPCI address: %10llx\n", address); | ||
| 650 | else | ||
| 651 | dev_err(dev, " FPCI address: %10llx\n", address); | ||
| 652 | } | ||
| 653 | |||
| 654 | return IRQ_HANDLED; | ||
| 655 | } | ||
| 656 | |||
| 657 | /* | ||
| 658 | * FPCI map is as follows: | ||
| 659 | * - 0xfdfc000000: I/O space | ||
| 660 | * - 0xfdfe000000: type 0 configuration space | ||
| 661 | * - 0xfdff000000: type 1 configuration space | ||
| 662 | * - 0xfe00000000: type 0 extended configuration space | ||
| 663 | * - 0xfe10000000: type 1 extended configuration space | ||
| 664 | */ | ||
| 665 | static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) | ||
| 666 | { | ||
| 667 | u32 fpci_bar, size, axi_address; | ||
| 668 | |||
| 669 | /* Bar 0: type 1 extended configuration space */ | ||
| 670 | size = resource_size(&pcie->cs); | ||
| 671 | afi_writel(pcie, pcie->cs.start, AFI_AXI_BAR0_START); | ||
| 672 | afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ); | ||
| 673 | |||
| 674 | /* Bar 1: downstream IO bar */ | ||
| 675 | fpci_bar = 0xfdfc0000; | ||
| 676 | size = resource_size(&pcie->io); | ||
| 677 | axi_address = pcie->io.start; | ||
| 678 | afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); | ||
| 679 | afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); | ||
| 680 | afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); | ||
| 681 | |||
| 682 | /* Bar 2: prefetchable memory BAR */ | ||
| 683 | fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1; | ||
| 684 | size = resource_size(&pcie->prefetch); | ||
| 685 | axi_address = pcie->prefetch.start; | ||
| 686 | afi_writel(pcie, axi_address, AFI_AXI_BAR2_START); | ||
| 687 | afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ); | ||
| 688 | afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2); | ||
| 689 | |||
| 690 | /* Bar 3: non prefetchable memory BAR */ | ||
| 691 | fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1; | ||
| 692 | size = resource_size(&pcie->mem); | ||
| 693 | axi_address = pcie->mem.start; | ||
| 694 | afi_writel(pcie, axi_address, AFI_AXI_BAR3_START); | ||
| 695 | afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ); | ||
| 696 | afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3); | ||
| 697 | |||
| 698 | /* NULL out the remaining BARs as they are not used */ | ||
| 699 | afi_writel(pcie, 0, AFI_AXI_BAR4_START); | ||
| 700 | afi_writel(pcie, 0, AFI_AXI_BAR4_SZ); | ||
| 701 | afi_writel(pcie, 0, AFI_FPCI_BAR4); | ||
| 702 | |||
| 703 | afi_writel(pcie, 0, AFI_AXI_BAR5_START); | ||
| 704 | afi_writel(pcie, 0, AFI_AXI_BAR5_SZ); | ||
| 705 | afi_writel(pcie, 0, AFI_FPCI_BAR5); | ||
| 706 | |||
| 707 | /* map all upstream transactions as uncached */ | ||
| 708 | afi_writel(pcie, 0, AFI_CACHE_BAR0_ST); | ||
| 709 | afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ); | ||
| 710 | afi_writel(pcie, 0, AFI_CACHE_BAR1_ST); | ||
| 711 | afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ); | ||
| 712 | |||
| 713 | /* MSI translations are set up only when needed */ | ||
| 714 | afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST); | ||
| 715 | afi_writel(pcie, 0, AFI_MSI_BAR_SZ); | ||
| 716 | afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST); | ||
| 717 | afi_writel(pcie, 0, AFI_MSI_BAR_SZ); | ||
| 718 | } | ||
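| | |||
| | /* | ||
| | * Editor's note on the encodings above, inferred from the code rather | ||
| | * than from a TRM: AFI_AXI_BARn_SZ takes the window size in 4 KiB units | ||
| | * (size >> 12), and the memory FPCI BAR values pack address bits [39:12] | ||
| | * into bits [31:4], with bit 0 presumably acting as an enable. An | ||
| | * illustrative non-prefetchable window at 0x13000000 would yield | ||
| | * | ||
| | *   fpci_bar = ((0x13000000 >> 12) & 0x0fffffff) << 4 | 0x1 | ||
| | *            = 0x13000 << 4 | 0x1 = 0x00130001 | ||
| | */ | ||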
| 719 | |||
| 720 | static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout) | ||
| 721 | { | ||
| 722 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 723 | u32 value; | ||
| 724 | |||
| 725 | timeout = jiffies + msecs_to_jiffies(timeout); | ||
| 726 | |||
| 727 | while (time_before(jiffies, timeout)) { | ||
| 728 | value = pads_readl(pcie, soc->pads_pll_ctl); | ||
| 729 | if (value & PADS_PLL_CTL_LOCKDET) | ||
| 730 | return 0; | ||
| 731 | } | ||
| 732 | |||
| 733 | return -ETIMEDOUT; | ||
| 734 | } | ||
| 735 | |||
| 736 | static int tegra_pcie_phy_enable(struct tegra_pcie *pcie) | ||
| 737 | { | ||
| 738 | struct device *dev = pcie->dev; | ||
| 739 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 740 | u32 value; | ||
| 741 | int err; | ||
| 742 | |||
| 743 | /* initialize internal PHY, enable up to 16 PCIe lanes */ | ||
| 744 | pads_writel(pcie, 0x0, PADS_CTL_SEL); | ||
| 745 | |||
| 746 | /* override IDDQ to 1 on all 4 lanes */ | ||
| 747 | value = pads_readl(pcie, PADS_CTL); | ||
| 748 | value |= PADS_CTL_IDDQ_1L; | ||
| 749 | pads_writel(pcie, value, PADS_CTL); | ||
| 750 | |||
| 751 | /* | ||
| 752 | * Set up PHY PLL inputs: select PLLE output as refclock, | ||
| 753 | * set TX ref sel to div10 (not div5). | ||
| 754 | */ | ||
| 755 | value = pads_readl(pcie, soc->pads_pll_ctl); | ||
| 756 | value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK); | ||
| 757 | value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel; | ||
| 758 | pads_writel(pcie, value, soc->pads_pll_ctl); | ||
| 759 | |||
| 760 | /* reset PLL */ | ||
| 761 | value = pads_readl(pcie, soc->pads_pll_ctl); | ||
| 762 | value &= ~PADS_PLL_CTL_RST_B4SM; | ||
| 763 | pads_writel(pcie, value, soc->pads_pll_ctl); | ||
| 764 | |||
| 765 | usleep_range(20, 100); | ||
| 766 | |||
| 767 | /* take PLL out of reset */ | ||
| 768 | value = pads_readl(pcie, soc->pads_pll_ctl); | ||
| 769 | value |= PADS_PLL_CTL_RST_B4SM; | ||
| 770 | pads_writel(pcie, value, soc->pads_pll_ctl); | ||
| 771 | |||
| 772 | /* wait for the PLL to lock */ | ||
| 773 | err = tegra_pcie_pll_wait(pcie, 500); | ||
| 774 | if (err < 0) { | ||
| 775 | dev_err(dev, "PLL failed to lock: %d\n", err); | ||
| 776 | return err; | ||
| 777 | } | ||
| 778 | |||
| 779 | /* turn off IDDQ override */ | ||
| 780 | value = pads_readl(pcie, PADS_CTL); | ||
| 781 | value &= ~PADS_CTL_IDDQ_1L; | ||
| 782 | pads_writel(pcie, value, PADS_CTL); | ||
| 783 | |||
| 784 | /* enable TX/RX data */ | ||
| 785 | value = pads_readl(pcie, PADS_CTL); | ||
| 786 | value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L; | ||
| 787 | pads_writel(pcie, value, PADS_CTL); | ||
| 788 | |||
| 789 | return 0; | ||
| 790 | } | ||
| 791 | |||
| 792 | static int tegra_pcie_phy_disable(struct tegra_pcie *pcie) | ||
| 793 | { | ||
| 794 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 795 | u32 value; | ||
| 796 | |||
| 797 | /* disable TX/RX data */ | ||
| 798 | value = pads_readl(pcie, PADS_CTL); | ||
| 799 | value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L); | ||
| 800 | pads_writel(pcie, value, PADS_CTL); | ||
| 801 | |||
| 802 | /* override IDDQ */ | ||
| 803 | value = pads_readl(pcie, PADS_CTL); | ||
| 804 | value |= PADS_CTL_IDDQ_1L; | ||
| 805 | pads_writel(pcie, value, PADS_CTL); | ||
| 806 | |||
| 807 | /* reset PLL */ | ||
| 808 | value = pads_readl(pcie, soc->pads_pll_ctl); | ||
| 809 | value &= ~PADS_PLL_CTL_RST_B4SM; | ||
| 810 | pads_writel(pcie, value, soc->pads_pll_ctl); | ||
| 811 | |||
| 812 | usleep_range(20, 100); | ||
| 813 | |||
| 814 | return 0; | ||
| 815 | } | ||
| 816 | |||
| 817 | static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port) | ||
| 818 | { | ||
| 819 | struct device *dev = port->pcie->dev; | ||
| 820 | unsigned int i; | ||
| 821 | int err; | ||
| 822 | |||
| 823 | for (i = 0; i < port->lanes; i++) { | ||
| 824 | err = phy_power_on(port->phys[i]); | ||
| 825 | if (err < 0) { | ||
| 826 | dev_err(dev, "failed to power on PHY#%u: %d\n", i, err); | ||
| 827 | return err; | ||
| 828 | } | ||
| 829 | } | ||
| 830 | |||
| 831 | return 0; | ||
| 832 | } | ||
| 833 | |||
| 834 | static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port) | ||
| 835 | { | ||
| 836 | struct device *dev = port->pcie->dev; | ||
| 837 | unsigned int i; | ||
| 838 | int err; | ||
| 839 | |||
| 840 | for (i = 0; i < port->lanes; i++) { | ||
| 841 | err = phy_power_off(port->phys[i]); | ||
| 842 | if (err < 0) { | ||
| 843 | dev_err(dev, "failed to power off PHY#%u: %d\n", i, | ||
| 844 | err); | ||
| 845 | return err; | ||
| 846 | } | ||
| 847 | } | ||
| 848 | |||
| 849 | return 0; | ||
| 850 | } | ||
| 851 | |||
| 852 | static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie) | ||
| 853 | { | ||
| 854 | struct device *dev = pcie->dev; | ||
| 855 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 856 | struct tegra_pcie_port *port; | ||
| 857 | int err; | ||
| 858 | |||
| 859 | if (pcie->legacy_phy) { | ||
| 860 | if (pcie->phy) | ||
| 861 | err = phy_power_on(pcie->phy); | ||
| 862 | else | ||
| 863 | err = tegra_pcie_phy_enable(pcie); | ||
| 864 | |||
| 865 | if (err < 0) | ||
| 866 | dev_err(dev, "failed to power on PHY: %d\n", err); | ||
| 867 | |||
| 868 | return err; | ||
| 869 | } | ||
| 870 | |||
| 871 | list_for_each_entry(port, &pcie->ports, list) { | ||
| 872 | err = tegra_pcie_port_phy_power_on(port); | ||
| 873 | if (err < 0) { | ||
| 874 | dev_err(dev, | ||
| 875 | "failed to power on PCIe port %u PHY: %d\n", | ||
| 876 | port->index, err); | ||
| 877 | return err; | ||
| 878 | } | ||
| 879 | } | ||
| 880 | |||
| 881 | /* Configure the reference clock driver */ | ||
| 882 | pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0); | ||
| 883 | |||
| 884 | if (soc->num_ports > 2) | ||
| 885 | pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1); | ||
| 886 | |||
| 887 | return 0; | ||
| 888 | } | ||
| 889 | |||
| 890 | static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie) | ||
| 891 | { | ||
| 892 | struct device *dev = pcie->dev; | ||
| 893 | struct tegra_pcie_port *port; | ||
| 894 | int err; | ||
| 895 | |||
| 896 | if (pcie->legacy_phy) { | ||
| 897 | if (pcie->phy) | ||
| 898 | err = phy_power_off(pcie->phy); | ||
| 899 | else | ||
| 900 | err = tegra_pcie_phy_disable(pcie); | ||
| 901 | |||
| 902 | if (err < 0) | ||
| 903 | dev_err(dev, "failed to power off PHY: %d\n", err); | ||
| 904 | |||
| 905 | return err; | ||
| 906 | } | ||
| 907 | |||
| 908 | list_for_each_entry(port, &pcie->ports, list) { | ||
| 909 | err = tegra_pcie_port_phy_power_off(port); | ||
| 910 | if (err < 0) { | ||
| 911 | dev_err(dev, | ||
| 912 | "failed to power off PCIe port %u PHY: %d\n", | ||
| 913 | port->index, err); | ||
| 914 | return err; | ||
| 915 | } | ||
| 916 | } | ||
| 917 | |||
| 918 | return 0; | ||
| 919 | } | ||
| 920 | |||
| 921 | static int tegra_pcie_enable_controller(struct tegra_pcie *pcie) | ||
| 922 | { | ||
| 923 | struct device *dev = pcie->dev; | ||
| 924 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 925 | struct tegra_pcie_port *port; | ||
| 926 | unsigned long value; | ||
| 927 | int err; | ||
| 928 | |||
| 929 | /* enable PLL power down */ | ||
| 930 | if (pcie->phy) { | ||
| 931 | value = afi_readl(pcie, AFI_PLLE_CONTROL); | ||
| 932 | value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL; | ||
| 933 | value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN; | ||
| 934 | afi_writel(pcie, value, AFI_PLLE_CONTROL); | ||
| 935 | } | ||
| 936 | |||
| 937 | /* power down PCIe slot clock bias pad */ | ||
| 938 | if (soc->has_pex_bias_ctrl) | ||
| 939 | afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0); | ||
| 940 | |||
| 941 | /* configure mode and disable all ports */ | ||
| 942 | value = afi_readl(pcie, AFI_PCIE_CONFIG); | ||
| 943 | value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK; | ||
| 944 | value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config; | ||
| 945 | |||
| 946 | list_for_each_entry(port, &pcie->ports, list) | ||
| 947 | value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index); | ||
| 948 | |||
| 949 | afi_writel(pcie, value, AFI_PCIE_CONFIG); | ||
| 950 | |||
| 951 | if (soc->has_gen2) { | ||
| 952 | value = afi_readl(pcie, AFI_FUSE); | ||
| 953 | value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS; | ||
| 954 | afi_writel(pcie, value, AFI_FUSE); | ||
| 955 | } else { | ||
| 956 | value = afi_readl(pcie, AFI_FUSE); | ||
| 957 | value |= AFI_FUSE_PCIE_T0_GEN2_DIS; | ||
| 958 | afi_writel(pcie, value, AFI_FUSE); | ||
| 959 | } | ||
| 960 | |||
| 961 | if (soc->program_uphy) { | ||
| 962 | err = tegra_pcie_phy_power_on(pcie); | ||
| 963 | if (err < 0) { | ||
| 964 | dev_err(dev, "failed to power on PHY(s): %d\n", err); | ||
| 965 | return err; | ||
| 966 | } | ||
| 967 | } | ||
| 968 | |||
| 969 | /* take the PCIe interface module out of reset */ | ||
| 970 | reset_control_deassert(pcie->pcie_xrst); | ||
| 971 | |||
| 972 | /* finally enable PCIe */ | ||
| 973 | value = afi_readl(pcie, AFI_CONFIGURATION); | ||
| 974 | value |= AFI_CONFIGURATION_EN_FPCI; | ||
| 975 | afi_writel(pcie, value, AFI_CONFIGURATION); | ||
| 976 | |||
| 977 | value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR | | ||
| 978 | AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR | | ||
| 979 | AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR; | ||
| 980 | |||
| 981 | if (soc->has_intr_prsnt_sense) | ||
| 982 | value |= AFI_INTR_EN_PRSNT_SENSE; | ||
| 983 | |||
| 984 | afi_writel(pcie, value, AFI_AFI_INTR_ENABLE); | ||
| 985 | afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE); | ||
| 986 | |||
| 987 | /* don't enable MSI for now, only when needed */ | ||
| 988 | afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK); | ||
| 989 | |||
| 990 | /* disable all exceptions */ | ||
| 991 | afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS); | ||
| 992 | |||
| 993 | return 0; | ||
| 994 | } | ||
| 995 | |||
| 996 | static void tegra_pcie_disable_controller(struct tegra_pcie *pcie) | ||
| 997 | { | ||
| 998 | int err; | ||
| 999 | |||
| 1000 | reset_control_assert(pcie->pcie_xrst); | ||
| 1001 | |||
| 1002 | if (pcie->soc->program_uphy) { | ||
| 1003 | err = tegra_pcie_phy_power_off(pcie); | ||
| 1004 | if (err < 0) | ||
| 1005 | dev_err(pcie->dev, "failed to power off PHY(s): %d\n", | ||
| 1006 | err); | ||
| 1007 | } | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | static void tegra_pcie_power_off(struct tegra_pcie *pcie) | ||
| 1011 | { | ||
| 1012 | struct device *dev = pcie->dev; | ||
| 1013 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1014 | int err; | ||
| 1015 | |||
| 1016 | reset_control_assert(pcie->afi_rst); | ||
| 1017 | reset_control_assert(pcie->pex_rst); | ||
| 1018 | |||
| 1019 | clk_disable_unprepare(pcie->pll_e); | ||
| 1020 | if (soc->has_cml_clk) | ||
| 1021 | clk_disable_unprepare(pcie->cml_clk); | ||
| 1022 | clk_disable_unprepare(pcie->afi_clk); | ||
| 1023 | clk_disable_unprepare(pcie->pex_clk); | ||
| 1024 | |||
| 1025 | if (!dev->pm_domain) | ||
| 1026 | tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); | ||
| 1027 | |||
| 1028 | err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies); | ||
| 1029 | if (err < 0) | ||
| 1030 | dev_warn(dev, "failed to disable regulators: %d\n", err); | ||
| 1031 | } | ||
| 1032 | |||
| 1033 | static int tegra_pcie_power_on(struct tegra_pcie *pcie) | ||
| 1034 | { | ||
| 1035 | struct device *dev = pcie->dev; | ||
| 1036 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1037 | int err; | ||
| 1038 | |||
| 1039 | reset_control_assert(pcie->pcie_xrst); | ||
| 1040 | reset_control_assert(pcie->afi_rst); | ||
| 1041 | reset_control_assert(pcie->pex_rst); | ||
| 1042 | |||
| 1043 | if (!dev->pm_domain) | ||
| 1044 | tegra_powergate_power_off(TEGRA_POWERGATE_PCIE); | ||
| 1045 | |||
| 1046 | /* enable regulators */ | ||
| 1047 | err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies); | ||
| 1048 | if (err < 0) | ||
| 1049 | dev_err(dev, "failed to enable regulators: %d\n", err); | ||
| 1050 | |||
| 1051 | if (dev->pm_domain) { | ||
| 1052 | err = clk_prepare_enable(pcie->pex_clk); | ||
| 1053 | if (err) { | ||
| 1054 | dev_err(dev, "failed to enable PEX clock: %d\n", err); | ||
| 1055 | return err; | ||
| 1056 | } | ||
| 1057 | reset_control_deassert(pcie->pex_rst); | ||
| 1058 | } else { | ||
| 1059 | err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE, | ||
| 1060 | pcie->pex_clk, | ||
| 1061 | pcie->pex_rst); | ||
| 1062 | if (err) { | ||
| 1063 | dev_err(dev, "powerup sequence failed: %d\n", err); | ||
| 1064 | return err; | ||
| 1065 | } | ||
| 1066 | } | ||
| 1067 | |||
| 1068 | reset_control_deassert(pcie->afi_rst); | ||
| 1069 | |||
| 1070 | err = clk_prepare_enable(pcie->afi_clk); | ||
| 1071 | if (err < 0) { | ||
| 1072 | dev_err(dev, "failed to enable AFI clock: %d\n", err); | ||
| 1073 | return err; | ||
| 1074 | } | ||
| 1075 | |||
| 1076 | if (soc->has_cml_clk) { | ||
| 1077 | err = clk_prepare_enable(pcie->cml_clk); | ||
| 1078 | if (err < 0) { | ||
| 1079 | dev_err(dev, "failed to enable CML clock: %d\n", err); | ||
| 1080 | return err; | ||
| 1081 | } | ||
| 1082 | } | ||
| 1083 | |||
| 1084 | err = clk_prepare_enable(pcie->pll_e); | ||
| 1085 | if (err < 0) { | ||
| 1086 | dev_err(dev, "failed to enable PLLE clock: %d\n", err); | ||
| 1087 | return err; | ||
| 1088 | } | ||
| 1089 | |||
| 1090 | return 0; | ||
| 1091 | } | ||
| 1092 | |||
| 1093 | static int tegra_pcie_clocks_get(struct tegra_pcie *pcie) | ||
| 1094 | { | ||
| 1095 | struct device *dev = pcie->dev; | ||
| 1096 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1097 | |||
| 1098 | pcie->pex_clk = devm_clk_get(dev, "pex"); | ||
| 1099 | if (IS_ERR(pcie->pex_clk)) | ||
| 1100 | return PTR_ERR(pcie->pex_clk); | ||
| 1101 | |||
| 1102 | pcie->afi_clk = devm_clk_get(dev, "afi"); | ||
| 1103 | if (IS_ERR(pcie->afi_clk)) | ||
| 1104 | return PTR_ERR(pcie->afi_clk); | ||
| 1105 | |||
| 1106 | pcie->pll_e = devm_clk_get(dev, "pll_e"); | ||
| 1107 | if (IS_ERR(pcie->pll_e)) | ||
| 1108 | return PTR_ERR(pcie->pll_e); | ||
| 1109 | |||
| 1110 | if (soc->has_cml_clk) { | ||
| 1111 | pcie->cml_clk = devm_clk_get(dev, "cml"); | ||
| 1112 | if (IS_ERR(pcie->cml_clk)) | ||
| 1113 | return PTR_ERR(pcie->cml_clk); | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | return 0; | ||
| 1117 | } | ||
| 1118 | |||
| 1119 | static int tegra_pcie_resets_get(struct tegra_pcie *pcie) | ||
| 1120 | { | ||
| 1121 | struct device *dev = pcie->dev; | ||
| 1122 | |||
| 1123 | pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex"); | ||
| 1124 | if (IS_ERR(pcie->pex_rst)) | ||
| 1125 | return PTR_ERR(pcie->pex_rst); | ||
| 1126 | |||
| 1127 | pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi"); | ||
| 1128 | if (IS_ERR(pcie->afi_rst)) | ||
| 1129 | return PTR_ERR(pcie->afi_rst); | ||
| 1130 | |||
| 1131 | pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x"); | ||
| 1132 | if (IS_ERR(pcie->pcie_xrst)) | ||
| 1133 | return PTR_ERR(pcie->pcie_xrst); | ||
| 1134 | |||
| 1135 | return 0; | ||
| 1136 | } | ||
| 1137 | |||
| 1138 | static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie) | ||
| 1139 | { | ||
| 1140 | struct device *dev = pcie->dev; | ||
| 1141 | int err; | ||
| 1142 | |||
| 1143 | pcie->phy = devm_phy_optional_get(dev, "pcie"); | ||
| 1144 | if (IS_ERR(pcie->phy)) { | ||
| 1145 | err = PTR_ERR(pcie->phy); | ||
| 1146 | dev_err(dev, "failed to get PHY: %d\n", err); | ||
| 1147 | return err; | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | err = phy_init(pcie->phy); | ||
| 1151 | if (err < 0) { | ||
| 1152 | dev_err(dev, "failed to initialize PHY: %d\n", err); | ||
| 1153 | return err; | ||
| 1154 | } | ||
| 1155 | |||
| 1156 | pcie->legacy_phy = true; | ||
| 1157 | |||
| 1158 | return 0; | ||
| 1159 | } | ||
| 1160 | |||
| 1161 | static struct phy *devm_of_phy_optional_get_index(struct device *dev, | ||
| 1162 | struct device_node *np, | ||
| 1163 | const char *consumer, | ||
| 1164 | unsigned int index) | ||
| 1165 | { | ||
| 1166 | struct phy *phy; | ||
| 1167 | char *name; | ||
| 1168 | |||
| 1169 | name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index); | ||
| 1170 | if (!name) | ||
| 1171 | return ERR_PTR(-ENOMEM); | ||
| 1172 | |||
| 1173 | phy = devm_of_phy_get(dev, np, name); | ||
| 1174 | kfree(name); | ||
| 1175 | |||
| 1176 | if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV) | ||
| 1177 | phy = NULL; | ||
| 1178 | |||
| 1179 | return phy; | ||
| 1180 | } | ||
| 1181 | |||
| 1182 | static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port) | ||
| 1183 | { | ||
| 1184 | struct device *dev = port->pcie->dev; | ||
| 1185 | struct phy *phy; | ||
| 1186 | unsigned int i; | ||
| 1187 | int err; | ||
| 1188 | |||
| 1189 | port->phys = devm_kcalloc(dev, port->lanes, sizeof(phy), GFP_KERNEL); | ||
| 1190 | if (!port->phys) | ||
| 1191 | return -ENOMEM; | ||
| 1192 | |||
| 1193 | for (i = 0; i < port->lanes; i++) { | ||
| 1194 | phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i); | ||
| 1195 | if (IS_ERR(phy)) { | ||
| 1196 | dev_err(dev, "failed to get PHY#%u: %ld\n", i, | ||
| 1197 | PTR_ERR(phy)); | ||
| 1198 | return PTR_ERR(phy); | ||
| 1199 | } | ||
| 1200 | |||
| 1201 | err = phy_init(phy); | ||
| 1202 | if (err < 0) { | ||
| 1203 | dev_err(dev, "failed to initialize PHY#%u: %d\n", i, | ||
| 1204 | err); | ||
| 1205 | return err; | ||
| 1206 | } | ||
| 1207 | |||
| 1208 | port->phys[i] = phy; | ||
| 1209 | } | ||
| 1210 | |||
| 1211 | return 0; | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | static int tegra_pcie_phys_get(struct tegra_pcie *pcie) | ||
| 1215 | { | ||
| 1216 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1217 | struct device_node *np = pcie->dev->of_node; | ||
| 1218 | struct tegra_pcie_port *port; | ||
| 1219 | int err; | ||
| 1220 | |||
| 1221 | if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL) | ||
| 1222 | return tegra_pcie_phys_get_legacy(pcie); | ||
| 1223 | |||
| 1224 | list_for_each_entry(port, &pcie->ports, list) { | ||
| 1225 | err = tegra_pcie_port_get_phys(port); | ||
| 1226 | if (err < 0) | ||
| 1227 | return err; | ||
| 1228 | } | ||
| 1229 | |||
| 1230 | return 0; | ||
| 1231 | } | ||
| 1232 | |||
| 1233 | static void tegra_pcie_phys_put(struct tegra_pcie *pcie) | ||
| 1234 | { | ||
| 1235 | struct tegra_pcie_port *port; | ||
| 1236 | struct device *dev = pcie->dev; | ||
| 1237 | int err, i; | ||
| 1238 | |||
| 1239 | if (pcie->legacy_phy) { | ||
| 1240 | err = phy_exit(pcie->phy); | ||
| 1241 | if (err < 0) | ||
| 1242 | dev_err(dev, "failed to teardown PHY: %d\n", err); | ||
| 1243 | return; | ||
| 1244 | } | ||
| 1245 | |||
| 1246 | list_for_each_entry(port, &pcie->ports, list) { | ||
| 1247 | for (i = 0; i < port->lanes; i++) { | ||
| 1248 | err = phy_exit(port->phys[i]); | ||
| 1249 | if (err < 0) | ||
| 1250 | dev_err(dev, "failed to teardown PHY#%u: %d\n", | ||
| 1251 | i, err); | ||
| 1252 | } | ||
| 1253 | } | ||
| 1254 | } | ||
| 1255 | |||
| 1256 | |||
| 1257 | static int tegra_pcie_get_resources(struct tegra_pcie *pcie) | ||
| 1258 | { | ||
| 1259 | struct device *dev = pcie->dev; | ||
| 1260 | struct platform_device *pdev = to_platform_device(dev); | ||
| 1261 | struct resource *pads, *afi, *res; | ||
| 1262 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1263 | int err; | ||
| 1264 | |||
| 1265 | err = tegra_pcie_clocks_get(pcie); | ||
| 1266 | if (err) { | ||
| 1267 | dev_err(dev, "failed to get clocks: %d\n", err); | ||
| 1268 | return err; | ||
| 1269 | } | ||
| 1270 | |||
| 1271 | err = tegra_pcie_resets_get(pcie); | ||
| 1272 | if (err) { | ||
| 1273 | dev_err(dev, "failed to get resets: %d\n", err); | ||
| 1274 | return err; | ||
| 1275 | } | ||
| 1276 | |||
| 1277 | if (soc->program_uphy) { | ||
| 1278 | err = tegra_pcie_phys_get(pcie); | ||
| 1279 | if (err < 0) { | ||
| 1280 | dev_err(dev, "failed to get PHYs: %d\n", err); | ||
| 1281 | return err; | ||
| 1282 | } | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads"); | ||
| 1286 | pcie->pads = devm_ioremap_resource(dev, pads); | ||
| 1287 | if (IS_ERR(pcie->pads)) { | ||
| 1288 | err = PTR_ERR(pcie->pads); | ||
| 1289 | goto phys_put; | ||
| 1290 | } | ||
| 1291 | |||
| 1292 | afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi"); | ||
| 1293 | pcie->afi = devm_ioremap_resource(dev, afi); | ||
| 1294 | if (IS_ERR(pcie->afi)) { | ||
| 1295 | err = PTR_ERR(pcie->afi); | ||
| 1296 | goto phys_put; | ||
| 1297 | } | ||
| 1298 | |||
| 1299 | /* request configuration space, but remap later, on demand */ | ||
| 1300 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs"); | ||
| 1301 | if (!res) { | ||
| 1302 | err = -EADDRNOTAVAIL; | ||
| 1303 | goto phys_put; | ||
| 1304 | } | ||
| 1305 | |||
| 1306 | pcie->cs = *res; | ||
| 1307 | |||
| 1308 | /* constrain configuration space to 4 KiB */ | ||
| 1309 | pcie->cs.end = pcie->cs.start + SZ_4K - 1; | ||
| 1310 | |||
| 1311 | pcie->cfg = devm_ioremap_resource(dev, &pcie->cs); | ||
| 1312 | if (IS_ERR(pcie->cfg)) { | ||
| 1313 | err = PTR_ERR(pcie->cfg); | ||
| 1314 | goto phys_put; | ||
| 1315 | } | ||
| 1316 | |||
| 1317 | /* request interrupt */ | ||
| 1318 | err = platform_get_irq_byname(pdev, "intr"); | ||
| 1319 | if (err < 0) { | ||
| 1320 | dev_err(dev, "failed to get IRQ: %d\n", err); | ||
| 1321 | goto phys_put; | ||
| 1322 | } | ||
| 1323 | |||
| 1324 | pcie->irq = err; | ||
| 1325 | |||
| 1326 | err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie); | ||
| 1327 | if (err) { | ||
| 1328 | dev_err(dev, "failed to register IRQ: %d\n", err); | ||
| 1329 | goto phys_put; | ||
| 1330 | } | ||
| 1331 | |||
| 1332 | return 0; | ||
| 1333 | |||
| 1334 | phys_put: | ||
| 1335 | if (soc->program_uphy) | ||
| 1336 | tegra_pcie_phys_put(pcie); | ||
| 1337 | return err; | ||
| 1338 | } | ||
| 1339 | |||
| 1340 | static int tegra_pcie_put_resources(struct tegra_pcie *pcie) | ||
| 1341 | { | ||
| 1342 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1343 | |||
| 1344 | if (pcie->irq > 0) | ||
| 1345 | free_irq(pcie->irq, pcie); | ||
| 1346 | |||
| 1347 | if (soc->program_uphy) | ||
| 1348 | tegra_pcie_phys_put(pcie); | ||
| 1349 | |||
| 1350 | return 0; | ||
| 1351 | } | ||
| 1352 | |||
| 1353 | static void tegra_pcie_pme_turnoff(struct tegra_pcie_port *port) | ||
| 1354 | { | ||
| 1355 | struct tegra_pcie *pcie = port->pcie; | ||
| 1356 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1357 | int err; | ||
| 1358 | u32 val; | ||
| 1359 | u8 ack_bit; | ||
| 1360 | |||
| 1361 | val = afi_readl(pcie, AFI_PCIE_PME); | ||
| 1362 | val |= (0x1 << soc->ports[port->index].pme.turnoff_bit); | ||
| 1363 | afi_writel(pcie, val, AFI_PCIE_PME); | ||
| 1364 | |||
| 1365 | ack_bit = soc->ports[port->index].pme.ack_bit; | ||
| 1366 | err = readl_poll_timeout(pcie->afi + AFI_PCIE_PME, val, | ||
| 1367 | val & (0x1 << ack_bit), 1, PME_ACK_TIMEOUT); | ||
| 1368 | if (err) | ||
| 1369 | dev_err(pcie->dev, "PME Ack is not received on port: %d\n", | ||
| 1370 | port->index); | ||
| 1371 | |||
| 1372 | usleep_range(10000, 11000); | ||
| 1373 | |||
| 1374 | val = afi_readl(pcie, AFI_PCIE_PME); | ||
| 1375 | val &= ~(0x1 << soc->ports[port->index].pme.turnoff_bit); | ||
| 1376 | afi_writel(pcie, val, AFI_PCIE_PME); | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | static int tegra_msi_alloc(struct tegra_msi *chip) | ||
| 1380 | { | ||
| 1381 | int msi; | ||
| 1382 | |||
| 1383 | mutex_lock(&chip->lock); | ||
| 1384 | |||
| 1385 | msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); | ||
| 1386 | if (msi < INT_PCI_MSI_NR) | ||
| 1387 | set_bit(msi, chip->used); | ||
| 1388 | else | ||
| 1389 | msi = -ENOSPC; | ||
| 1390 | |||
| 1391 | mutex_unlock(&chip->lock); | ||
| 1392 | |||
| 1393 | return msi; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq) | ||
| 1397 | { | ||
| 1398 | struct device *dev = chip->chip.dev; | ||
| 1399 | |||
| 1400 | mutex_lock(&chip->lock); | ||
| 1401 | |||
| 1402 | if (!test_bit(irq, chip->used)) | ||
| 1403 | dev_err(dev, "trying to free unused MSI#%lu\n", irq); | ||
| 1404 | else | ||
| 1405 | clear_bit(irq, chip->used); | ||
| 1406 | |||
| 1407 | mutex_unlock(&chip->lock); | ||
| 1408 | } | ||
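tegra_msi_alloc() and tegra_msi_free() form a first-fit bitmap allocator over INT_PCI_MSI_NR vectors, serialized by a mutex. The same idea in portable C, shrunk to a single 64-bit word and no locking (the names and the 64-vector size are illustrative, not taken from the driver):

```c
#include <stdio.h>
#include <stdint.h>

#define NR_VECTORS 64

static uint64_t used_map;	/* bit n set => vector n is allocated */

static int vec_alloc(void)
{
	int n;

	for (n = 0; n < NR_VECTORS; n++) {
		if (!(used_map & (1ull << n))) {
			used_map |= 1ull << n;	/* claim first free bit */
			return n;
		}
	}
	return -1;	/* the driver returns -ENOSPC here */
}

static void vec_free(int n)
{
	if (!(used_map & (1ull << n)))
		fprintf(stderr, "freeing unused vector %d\n", n);
	else
		used_map &= ~(1ull << n);
}

int main(void)
{
	int a = vec_alloc(), b = vec_alloc();

	printf("allocated %d and %d\n", a, b);	/* 0 and 1 */
	vec_free(a);
	printf("reallocated %d\n", vec_alloc());	/* 0 again: first fit */
	vec_free(b);
	vec_free(b);	/* second free warns, like the dev_err() above */
	return 0;
}
```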
| 1409 | |||
| 1410 | static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) | ||
| 1411 | { | ||
| 1412 | struct tegra_pcie *pcie = data; | ||
| 1413 | struct device *dev = pcie->dev; | ||
| 1414 | struct tegra_msi *msi = &pcie->msi; | ||
| 1415 | unsigned int i, processed = 0; | ||
| 1416 | |||
| 1417 | for (i = 0; i < 8; i++) { | ||
| 1418 | unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); | ||
| 1419 | |||
| 1420 | while (reg) { | ||
| 1421 | unsigned int offset = find_first_bit(®, 32); | ||
| 1422 | unsigned int index = i * 32 + offset; | ||
| 1423 | unsigned int irq; | ||
| 1424 | |||
| 1425 | /* clear the interrupt */ | ||
| 1426 | afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4); | ||
| 1427 | |||
| 1428 | irq = irq_find_mapping(msi->domain, index); | ||
| 1429 | if (irq) { | ||
| 1430 | if (test_bit(index, msi->used)) | ||
| 1431 | generic_handle_irq(irq); | ||
| 1432 | else | ||
| 1433 | dev_info(dev, "unhandled MSI\n"); | ||
| 1434 | } else { | ||
| 1435 | 				/* | ||
| 1436 | 				 * That's weird, who triggered this? | ||
| 1437 | 				 * Just clear it. | ||
| 1438 | 				 */ | ||
| 1439 | dev_info(dev, "unexpected MSI\n"); | ||
| 1440 | } | ||
| 1441 | |||
| 1442 | /* see if there's any more pending in this vector */ | ||
| 1443 | reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4); | ||
| 1444 | |||
| 1445 | processed++; | ||
| 1446 | } | ||
| 1447 | } | ||
| 1448 | |||
| 1449 | return processed > 0 ? IRQ_HANDLED : IRQ_NONE; | ||
| 1450 | } | ||
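The interrupt handler above demultiplexes eight 32-bit AFI_MSI_VEC registers: each set bit becomes a flat hardware-IRQ index computed as i * 32 + offset, and writing the bit back acknowledges it. A runnable sketch of just that arithmetic, with simulated latch contents in place of the AFI reads (nothing below is kernel API; __builtin_ctz plays the role of find_first_bit):

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* simulated AFI_MSI_VEC0..7 latches: vectors 3, 40 and 255 pending */
	uint32_t vec[8] = { 1u << 3, 1u << 8, 0, 0, 0, 0, 0, 1u << 31 };
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		uint32_t reg = vec[i];

		while (reg) {
			unsigned int offset = __builtin_ctz(reg);
			unsigned int index = i * 32 + offset;

			printf("MSI hwirq %u pending\n", index);

			/* in hardware, writing the bit clears the latch */
			vec[i] &= ~(1u << offset);
			reg = vec[i];	/* re-read for more pending bits */
			processed++;
		}
	}

	printf("%u vector(s) handled\n", processed);
	return 0;
}
```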
| 1451 | |||
| 1452 | static int tegra_msi_setup_irq(struct msi_controller *chip, | ||
| 1453 | struct pci_dev *pdev, struct msi_desc *desc) | ||
| 1454 | { | ||
| 1455 | struct tegra_msi *msi = to_tegra_msi(chip); | ||
| 1456 | struct msi_msg msg; | ||
| 1457 | unsigned int irq; | ||
| 1458 | int hwirq; | ||
| 1459 | |||
| 1460 | hwirq = tegra_msi_alloc(msi); | ||
| 1461 | if (hwirq < 0) | ||
| 1462 | return hwirq; | ||
| 1463 | |||
| 1464 | irq = irq_create_mapping(msi->domain, hwirq); | ||
| 1465 | if (!irq) { | ||
| 1466 | tegra_msi_free(msi, hwirq); | ||
| 1467 | return -EINVAL; | ||
| 1468 | } | ||
| 1469 | |||
| 1470 | irq_set_msi_desc(irq, desc); | ||
| 1471 | |||
| 1472 | msg.address_lo = lower_32_bits(msi->phys); | ||
| 1473 | msg.address_hi = upper_32_bits(msi->phys); | ||
| 1474 | msg.data = hwirq; | ||
| 1475 | |||
| 1476 | pci_write_msi_msg(irq, &msg); | ||
| 1477 | |||
| 1478 | return 0; | ||
| 1479 | } | ||
| 1480 | |||
| 1481 | static void tegra_msi_teardown_irq(struct msi_controller *chip, | ||
| 1482 | unsigned int irq) | ||
| 1483 | { | ||
| 1484 | struct tegra_msi *msi = to_tegra_msi(chip); | ||
| 1485 | struct irq_data *d = irq_get_irq_data(irq); | ||
| 1486 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | ||
| 1487 | |||
| 1488 | irq_dispose_mapping(irq); | ||
| 1489 | tegra_msi_free(msi, hwirq); | ||
| 1490 | } | ||
| 1491 | |||
| 1492 | static struct irq_chip tegra_msi_irq_chip = { | ||
| 1493 | .name = "Tegra PCIe MSI", | ||
| 1494 | .irq_enable = pci_msi_unmask_irq, | ||
| 1495 | .irq_disable = pci_msi_mask_irq, | ||
| 1496 | .irq_mask = pci_msi_mask_irq, | ||
| 1497 | .irq_unmask = pci_msi_unmask_irq, | ||
| 1498 | }; | ||
| 1499 | |||
| 1500 | static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, | ||
| 1501 | irq_hw_number_t hwirq) | ||
| 1502 | { | ||
| 1503 | irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); | ||
| 1504 | irq_set_chip_data(irq, domain->host_data); | ||
| 1505 | |||
| 1506 | tegra_cpuidle_pcie_irqs_in_use(); | ||
| 1507 | |||
| 1508 | return 0; | ||
| 1509 | } | ||
| 1510 | |||
| 1511 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 1512 | .map = tegra_msi_map, | ||
| 1513 | }; | ||
| 1514 | |||
| 1515 | static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) | ||
| 1516 | { | ||
| 1517 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 1518 | struct platform_device *pdev = to_platform_device(pcie->dev); | ||
| 1519 | struct tegra_msi *msi = &pcie->msi; | ||
| 1520 | struct device *dev = pcie->dev; | ||
| 1521 | int err; | ||
| 1522 | |||
| 1523 | mutex_init(&msi->lock); | ||
| 1524 | |||
| 1525 | msi->chip.dev = dev; | ||
| 1526 | msi->chip.setup_irq = tegra_msi_setup_irq; | ||
| 1527 | msi->chip.teardown_irq = tegra_msi_teardown_irq; | ||
| 1528 | |||
| 1529 | msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, | ||
| 1530 | &msi_domain_ops, &msi->chip); | ||
| 1531 | if (!msi->domain) { | ||
| 1532 | dev_err(dev, "failed to create IRQ domain\n"); | ||
| 1533 | return -ENOMEM; | ||
| 1534 | } | ||
| 1535 | |||
| 1536 | err = platform_get_irq_byname(pdev, "msi"); | ||
| 1537 | if (err < 0) { | ||
| 1538 | dev_err(dev, "failed to get IRQ: %d\n", err); | ||
| 1539 | goto err; | ||
| 1540 | } | ||
| 1541 | |||
| 1542 | msi->irq = err; | ||
| 1543 | |||
| 1544 | err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD, | ||
| 1545 | tegra_msi_irq_chip.name, pcie); | ||
| 1546 | if (err < 0) { | ||
| 1547 | dev_err(dev, "failed to request IRQ: %d\n", err); | ||
| 1548 | goto err; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | /* setup AFI/FPCI range */ | ||
| 1552 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | ||
| 1553 | msi->phys = virt_to_phys((void *)msi->pages); | ||
| 1554 | host->msi = &msi->chip; | ||
| 1555 | |||
| 1556 | return 0; | ||
| 1557 | |||
| 1558 | err: | ||
| 1559 | irq_domain_remove(msi->domain); | ||
| 1560 | return err; | ||
| 1561 | } | ||
| 1562 | |||
| 1563 | static void tegra_pcie_enable_msi(struct tegra_pcie *pcie) | ||
| 1564 | { | ||
| 1565 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1566 | struct tegra_msi *msi = &pcie->msi; | ||
| 1567 | u32 reg; | ||
| 1568 | |||
| 1569 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | ||
| 1570 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); | ||
| 1571 | /* this register is in 4K increments */ | ||
| 1572 | afi_writel(pcie, 1, AFI_MSI_BAR_SZ); | ||
| 1573 | |||
| 1574 | /* enable all MSI vectors */ | ||
| 1575 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0); | ||
| 1576 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1); | ||
| 1577 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2); | ||
| 1578 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3); | ||
| 1579 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4); | ||
| 1580 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5); | ||
| 1581 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6); | ||
| 1582 | afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7); | ||
| 1583 | |||
| 1584 | /* and unmask the MSI interrupt */ | ||
| 1585 | reg = afi_readl(pcie, AFI_INTR_MASK); | ||
| 1586 | reg |= AFI_INTR_MASK_MSI_MASK; | ||
| 1587 | afi_writel(pcie, reg, AFI_INTR_MASK); | ||
| 1588 | } | ||
| 1589 | |||
| 1590 | static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie) | ||
| 1591 | { | ||
| 1592 | struct tegra_msi *msi = &pcie->msi; | ||
| 1593 | unsigned int i, irq; | ||
| 1594 | |||
| 1595 | free_pages(msi->pages, 0); | ||
| 1596 | |||
| 1597 | if (msi->irq > 0) | ||
| 1598 | free_irq(msi->irq, pcie); | ||
| 1599 | |||
| 1600 | for (i = 0; i < INT_PCI_MSI_NR; i++) { | ||
| 1601 | irq = irq_find_mapping(msi->domain, i); | ||
| 1602 | if (irq > 0) | ||
| 1603 | irq_dispose_mapping(irq); | ||
| 1604 | } | ||
| 1605 | |||
| 1606 | irq_domain_remove(msi->domain); | ||
| 1607 | } | ||
| 1608 | |||
| 1609 | static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | ||
| 1610 | { | ||
| 1611 | u32 value; | ||
| 1612 | |||
| 1613 | /* mask the MSI interrupt */ | ||
| 1614 | value = afi_readl(pcie, AFI_INTR_MASK); | ||
| 1615 | value &= ~AFI_INTR_MASK_MSI_MASK; | ||
| 1616 | afi_writel(pcie, value, AFI_INTR_MASK); | ||
| 1617 | |||
| 1618 | /* disable all MSI vectors */ | ||
| 1619 | afi_writel(pcie, 0, AFI_MSI_EN_VEC0); | ||
| 1620 | afi_writel(pcie, 0, AFI_MSI_EN_VEC1); | ||
| 1621 | afi_writel(pcie, 0, AFI_MSI_EN_VEC2); | ||
| 1622 | afi_writel(pcie, 0, AFI_MSI_EN_VEC3); | ||
| 1623 | afi_writel(pcie, 0, AFI_MSI_EN_VEC4); | ||
| 1624 | afi_writel(pcie, 0, AFI_MSI_EN_VEC5); | ||
| 1625 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | ||
| 1626 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | ||
| 1627 | |||
| 1628 | return 0; | ||
| 1629 | } | ||
| 1630 | |||
| 1631 | static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes, | ||
| 1632 | u32 *xbar) | ||
| 1633 | { | ||
| 1634 | struct device *dev = pcie->dev; | ||
| 1635 | struct device_node *np = dev->of_node; | ||
| 1636 | |||
| 1637 | if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { | ||
| 1638 | switch (lanes) { | ||
| 1639 | case 0x010004: | ||
| 1640 | dev_info(dev, "4x1, 1x1 configuration\n"); | ||
| 1641 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401; | ||
| 1642 | return 0; | ||
| 1643 | |||
| 1644 | case 0x010102: | ||
| 1645 | 			dev_info(dev, "2x1, 1x1, 1x1 configuration\n"); | ||
| 1646 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; | ||
| 1647 | return 0; | ||
| 1648 | |||
| 1649 | case 0x010101: | ||
| 1650 | dev_info(dev, "1x1, 1x1, 1x1 configuration\n"); | ||
| 1651 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111; | ||
| 1652 | return 0; | ||
| 1653 | |||
| 1654 | default: | ||
| 1655 | 			dev_info(dev, "invalid lane configuration in DT, " | ||
| 1656 | 				 "switching to default 2x1, 1x1, 1x1 " | ||
| 1657 | 				 "configuration\n"); | ||
| 1658 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211; | ||
| 1659 | return 0; | ||
| 1660 | } | ||
| 1661 | } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") || | ||
| 1662 | of_device_is_compatible(np, "nvidia,tegra210-pcie")) { | ||
| 1663 | switch (lanes) { | ||
| 1664 | case 0x0000104: | ||
| 1665 | dev_info(dev, "4x1, 1x1 configuration\n"); | ||
| 1666 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1; | ||
| 1667 | return 0; | ||
| 1668 | |||
| 1669 | case 0x0000102: | ||
| 1670 | dev_info(dev, "2x1, 1x1 configuration\n"); | ||
| 1671 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1; | ||
| 1672 | return 0; | ||
| 1673 | } | ||
| 1674 | } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { | ||
| 1675 | switch (lanes) { | ||
| 1676 | case 0x00000204: | ||
| 1677 | dev_info(dev, "4x1, 2x1 configuration\n"); | ||
| 1678 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420; | ||
| 1679 | return 0; | ||
| 1680 | |||
| 1681 | case 0x00020202: | ||
| 1682 | dev_info(dev, "2x3 configuration\n"); | ||
| 1683 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222; | ||
| 1684 | return 0; | ||
| 1685 | |||
| 1686 | case 0x00010104: | ||
| 1687 | dev_info(dev, "4x1, 1x2 configuration\n"); | ||
| 1688 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411; | ||
| 1689 | return 0; | ||
| 1690 | } | ||
| 1691 | } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { | ||
| 1692 | switch (lanes) { | ||
| 1693 | case 0x00000004: | ||
| 1694 | dev_info(dev, "single-mode configuration\n"); | ||
| 1695 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE; | ||
| 1696 | return 0; | ||
| 1697 | |||
| 1698 | case 0x00000202: | ||
| 1699 | dev_info(dev, "dual-mode configuration\n"); | ||
| 1700 | *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL; | ||
| 1701 | return 0; | ||
| 1702 | } | ||
| 1703 | } | ||
| 1704 | |||
| 1705 | return -EINVAL; | ||
| 1706 | } | ||
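The lanes value matched above is packed one byte per root port by tegra_pcie_parse_dt() further down (lanes |= value << (index << 3)), so 0x010004 decodes as port 0 with 4 lanes, port 1 unused and port 2 with 1 lane. A standalone sketch of that encoding and its inverse; the per-port counts are made up for illustration:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* e.g. a Tegra186 board: 4 lanes on port 0, 1 lane on port 2 */
	unsigned int num_lanes[3] = { 4, 0, 1 };
	uint32_t lanes = 0;
	unsigned int index;

	for (index = 0; index < 3; index++)
		lanes |= num_lanes[index] << (index << 3);

	printf("lanes = 0x%06x\n", lanes);	/* 0x010004: "4x1, 1x1" */

	/* decode: one byte per port */
	for (index = 0; index < 3; index++)
		printf("port %u: %u lane(s)\n", index,
		       (lanes >> (index << 3)) & 0xff);
	return 0;
}
```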
| 1707 | |||
| 1708 | /* | ||
| 1709 | * Check whether a given set of supplies is available in a device tree node. | ||
| 1710 | * This is used to check whether the new or the legacy device tree bindings | ||
| 1711 | * should be used. | ||
| 1712 | */ | ||
| 1713 | static bool of_regulator_bulk_available(struct device_node *np, | ||
| 1714 | struct regulator_bulk_data *supplies, | ||
| 1715 | unsigned int num_supplies) | ||
| 1716 | { | ||
| 1717 | char property[32]; | ||
| 1718 | unsigned int i; | ||
| 1719 | |||
| 1720 | for (i = 0; i < num_supplies; i++) { | ||
| 1721 | snprintf(property, 32, "%s-supply", supplies[i].supply); | ||
| 1722 | |||
| 1723 | if (of_find_property(np, property, NULL) == NULL) | ||
| 1724 | return false; | ||
| 1725 | } | ||
| 1726 | |||
| 1727 | return true; | ||
| 1728 | } | ||
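The check relies on the regulator framework's naming convention: a supply called "vdd" is wired up through a "vdd-supply" device tree property. A trivial demonstration of the property-name construction, using the Tegra20 legacy supply names that appear just below:

```c
#include <stdio.h>

int main(void)
{
	const char *supplies[] = { "pex-clk", "vdd" };
	char property[32];
	unsigned int i;

	for (i = 0; i < 2; i++) {
		/* same formatting as the driver: "<supply>-supply" */
		snprintf(property, sizeof(property), "%s-supply",
			 supplies[i]);
		printf("would look up DT property \"%s\"\n", property);
	}
	return 0;
}
```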
| 1729 | |||
| 1730 | /* | ||
| 1731 | * Old versions of the device tree binding for this device used a set of power | ||
| 1732 | * supplies that didn't match the hardware inputs. This happened to work for a | ||
| 1733 | * number of cases but is not future-proof. However, to preserve backwards- | ||
| 1734 | * compatibility with old device trees, this function will try to use the old | ||
| 1735 | * set of supplies. | ||
| 1736 | */ | ||
| 1737 | static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie) | ||
| 1738 | { | ||
| 1739 | struct device *dev = pcie->dev; | ||
| 1740 | struct device_node *np = dev->of_node; | ||
| 1741 | |||
| 1742 | if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) | ||
| 1743 | pcie->num_supplies = 3; | ||
| 1744 | else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) | ||
| 1745 | pcie->num_supplies = 2; | ||
| 1746 | |||
| 1747 | if (pcie->num_supplies == 0) { | ||
| 1748 | dev_err(dev, "device %pOF not supported in legacy mode\n", np); | ||
| 1749 | return -ENODEV; | ||
| 1750 | } | ||
| 1751 | |||
| 1752 | pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, | ||
| 1753 | sizeof(*pcie->supplies), | ||
| 1754 | GFP_KERNEL); | ||
| 1755 | if (!pcie->supplies) | ||
| 1756 | return -ENOMEM; | ||
| 1757 | |||
| 1758 | pcie->supplies[0].supply = "pex-clk"; | ||
| 1759 | pcie->supplies[1].supply = "vdd"; | ||
| 1760 | |||
| 1761 | if (pcie->num_supplies > 2) | ||
| 1762 | pcie->supplies[2].supply = "avdd"; | ||
| 1763 | |||
| 1764 | return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies); | ||
| 1765 | } | ||
| 1766 | |||
| 1767 | /* | ||
| 1768 | * Obtains the list of regulators required for a particular generation of the | ||
| 1769 | * IP block. | ||
| 1770 | * | ||
| 1771 | * This would've been nice to do simply by providing static tables for use | ||
| 1772 | * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky | ||
| 1773 | * in that it has two pairs of AVDD_PEX and VDD_PEX supplies (PEXA and PEXB) | ||
| 1774 | * and either seems to be optional depending on which ports are being used. | ||
| 1775 | */ | ||
| 1776 | static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) | ||
| 1777 | { | ||
| 1778 | struct device *dev = pcie->dev; | ||
| 1779 | struct device_node *np = dev->of_node; | ||
| 1780 | unsigned int i = 0; | ||
| 1781 | |||
| 1782 | if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) { | ||
| 1783 | pcie->num_supplies = 4; | ||
| 1784 | |||
| 1785 | pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, | ||
| 1786 | sizeof(*pcie->supplies), | ||
| 1787 | GFP_KERNEL); | ||
| 1788 | if (!pcie->supplies) | ||
| 1789 | return -ENOMEM; | ||
| 1790 | |||
| 1791 | pcie->supplies[i++].supply = "dvdd-pex"; | ||
| 1792 | pcie->supplies[i++].supply = "hvdd-pex-pll"; | ||
| 1793 | pcie->supplies[i++].supply = "hvdd-pex"; | ||
| 1794 | pcie->supplies[i++].supply = "vddio-pexctl-aud"; | ||
| 1795 | } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { | ||
| 1796 | pcie->num_supplies = 6; | ||
| 1797 | |||
| 1798 | pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, | ||
| 1799 | sizeof(*pcie->supplies), | ||
| 1800 | GFP_KERNEL); | ||
| 1801 | if (!pcie->supplies) | ||
| 1802 | return -ENOMEM; | ||
| 1803 | |||
| 1804 | pcie->supplies[i++].supply = "avdd-pll-uerefe"; | ||
| 1805 | pcie->supplies[i++].supply = "hvddio-pex"; | ||
| 1806 | pcie->supplies[i++].supply = "dvddio-pex"; | ||
| 1807 | pcie->supplies[i++].supply = "dvdd-pex-pll"; | ||
| 1808 | pcie->supplies[i++].supply = "hvdd-pex-pll-e"; | ||
| 1809 | pcie->supplies[i++].supply = "vddio-pex-ctl"; | ||
| 1810 | } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { | ||
| 1811 | pcie->num_supplies = 7; | ||
| 1812 | |||
| 1813 | pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, | ||
| 1814 | sizeof(*pcie->supplies), | ||
| 1815 | GFP_KERNEL); | ||
| 1816 | if (!pcie->supplies) | ||
| 1817 | return -ENOMEM; | ||
| 1818 | |||
| 1819 | pcie->supplies[i++].supply = "avddio-pex"; | ||
| 1820 | pcie->supplies[i++].supply = "dvddio-pex"; | ||
| 1821 | pcie->supplies[i++].supply = "avdd-pex-pll"; | ||
| 1822 | pcie->supplies[i++].supply = "hvdd-pex"; | ||
| 1823 | pcie->supplies[i++].supply = "hvdd-pex-pll-e"; | ||
| 1824 | pcie->supplies[i++].supply = "vddio-pex-ctl"; | ||
| 1825 | pcie->supplies[i++].supply = "avdd-pll-erefe"; | ||
| 1826 | } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { | ||
| 1827 | bool need_pexa = false, need_pexb = false; | ||
| 1828 | |||
| 1829 | /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */ | ||
| 1830 | if (lane_mask & 0x0f) | ||
| 1831 | need_pexa = true; | ||
| 1832 | |||
| 1833 | /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */ | ||
| 1834 | if (lane_mask & 0x30) | ||
| 1835 | need_pexb = true; | ||
| 1836 | |||
| 1837 | pcie->num_supplies = 4 + (need_pexa ? 2 : 0) + | ||
| 1838 | (need_pexb ? 2 : 0); | ||
| 1839 | |||
| 1840 | pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, | ||
| 1841 | sizeof(*pcie->supplies), | ||
| 1842 | GFP_KERNEL); | ||
| 1843 | if (!pcie->supplies) | ||
| 1844 | return -ENOMEM; | ||
| 1845 | |||
| 1846 | pcie->supplies[i++].supply = "avdd-pex-pll"; | ||
| 1847 | pcie->supplies[i++].supply = "hvdd-pex"; | ||
| 1848 | pcie->supplies[i++].supply = "vddio-pex-ctl"; | ||
| 1849 | pcie->supplies[i++].supply = "avdd-plle"; | ||
| 1850 | |||
| 1851 | if (need_pexa) { | ||
| 1852 | pcie->supplies[i++].supply = "avdd-pexa"; | ||
| 1853 | pcie->supplies[i++].supply = "vdd-pexa"; | ||
| 1854 | } | ||
| 1855 | |||
| 1856 | if (need_pexb) { | ||
| 1857 | pcie->supplies[i++].supply = "avdd-pexb"; | ||
| 1858 | pcie->supplies[i++].supply = "vdd-pexb"; | ||
| 1859 | } | ||
| 1860 | } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) { | ||
| 1861 | pcie->num_supplies = 5; | ||
| 1862 | |||
| 1863 | pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, | ||
| 1864 | sizeof(*pcie->supplies), | ||
| 1865 | GFP_KERNEL); | ||
| 1866 | if (!pcie->supplies) | ||
| 1867 | return -ENOMEM; | ||
| 1868 | |||
| 1869 | pcie->supplies[0].supply = "avdd-pex"; | ||
| 1870 | pcie->supplies[1].supply = "vdd-pex"; | ||
| 1871 | pcie->supplies[2].supply = "avdd-pex-pll"; | ||
| 1872 | pcie->supplies[3].supply = "avdd-plle"; | ||
| 1873 | pcie->supplies[4].supply = "vddio-pex-clk"; | ||
| 1874 | } | ||
| 1875 | |||
| 1876 | if (of_regulator_bulk_available(dev->of_node, pcie->supplies, | ||
| 1877 | pcie->num_supplies)) | ||
| 1878 | return devm_regulator_bulk_get(dev, pcie->num_supplies, | ||
| 1879 | pcie->supplies); | ||
| 1880 | |||
| 1881 | /* | ||
| 1882 | * If not all regulators are available for this new scheme, assume | ||
| 1883 | * that the device tree complies with an older version of the device | ||
| 1884 | * tree binding. | ||
| 1885 | */ | ||
| 1886 | dev_info(dev, "using legacy DT binding for power supplies\n"); | ||
| 1887 | |||
| 1888 | devm_kfree(dev, pcie->supplies); | ||
| 1889 | pcie->num_supplies = 0; | ||
| 1890 | |||
| 1891 | return tegra_pcie_get_legacy_regulators(pcie); | ||
| 1892 | } | ||
| 1893 | |||
| 1894 | static int tegra_pcie_parse_dt(struct tegra_pcie *pcie) | ||
| 1895 | { | ||
| 1896 | struct device *dev = pcie->dev; | ||
| 1897 | struct device_node *np = dev->of_node, *port; | ||
| 1898 | const struct tegra_pcie_soc *soc = pcie->soc; | ||
| 1899 | struct of_pci_range_parser parser; | ||
| 1900 | struct of_pci_range range; | ||
| 1901 | u32 lanes = 0, mask = 0; | ||
| 1902 | unsigned int lane = 0; | ||
| 1903 | struct resource res; | ||
| 1904 | int err; | ||
| 1905 | |||
| 1906 | if (of_pci_range_parser_init(&parser, np)) { | ||
| 1907 | dev_err(dev, "missing \"ranges\" property\n"); | ||
| 1908 | return -EINVAL; | ||
| 1909 | } | ||
| 1910 | |||
| 1911 | for_each_of_pci_range(&parser, &range) { | ||
| 1912 | err = of_pci_range_to_resource(&range, np, &res); | ||
| 1913 | if (err < 0) | ||
| 1914 | return err; | ||
| 1915 | |||
| 1916 | switch (res.flags & IORESOURCE_TYPE_BITS) { | ||
| 1917 | case IORESOURCE_IO: | ||
| 1918 | /* Track the bus -> CPU I/O mapping offset. */ | ||
| 1919 | pcie->offset.io = res.start - range.pci_addr; | ||
| 1920 | |||
| 1921 | memcpy(&pcie->pio, &res, sizeof(res)); | ||
| 1922 | pcie->pio.name = np->full_name; | ||
| 1923 | |||
| 1924 | /* | ||
| 1925 | * The Tegra PCIe host bridge uses this to program the | ||
| 1926 | * mapping of the I/O space to the physical address, | ||
| 1927 | * so we override the .start and .end fields here that | ||
| 1928 | * of_pci_range_to_resource() converted to I/O space. | ||
| 1929 | * We also set the IORESOURCE_MEM type to clarify that | ||
| 1930 | * the resource is in the physical memory space. | ||
| 1931 | */ | ||
| 1932 | pcie->io.start = range.cpu_addr; | ||
| 1933 | pcie->io.end = range.cpu_addr + range.size - 1; | ||
| 1934 | pcie->io.flags = IORESOURCE_MEM; | ||
| 1935 | pcie->io.name = "I/O"; | ||
| 1936 | |||
| 1937 | memcpy(&res, &pcie->io, sizeof(res)); | ||
| 1938 | break; | ||
| 1939 | |||
| 1940 | case IORESOURCE_MEM: | ||
| 1941 | /* | ||
| 1942 | * Track the bus -> CPU memory mapping offset. This | ||
| 1943 | * assumes that the prefetchable and non-prefetchable | ||
| 1944 | * regions will be the last of type IORESOURCE_MEM in | ||
| 1945 | * the ranges property. | ||
| 1946 | 			 */ | ||
| 1947 | pcie->offset.mem = res.start - range.pci_addr; | ||
| 1948 | |||
| 1949 | if (res.flags & IORESOURCE_PREFETCH) { | ||
| 1950 | memcpy(&pcie->prefetch, &res, sizeof(res)); | ||
| 1951 | pcie->prefetch.name = "prefetchable"; | ||
| 1952 | } else { | ||
| 1953 | memcpy(&pcie->mem, &res, sizeof(res)); | ||
| 1954 | pcie->mem.name = "non-prefetchable"; | ||
| 1955 | } | ||
| 1956 | break; | ||
| 1957 | } | ||
| 1958 | } | ||
| 1959 | |||
| 1960 | err = of_pci_parse_bus_range(np, &pcie->busn); | ||
| 1961 | if (err < 0) { | ||
| 1962 | 		dev_err(dev, "failed to parse bus-range property: %d\n", err); | ||
| 1963 | pcie->busn.name = np->name; | ||
| 1964 | pcie->busn.start = 0; | ||
| 1965 | pcie->busn.end = 0xff; | ||
| 1966 | pcie->busn.flags = IORESOURCE_BUS; | ||
| 1967 | } | ||
| 1968 | |||
| 1969 | /* parse root ports */ | ||
| 1970 | for_each_child_of_node(np, port) { | ||
| 1971 | struct tegra_pcie_port *rp; | ||
| 1972 | unsigned int index; | ||
| 1973 | u32 value; | ||
| 1974 | |||
| 1975 | err = of_pci_get_devfn(port); | ||
| 1976 | if (err < 0) { | ||
| 1977 | dev_err(dev, "failed to parse address: %d\n", err); | ||
| 1978 | return err; | ||
| 1979 | } | ||
| 1980 | |||
| 1981 | index = PCI_SLOT(err); | ||
| 1982 | |||
| 1983 | if (index < 1 || index > soc->num_ports) { | ||
| 1984 | 			dev_err(dev, "invalid port number: %u\n", index); | ||
| 1985 | return -EINVAL; | ||
| 1986 | } | ||
| 1987 | |||
| 1988 | index--; | ||
| 1989 | |||
| 1990 | err = of_property_read_u32(port, "nvidia,num-lanes", &value); | ||
| 1991 | if (err < 0) { | ||
| 1992 | dev_err(dev, "failed to parse # of lanes: %d\n", | ||
| 1993 | err); | ||
| 1994 | return err; | ||
| 1995 | } | ||
| 1996 | |||
| 1997 | if (value > 16) { | ||
| 1998 | dev_err(dev, "invalid # of lanes: %u\n", value); | ||
| 1999 | return -EINVAL; | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | lanes |= value << (index << 3); | ||
| 2003 | |||
| 2004 | if (!of_device_is_available(port)) { | ||
| 2005 | lane += value; | ||
| 2006 | continue; | ||
| 2007 | } | ||
| 2008 | |||
| 2009 | mask |= ((1 << value) - 1) << lane; | ||
| 2010 | lane += value; | ||
| 2011 | |||
| 2012 | rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL); | ||
| 2013 | if (!rp) | ||
| 2014 | return -ENOMEM; | ||
| 2015 | |||
| 2016 | err = of_address_to_resource(port, 0, &rp->regs); | ||
| 2017 | if (err < 0) { | ||
| 2018 | dev_err(dev, "failed to parse address: %d\n", err); | ||
| 2019 | return err; | ||
| 2020 | } | ||
| 2021 | |||
| 2022 | INIT_LIST_HEAD(&rp->list); | ||
| 2023 | rp->index = index; | ||
| 2024 | rp->lanes = value; | ||
| 2025 | rp->pcie = pcie; | ||
| 2026 | rp->np = port; | ||
| 2027 | |||
| 2028 | rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs); | ||
| 2029 | if (IS_ERR(rp->base)) | ||
| 2030 | return PTR_ERR(rp->base); | ||
| 2031 | |||
| 2032 | list_add_tail(&rp->list, &pcie->ports); | ||
| 2033 | } | ||
| 2034 | |||
| 2035 | err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config); | ||
| 2036 | if (err < 0) { | ||
| 2037 | dev_err(dev, "invalid lane configuration\n"); | ||
| 2038 | return err; | ||
| 2039 | } | ||
| 2040 | |||
| 2041 | err = tegra_pcie_get_regulators(pcie, mask); | ||
| 2042 | if (err < 0) | ||
| 2043 | return err; | ||
| 2044 | |||
| 2045 | return 0; | ||
| 2046 | } | ||
| 2047 | |||
| 2048 | /* | ||
| 2049 | * FIXME: If no PCIe cards are attached, calling this function can | ||
| 2050 | * noticeably increase boot time because of its long timeout | ||
| 2051 | * loops. | ||
| 2052 | */ | ||
| 2053 | #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */ | ||
| 2054 | static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port) | ||
| 2055 | { | ||
| 2056 | struct device *dev = port->pcie->dev; | ||
| 2057 | unsigned int retries = 3; | ||
| 2058 | unsigned long value; | ||
| 2059 | |||
| 2060 | /* override presence detection */ | ||
| 2061 | value = readl(port->base + RP_PRIV_MISC); | ||
| 2062 | value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT; | ||
| 2063 | value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT; | ||
| 2064 | writel(value, port->base + RP_PRIV_MISC); | ||
| 2065 | |||
| 2066 | do { | ||
| 2067 | unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT; | ||
| 2068 | |||
| 2069 | do { | ||
| 2070 | value = readl(port->base + RP_VEND_XP); | ||
| 2071 | |||
| 2072 | if (value & RP_VEND_XP_DL_UP) | ||
| 2073 | break; | ||
| 2074 | |||
| 2075 | usleep_range(1000, 2000); | ||
| 2076 | } while (--timeout); | ||
| 2077 | |||
| 2078 | if (!timeout) { | ||
| 2079 | dev_err(dev, "link %u down, retrying\n", port->index); | ||
| 2080 | goto retry; | ||
| 2081 | } | ||
| 2082 | |||
| 2083 | timeout = TEGRA_PCIE_LINKUP_TIMEOUT; | ||
| 2084 | |||
| 2085 | do { | ||
| 2086 | value = readl(port->base + RP_LINK_CONTROL_STATUS); | ||
| 2087 | |||
| 2088 | if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) | ||
| 2089 | return true; | ||
| 2090 | |||
| 2091 | usleep_range(1000, 2000); | ||
| 2092 | } while (--timeout); | ||
| 2093 | |||
| 2094 | retry: | ||
| 2095 | tegra_pcie_port_reset(port); | ||
| 2096 | } while (--retries); | ||
| 2097 | |||
| 2098 | return false; | ||
| 2099 | } | ||
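Structurally this is a nested poll: an inner loop of up to TEGRA_PCIE_LINKUP_TIMEOUT sleeps waiting for DL_UP, a second inner loop waiting for DL_LINK_ACTIVE, and an outer loop of three retries that resets the port between attempts. The skeleton, reduced to a single poll stage against a fake register so it can run anywhere (the 200/3 thresholds mirror the driver; the fake link model is invented):

```c
#include <stdio.h>
#include <stdbool.h>

#define LINKUP_TIMEOUT 200

static int polls;

/* fake register read: pretend the link trains after 50 polls */
static bool link_up(void)
{
	return ++polls > 50;
}

static bool check_link(void)
{
	unsigned int retries = 3;

	do {
		unsigned int timeout = LINKUP_TIMEOUT;

		do {
			if (link_up())
				return true;
			/* the driver sleeps 1-2 ms per iteration here */
		} while (--timeout);

		printf("link down, retrying\n");
		/* tegra_pcie_port_reset() would run here */
	} while (--retries);

	return false;
}

int main(void)
{
	bool up = check_link();

	printf("link %s after %d poll(s)\n", up ? "up" : "down", polls);
	return 0;
}
```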
| 2100 | |||
| 2101 | static void tegra_pcie_enable_ports(struct tegra_pcie *pcie) | ||
| 2102 | { | ||
| 2103 | struct device *dev = pcie->dev; | ||
| 2104 | struct tegra_pcie_port *port, *tmp; | ||
| 2105 | |||
| 2106 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) { | ||
| 2107 | dev_info(dev, "probing port %u, using %u lanes\n", | ||
| 2108 | port->index, port->lanes); | ||
| 2109 | |||
| 2110 | tegra_pcie_port_enable(port); | ||
| 2111 | |||
| 2112 | if (tegra_pcie_port_check_link(port)) | ||
| 2113 | continue; | ||
| 2114 | |||
| 2115 | dev_info(dev, "link %u down, ignoring\n", port->index); | ||
| 2116 | |||
| 2117 | tegra_pcie_port_disable(port); | ||
| 2118 | tegra_pcie_port_free(port); | ||
| 2119 | } | ||
| 2120 | } | ||
| 2121 | |||
| 2122 | static void tegra_pcie_disable_ports(struct tegra_pcie *pcie) | ||
| 2123 | { | ||
| 2124 | struct tegra_pcie_port *port, *tmp; | ||
| 2125 | |||
| 2126 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) | ||
| 2127 | tegra_pcie_port_disable(port); | ||
| 2128 | } | ||
| 2129 | |||
| 2130 | static const struct tegra_pcie_port_soc tegra20_pcie_ports[] = { | ||
| 2131 | { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, | ||
| 2132 | { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, | ||
| 2133 | }; | ||
| 2134 | |||
| 2135 | static const struct tegra_pcie_soc tegra20_pcie = { | ||
| 2136 | .num_ports = 2, | ||
| 2137 | .ports = tegra20_pcie_ports, | ||
| 2138 | .msi_base_shift = 0, | ||
| 2139 | .pads_pll_ctl = PADS_PLL_CTL_TEGRA20, | ||
| 2140 | .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10, | ||
| 2141 | .pads_refclk_cfg0 = 0xfa5cfa5c, | ||
| 2142 | .has_pex_clkreq_en = false, | ||
| 2143 | .has_pex_bias_ctrl = false, | ||
| 2144 | .has_intr_prsnt_sense = false, | ||
| 2145 | .has_cml_clk = false, | ||
| 2146 | .has_gen2 = false, | ||
| 2147 | .force_pca_enable = false, | ||
| 2148 | .program_uphy = true, | ||
| 2149 | }; | ||
| 2150 | |||
| 2151 | static const struct tegra_pcie_port_soc tegra30_pcie_ports[] = { | ||
| 2152 | { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, | ||
| 2153 | { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, | ||
| 2154 | { .pme.turnoff_bit = 16, .pme.ack_bit = 18 }, | ||
| 2155 | }; | ||
| 2156 | |||
| 2157 | static const struct tegra_pcie_soc tegra30_pcie = { | ||
| 2158 | .num_ports = 3, | ||
| 2159 | .ports = tegra30_pcie_ports, | ||
| 2160 | .msi_base_shift = 8, | ||
| 2161 | .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, | ||
| 2162 | .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, | ||
| 2163 | .pads_refclk_cfg0 = 0xfa5cfa5c, | ||
| 2164 | .pads_refclk_cfg1 = 0xfa5cfa5c, | ||
| 2165 | .has_pex_clkreq_en = true, | ||
| 2166 | .has_pex_bias_ctrl = true, | ||
| 2167 | .has_intr_prsnt_sense = true, | ||
| 2168 | .has_cml_clk = true, | ||
| 2169 | .has_gen2 = false, | ||
| 2170 | .force_pca_enable = false, | ||
| 2171 | .program_uphy = true, | ||
| 2172 | }; | ||
| 2173 | |||
| 2174 | static const struct tegra_pcie_soc tegra124_pcie = { | ||
| 2175 | .num_ports = 2, | ||
| 2176 | .ports = tegra20_pcie_ports, | ||
| 2177 | .msi_base_shift = 8, | ||
| 2178 | .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, | ||
| 2179 | .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, | ||
| 2180 | .pads_refclk_cfg0 = 0x44ac44ac, | ||
| 2181 | .has_pex_clkreq_en = true, | ||
| 2182 | .has_pex_bias_ctrl = true, | ||
| 2183 | .has_intr_prsnt_sense = true, | ||
| 2184 | .has_cml_clk = true, | ||
| 2185 | .has_gen2 = true, | ||
| 2186 | .force_pca_enable = false, | ||
| 2187 | .program_uphy = true, | ||
| 2188 | }; | ||
| 2189 | |||
| 2190 | static const struct tegra_pcie_soc tegra210_pcie = { | ||
| 2191 | .num_ports = 2, | ||
| 2192 | .ports = tegra20_pcie_ports, | ||
| 2193 | .msi_base_shift = 8, | ||
| 2194 | .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, | ||
| 2195 | .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, | ||
| 2196 | .pads_refclk_cfg0 = 0x90b890b8, | ||
| 2197 | .has_pex_clkreq_en = true, | ||
| 2198 | .has_pex_bias_ctrl = true, | ||
| 2199 | .has_intr_prsnt_sense = true, | ||
| 2200 | .has_cml_clk = true, | ||
| 2201 | .has_gen2 = true, | ||
| 2202 | .force_pca_enable = true, | ||
| 2203 | .program_uphy = true, | ||
| 2204 | }; | ||
| 2205 | |||
| 2206 | static const struct tegra_pcie_port_soc tegra186_pcie_ports[] = { | ||
| 2207 | { .pme.turnoff_bit = 0, .pme.ack_bit = 5 }, | ||
| 2208 | { .pme.turnoff_bit = 8, .pme.ack_bit = 10 }, | ||
| 2209 | { .pme.turnoff_bit = 12, .pme.ack_bit = 14 }, | ||
| 2210 | }; | ||
| 2211 | |||
| 2212 | static const struct tegra_pcie_soc tegra186_pcie = { | ||
| 2213 | .num_ports = 3, | ||
| 2214 | .ports = tegra186_pcie_ports, | ||
| 2215 | .msi_base_shift = 8, | ||
| 2216 | .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, | ||
| 2217 | .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, | ||
| 2218 | .pads_refclk_cfg0 = 0x80b880b8, | ||
| 2219 | .pads_refclk_cfg1 = 0x000480b8, | ||
| 2220 | .has_pex_clkreq_en = true, | ||
| 2221 | .has_pex_bias_ctrl = true, | ||
| 2222 | .has_intr_prsnt_sense = true, | ||
| 2223 | .has_cml_clk = false, | ||
| 2224 | .has_gen2 = true, | ||
| 2225 | .force_pca_enable = false, | ||
| 2226 | .program_uphy = false, | ||
| 2227 | }; | ||
| 2228 | |||
| 2229 | static const struct of_device_id tegra_pcie_of_match[] = { | ||
| 2230 | { .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie }, | ||
| 2231 | { .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie }, | ||
| 2232 | { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie }, | ||
| 2233 | { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie }, | ||
| 2234 | { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie }, | ||
| 2235 | { }, | ||
| 2236 | }; | ||
| 2237 | |||
| 2238 | static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos) | ||
| 2239 | { | ||
| 2240 | struct tegra_pcie *pcie = s->private; | ||
| 2241 | |||
| 2242 | if (list_empty(&pcie->ports)) | ||
| 2243 | return NULL; | ||
| 2244 | |||
| 2245 | seq_printf(s, "Index Status\n"); | ||
| 2246 | |||
| 2247 | return seq_list_start(&pcie->ports, *pos); | ||
| 2248 | } | ||
| 2249 | |||
| 2250 | static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos) | ||
| 2251 | { | ||
| 2252 | struct tegra_pcie *pcie = s->private; | ||
| 2253 | |||
| 2254 | return seq_list_next(v, &pcie->ports, pos); | ||
| 2255 | } | ||
| 2256 | |||
| 2257 | static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v) | ||
| 2258 | { | ||
| 2259 | } | ||
| 2260 | |||
| 2261 | static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v) | ||
| 2262 | { | ||
| 2263 | bool up = false, active = false; | ||
| 2264 | struct tegra_pcie_port *port; | ||
| 2265 | unsigned int value; | ||
| 2266 | |||
| 2267 | port = list_entry(v, struct tegra_pcie_port, list); | ||
| 2268 | |||
| 2269 | value = readl(port->base + RP_VEND_XP); | ||
| 2270 | |||
| 2271 | if (value & RP_VEND_XP_DL_UP) | ||
| 2272 | up = true; | ||
| 2273 | |||
| 2274 | value = readl(port->base + RP_LINK_CONTROL_STATUS); | ||
| 2275 | |||
| 2276 | if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE) | ||
| 2277 | active = true; | ||
| 2278 | |||
| 2279 | seq_printf(s, "%2u ", port->index); | ||
| 2280 | |||
| 2281 | if (up) | ||
| 2282 | seq_printf(s, "up"); | ||
| 2283 | |||
| 2284 | if (active) { | ||
| 2285 | if (up) | ||
| 2286 | seq_printf(s, ", "); | ||
| 2287 | |||
| 2288 | seq_printf(s, "active"); | ||
| 2289 | } | ||
| 2290 | |||
| 2291 | seq_printf(s, "\n"); | ||
| 2292 | return 0; | ||
| 2293 | } | ||
| 2294 | |||
| 2295 | static const struct seq_operations tegra_pcie_ports_seq_ops = { | ||
| 2296 | .start = tegra_pcie_ports_seq_start, | ||
| 2297 | .next = tegra_pcie_ports_seq_next, | ||
| 2298 | .stop = tegra_pcie_ports_seq_stop, | ||
| 2299 | .show = tegra_pcie_ports_seq_show, | ||
| 2300 | }; | ||
| 2301 | |||
| 2302 | static int tegra_pcie_ports_open(struct inode *inode, struct file *file) | ||
| 2303 | { | ||
| 2304 | struct tegra_pcie *pcie = inode->i_private; | ||
| 2305 | struct seq_file *s; | ||
| 2306 | int err; | ||
| 2307 | |||
| 2308 | err = seq_open(file, &tegra_pcie_ports_seq_ops); | ||
| 2309 | if (err) | ||
| 2310 | return err; | ||
| 2311 | |||
| 2312 | s = file->private_data; | ||
| 2313 | s->private = pcie; | ||
| 2314 | |||
| 2315 | return 0; | ||
| 2316 | } | ||
| 2317 | |||
| 2318 | static const struct file_operations tegra_pcie_ports_ops = { | ||
| 2319 | .owner = THIS_MODULE, | ||
| 2320 | .open = tegra_pcie_ports_open, | ||
| 2321 | .read = seq_read, | ||
| 2322 | .llseek = seq_lseek, | ||
| 2323 | .release = seq_release, | ||
| 2324 | }; | ||
| 2325 | |||
| 2326 | static void tegra_pcie_debugfs_exit(struct tegra_pcie *pcie) | ||
| 2327 | { | ||
| 2328 | debugfs_remove_recursive(pcie->debugfs); | ||
| 2329 | pcie->debugfs = NULL; | ||
| 2330 | } | ||
| 2331 | |||
| 2332 | static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie) | ||
| 2333 | { | ||
| 2334 | struct dentry *file; | ||
| 2335 | |||
| 2336 | pcie->debugfs = debugfs_create_dir("pcie", NULL); | ||
| 2337 | if (!pcie->debugfs) | ||
| 2338 | return -ENOMEM; | ||
| 2339 | |||
| 2340 | file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs, | ||
| 2341 | pcie, &tegra_pcie_ports_ops); | ||
| 2342 | if (!file) | ||
| 2343 | goto remove; | ||
| 2344 | |||
| 2345 | return 0; | ||
| 2346 | |||
| 2347 | remove: | ||
| 2348 | tegra_pcie_debugfs_exit(pcie); | ||
| 2349 | return -ENOMEM; | ||
| 2350 | } | ||
| 2351 | |||
| 2352 | static int tegra_pcie_probe(struct platform_device *pdev) | ||
| 2353 | { | ||
| 2354 | struct device *dev = &pdev->dev; | ||
| 2355 | struct pci_host_bridge *host; | ||
| 2356 | struct tegra_pcie *pcie; | ||
| 2357 | struct pci_bus *child; | ||
| 2358 | int err; | ||
| 2359 | |||
| 2360 | host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 2361 | if (!host) | ||
| 2362 | return -ENOMEM; | ||
| 2363 | |||
| 2364 | pcie = pci_host_bridge_priv(host); | ||
| 2365 | host->sysdata = pcie; | ||
| 2366 | platform_set_drvdata(pdev, pcie); | ||
| 2367 | |||
| 2368 | pcie->soc = of_device_get_match_data(dev); | ||
| 2369 | INIT_LIST_HEAD(&pcie->ports); | ||
| 2370 | pcie->dev = dev; | ||
| 2371 | |||
| 2372 | err = tegra_pcie_parse_dt(pcie); | ||
| 2373 | if (err < 0) | ||
| 2374 | return err; | ||
| 2375 | |||
| 2376 | err = tegra_pcie_get_resources(pcie); | ||
| 2377 | if (err < 0) { | ||
| 2378 | dev_err(dev, "failed to request resources: %d\n", err); | ||
| 2379 | return err; | ||
| 2380 | } | ||
| 2381 | |||
| 2382 | err = tegra_pcie_msi_setup(pcie); | ||
| 2383 | if (err < 0) { | ||
| 2384 | dev_err(dev, "failed to enable MSI support: %d\n", err); | ||
| 2385 | goto put_resources; | ||
| 2386 | } | ||
| 2387 | |||
| 2388 | pm_runtime_enable(pcie->dev); | ||
| 2389 | err = pm_runtime_get_sync(pcie->dev); | ||
| 2390 | 	if (err < 0) { | ||
| 2391 | 		dev_err(dev, "failed to enable PCIe controller: %d\n", err); | ||
| 2392 | goto teardown_msi; | ||
| 2393 | } | ||
| 2394 | |||
| 2395 | err = tegra_pcie_request_resources(pcie); | ||
| 2396 | if (err) | ||
| 2397 | goto pm_runtime_put; | ||
| 2398 | |||
| 2399 | host->busnr = pcie->busn.start; | ||
| 2400 | host->dev.parent = &pdev->dev; | ||
| 2401 | host->ops = &tegra_pcie_ops; | ||
| 2402 | host->map_irq = tegra_pcie_map_irq; | ||
| 2403 | host->swizzle_irq = pci_common_swizzle; | ||
| 2404 | |||
| 2405 | err = pci_scan_root_bus_bridge(host); | ||
| 2406 | if (err < 0) { | ||
| 2407 | dev_err(dev, "failed to register host: %d\n", err); | ||
| 2408 | goto free_resources; | ||
| 2409 | } | ||
| 2410 | |||
| 2411 | pci_bus_size_bridges(host->bus); | ||
| 2412 | pci_bus_assign_resources(host->bus); | ||
| 2413 | |||
| 2414 | list_for_each_entry(child, &host->bus->children, node) | ||
| 2415 | pcie_bus_configure_settings(child); | ||
| 2416 | |||
| 2417 | pci_bus_add_devices(host->bus); | ||
| 2418 | |||
| 2419 | if (IS_ENABLED(CONFIG_DEBUG_FS)) { | ||
| 2420 | err = tegra_pcie_debugfs_init(pcie); | ||
| 2421 | if (err < 0) | ||
| 2422 | dev_err(dev, "failed to setup debugfs: %d\n", err); | ||
| 2423 | } | ||
| 2424 | |||
| 2425 | return 0; | ||
| 2426 | |||
| 2427 | free_resources: | ||
| 2428 | tegra_pcie_free_resources(pcie); | ||
| 2429 | pm_runtime_put: | ||
| 2430 | pm_runtime_put_sync(pcie->dev); | ||
| 2431 | pm_runtime_disable(pcie->dev); | ||
| 2432 | teardown_msi: | ||
| 2433 | tegra_pcie_msi_teardown(pcie); | ||
| 2434 | put_resources: | ||
| 2435 | tegra_pcie_put_resources(pcie); | ||
| 2436 | return err; | ||
| 2437 | } | ||
| 2438 | |||
| 2439 | static int tegra_pcie_remove(struct platform_device *pdev) | ||
| 2440 | { | ||
| 2441 | struct tegra_pcie *pcie = platform_get_drvdata(pdev); | ||
| 2442 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 2443 | struct tegra_pcie_port *port, *tmp; | ||
| 2444 | |||
| 2445 | if (IS_ENABLED(CONFIG_DEBUG_FS)) | ||
| 2446 | tegra_pcie_debugfs_exit(pcie); | ||
| 2447 | |||
| 2448 | pci_stop_root_bus(host->bus); | ||
| 2449 | pci_remove_root_bus(host->bus); | ||
| 2450 | tegra_pcie_free_resources(pcie); | ||
| 2451 | pm_runtime_put_sync(pcie->dev); | ||
| 2452 | pm_runtime_disable(pcie->dev); | ||
| 2453 | |||
| 2454 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 2455 | tegra_pcie_msi_teardown(pcie); | ||
| 2456 | |||
| 2457 | tegra_pcie_put_resources(pcie); | ||
| 2458 | |||
| 2459 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) | ||
| 2460 | tegra_pcie_port_free(port); | ||
| 2461 | |||
| 2462 | return 0; | ||
| 2463 | } | ||
| 2464 | |||
| 2465 | static int __maybe_unused tegra_pcie_pm_suspend(struct device *dev) | ||
| 2466 | { | ||
| 2467 | struct tegra_pcie *pcie = dev_get_drvdata(dev); | ||
| 2468 | struct tegra_pcie_port *port; | ||
| 2469 | |||
| 2470 | list_for_each_entry(port, &pcie->ports, list) | ||
| 2471 | tegra_pcie_pme_turnoff(port); | ||
| 2472 | |||
| 2473 | tegra_pcie_disable_ports(pcie); | ||
| 2474 | |||
| 2475 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 2476 | tegra_pcie_disable_msi(pcie); | ||
| 2477 | |||
| 2478 | tegra_pcie_disable_controller(pcie); | ||
| 2479 | tegra_pcie_power_off(pcie); | ||
| 2480 | |||
| 2481 | return 0; | ||
| 2482 | } | ||
| 2483 | |||
| 2484 | static int __maybe_unused tegra_pcie_pm_resume(struct device *dev) | ||
| 2485 | { | ||
| 2486 | struct tegra_pcie *pcie = dev_get_drvdata(dev); | ||
| 2487 | int err; | ||
| 2488 | |||
| 2489 | err = tegra_pcie_power_on(pcie); | ||
| 2490 | if (err) { | ||
| 2491 | 		dev_err(dev, "failed to power on PCIe controller: %d\n", err); | ||
| 2492 | return err; | ||
| 2493 | } | ||
| 2494 | err = tegra_pcie_enable_controller(pcie); | ||
| 2495 | if (err) { | ||
| 2496 | 		dev_err(dev, "failed to enable PCIe controller: %d\n", err); | ||
| 2497 | goto poweroff; | ||
| 2498 | } | ||
| 2499 | tegra_pcie_setup_translations(pcie); | ||
| 2500 | |||
| 2501 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 2502 | tegra_pcie_enable_msi(pcie); | ||
| 2503 | |||
| 2504 | tegra_pcie_enable_ports(pcie); | ||
| 2505 | |||
| 2506 | return 0; | ||
| 2507 | |||
| 2508 | poweroff: | ||
| 2509 | tegra_pcie_power_off(pcie); | ||
| 2510 | |||
| 2511 | return err; | ||
| 2512 | } | ||
| 2513 | |||
| 2514 | static const struct dev_pm_ops tegra_pcie_pm_ops = { | ||
| 2515 | SET_RUNTIME_PM_OPS(tegra_pcie_pm_suspend, tegra_pcie_pm_resume, NULL) | ||
| 2516 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_pcie_pm_suspend, | ||
| 2517 | tegra_pcie_pm_resume) | ||
| 2518 | }; | ||
| 2519 | |||
| 2520 | static struct platform_driver tegra_pcie_driver = { | ||
| 2521 | .driver = { | ||
| 2522 | .name = "tegra-pcie", | ||
| 2523 | .of_match_table = tegra_pcie_of_match, | ||
| 2524 | .suppress_bind_attrs = true, | ||
| 2525 | .pm = &tegra_pcie_pm_ops, | ||
| 2526 | }, | ||
| 2527 | .probe = tegra_pcie_probe, | ||
| 2528 | .remove = tegra_pcie_remove, | ||
| 2529 | }; | ||
| 2530 | module_platform_driver(tegra_pcie_driver); | ||
| 2531 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c new file mode 100644 index 000000000000..32d1d7b81ef4 --- /dev/null +++ b/drivers/pci/controller/pci-thunder-ecam.c | |||
| @@ -0,0 +1,380 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2015, 2016 Cavium, Inc. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/init.h> | ||
| 8 | #include <linux/ioport.h> | ||
| 9 | #include <linux/of_pci.h> | ||
| 10 | #include <linux/of.h> | ||
| 11 | #include <linux/pci-ecam.h> | ||
| 12 | #include <linux/platform_device.h> | ||
| 13 | |||
| 14 | #if defined(CONFIG_PCI_HOST_THUNDER_ECAM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) | ||
| 15 | |||
| 16 | static void set_val(u32 v, int where, int size, u32 *val) | ||
| 17 | { | ||
| 18 | int shift = (where & 3) * 8; | ||
| 19 | |||
| 20 | pr_debug("set_val %04x: %08x\n", (unsigned)(where & ~3), v); | ||
| 21 | v >>= shift; | ||
| 22 | if (size == 1) | ||
| 23 | v &= 0xff; | ||
| 24 | else if (size == 2) | ||
| 25 | v &= 0xffff; | ||
| 26 | *val = v; | ||
| 27 | } | ||
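set_val() narrows a 32-bit register image to whatever access the caller made: shift by the byte offset within the dword, then mask to the access size. The same arithmetic as a runnable program, fed a sample EA-entry value of the kind synthesized later in this file:

```c
#include <stdio.h>
#include <stdint.h>

static uint32_t set_val(uint32_t v, int where, int size)
{
	int shift = (where & 3) * 8;	/* byte offset within the dword */

	v >>= shift;
	if (size == 1)
		v &= 0xff;
	else if (size == 2)
		v &= 0xffff;
	return v;
}

int main(void)
{
	uint32_t reg = 0x80ff0003;	/* sample EA entry-0 value */

	printf("4 bytes @0: 0x%08x\n", set_val(reg, 0, 4)); /* 0x80ff0003 */
	printf("2 bytes @2: 0x%04x\n", set_val(reg, 2, 2)); /* 0x80ff */
	printf("1 byte  @1: 0x%02x\n", set_val(reg, 1, 1)); /* 0x00 */
	return 0;
}
```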
| 28 | |||
| 29 | static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus, | ||
| 30 | unsigned int devfn, int where, int size, u32 *val) | ||
| 31 | { | ||
| 32 | void __iomem *addr; | ||
| 33 | u32 v; | ||
| 34 | |||
| 35 | 	/* Entries are 16-byte aligned; bits [3:2] select a word within an entry */ | ||
| 36 | int where_a = where & 0xc; | ||
| 37 | |||
| 38 | if (where_a == 0) { | ||
| 39 | set_val(e0, where, size, val); | ||
| 40 | return PCIBIOS_SUCCESSFUL; | ||
| 41 | } | ||
| 42 | if (where_a == 0x4) { | ||
| 43 | addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ | ||
| 44 | if (!addr) { | ||
| 45 | *val = ~0; | ||
| 46 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 47 | } | ||
| 48 | v = readl(addr); | ||
| 49 | v &= ~0xf; | ||
| 50 | v |= 2; /* EA entry-1. Base-L */ | ||
| 51 | set_val(v, where, size, val); | ||
| 52 | return PCIBIOS_SUCCESSFUL; | ||
| 53 | } | ||
| 54 | if (where_a == 0x8) { | ||
| 55 | u32 barl_orig; | ||
| 56 | u32 barl_rb; | ||
| 57 | |||
| 58 | addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */ | ||
| 59 | if (!addr) { | ||
| 60 | *val = ~0; | ||
| 61 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 62 | } | ||
| 63 | barl_orig = readl(addr + 0); | ||
| 64 | writel(0xffffffff, addr + 0); | ||
| 65 | barl_rb = readl(addr + 0); | ||
| 66 | writel(barl_orig, addr + 0); | ||
| 67 | /* zeros in unsettable bits */ | ||
| 68 | v = ~barl_rb & ~3; | ||
| 69 | v |= 0xc; /* EA entry-2. Offset-L */ | ||
| 70 | set_val(v, where, size, val); | ||
| 71 | return PCIBIOS_SUCCESSFUL; | ||
| 72 | } | ||
| 73 | if (where_a == 0xc) { | ||
| 74 | addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */ | ||
| 75 | if (!addr) { | ||
| 76 | *val = ~0; | ||
| 77 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 78 | } | ||
| 79 | v = readl(addr); /* EA entry-3. Base-H */ | ||
| 80 | set_val(v, where, size, val); | ||
| 81 | return PCIBIOS_SUCCESSFUL; | ||
| 82 | } | ||
| 83 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 84 | } | ||
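The where_a == 0x8 branch performs the classic BAR sizing probe: save the BAR, write all-ones, read back, restore, then invert the read-back so unsettable bits come out as zeros; the inverted value plus one is the BAR size. A worked example of that inversion with an assumed read-back value for a 64 KiB memory BAR:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* read-back after writing 0xffffffff to a 64 KiB memory BAR */
	uint32_t barl_rb = 0xffff0000;

	/* zeros in unsettable bits; clear the low bits as the driver does */
	uint32_t v = ~barl_rb & ~3u;

	printf("EA Offset-L payload: 0x%08x\n", v | 0xc);
	printf("implied BAR size: %u bytes\n", (uint32_t)(~barl_rb + 1));
	return 0;
}
```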
| 85 | |||
| 86 | static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 87 | int where, int size, u32 *val) | ||
| 88 | { | ||
| 89 | struct pci_config_window *cfg = bus->sysdata; | ||
| 90 | int where_a = where & ~3; | ||
| 91 | void __iomem *addr; | ||
| 92 | u32 node_bits; | ||
| 93 | u32 v; | ||
| 94 | |||
| 95 | /* EA Base[63:32] may be missing some bits ... */ | ||
| 96 | switch (where_a) { | ||
| 97 | case 0xa8: | ||
| 98 | case 0xbc: | ||
| 99 | case 0xd0: | ||
| 100 | case 0xe4: | ||
| 101 | break; | ||
| 102 | default: | ||
| 103 | return pci_generic_config_read(bus, devfn, where, size, val); | ||
| 104 | } | ||
| 105 | |||
| 106 | addr = bus->ops->map_bus(bus, devfn, where_a); | ||
| 107 | if (!addr) { | ||
| 108 | *val = ~0; | ||
| 109 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 110 | } | ||
| 111 | |||
| 112 | v = readl(addr); | ||
| 113 | |||
| 114 | /* | ||
| 115 | * Bit 44 of the 64-bit Base must match the same bit in | ||
| 116 | * the config space access window. Since we are working with | ||
| 117 | * the high-order 32 bits, shift everything down by 32 bits. | ||
| 118 | */ | ||
| 119 | node_bits = (cfg->res.start >> 32) & (1 << 12); | ||
| 120 | |||
| 121 | v |= node_bits; | ||
| 122 | set_val(v, where, size, val); | ||
| 123 | |||
| 124 | return PCIBIOS_SUCCESSFUL; | ||
| 125 | } | ||
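On ThunderX, bit 44 of a physical address selects the NUMA node, and the function above patches that bit into the EA Base[63:32] dword; viewed from the high word, bit 44 becomes bit 12, hence the (1 << 12) mask. The extraction on its own, with an illustrative node-1 window address:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative ECAM window base with the node bit (44) set */
	uint64_t res_start = (1ull << 44) | 0x840000000000ull;

	/* bit 44 of the address == bit 12 of the high dword */
	uint32_t node_bits = (uint32_t)(res_start >> 32) & (1u << 12);

	printf("high dword 0x%08x -> node bit 0x%04x\n",
	       (uint32_t)(res_start >> 32), node_bits);
	return 0;
}
```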
| 126 | |||
| 127 | static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 128 | int where, int size, u32 *val) | ||
| 129 | { | ||
| 130 | u32 v; | ||
| 131 | u32 vendor_device; | ||
| 132 | u32 class_rev; | ||
| 133 | void __iomem *addr; | ||
| 134 | int cfg_type; | ||
| 135 | int where_a = where & ~3; | ||
| 136 | |||
| 137 | addr = bus->ops->map_bus(bus, devfn, 0xc); | ||
| 138 | if (!addr) { | ||
| 139 | *val = ~0; | ||
| 140 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 141 | } | ||
| 142 | |||
| 143 | v = readl(addr); | ||
| 144 | |||
| 145 | /* Check for non type-00 header */ | ||
| 146 | cfg_type = (v >> 16) & 0x7f; | ||
| 147 | |||
| 148 | addr = bus->ops->map_bus(bus, devfn, 8); | ||
| 149 | if (!addr) { | ||
| 150 | *val = ~0; | ||
| 151 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 152 | } | ||
| 153 | |||
| 154 | class_rev = readl(addr); | ||
| 155 | if (class_rev == 0xffffffff) | ||
| 156 | goto no_emulation; | ||
| 157 | |||
| 158 | if ((class_rev & 0xff) >= 8) { | ||
| 159 | /* Pass-2 handling */ | ||
| 160 | if (cfg_type) | ||
| 161 | goto no_emulation; | ||
| 162 | return thunder_ecam_p2_config_read(bus, devfn, where, | ||
| 163 | size, val); | ||
| 164 | } | ||
| 165 | |||
| 166 | /* | ||
| 167 | * All BARs have fixed addresses specified by the EA | ||
| 168 | * capability; they must return zero on read. | ||
| 169 | */ | ||
| 170 | if (cfg_type == 0 && | ||
| 171 | ((where >= 0x10 && where < 0x2c) || | ||
| 172 | (where >= 0x1a4 && where < 0x1bc))) { | ||
| 173 | /* BAR or SR-IOV BAR */ | ||
| 174 | *val = 0; | ||
| 175 | return PCIBIOS_SUCCESSFUL; | ||
| 176 | } | ||
| 177 | |||
| 178 | addr = bus->ops->map_bus(bus, devfn, 0); | ||
| 179 | if (!addr) { | ||
| 180 | *val = ~0; | ||
| 181 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 182 | } | ||
| 183 | |||
| 184 | vendor_device = readl(addr); | ||
| 185 | if (vendor_device == 0xffffffff) | ||
| 186 | goto no_emulation; | ||
| 187 | |||
| 188 | pr_debug("%04x:%04x - Fix pass#: %08x, where: %03x, devfn: %03x\n", | ||
| 189 | vendor_device & 0xffff, vendor_device >> 16, class_rev, | ||
| 190 | (unsigned) where, devfn); | ||
| 191 | |||
| 192 | 	/* Type-00 (endpoint) header */ | ||
| 193 | if (cfg_type == 0) { | ||
| 194 | bool has_msix; | ||
| 195 | bool is_nic = (vendor_device == 0xa01e177d); | ||
| 196 | bool is_tns = (vendor_device == 0xa01f177d); | ||
| 197 | |||
| 198 | addr = bus->ops->map_bus(bus, devfn, 0x70); | ||
| 199 | if (!addr) { | ||
| 200 | *val = ~0; | ||
| 201 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 202 | } | ||
| 203 | /* E_CAP */ | ||
| 204 | v = readl(addr); | ||
| 205 | has_msix = (v & 0xff00) != 0; | ||
| 206 | |||
| 207 | if (!has_msix && where_a == 0x70) { | ||
| 208 | v |= 0xbc00; /* next capability is EA at 0xbc */ | ||
| 209 | set_val(v, where, size, val); | ||
| 210 | return PCIBIOS_SUCCESSFUL; | ||
| 211 | } | ||
| 212 | if (where_a == 0xb0) { | ||
| 213 | addr = bus->ops->map_bus(bus, devfn, where_a); | ||
| 214 | if (!addr) { | ||
| 215 | *val = ~0; | ||
| 216 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 217 | } | ||
| 218 | v = readl(addr); | ||
| 219 | if (v & 0xff00) | ||
| 220 | pr_err("Bad MSIX cap header: %08x\n", v); | ||
| 221 | v |= 0xbc00; /* next capability is EA at 0xbc */ | ||
| 222 | set_val(v, where, size, val); | ||
| 223 | return PCIBIOS_SUCCESSFUL; | ||
| 224 | } | ||
| 225 | if (where_a == 0xbc) { | ||
| 226 | if (is_nic) | ||
| 227 | v = 0x40014; /* EA last in chain, 4 entries */ | ||
| 228 | else if (is_tns) | ||
| 229 | v = 0x30014; /* EA last in chain, 3 entries */ | ||
| 230 | else if (has_msix) | ||
| 231 | v = 0x20014; /* EA last in chain, 2 entries */ | ||
| 232 | else | ||
| 233 | v = 0x10014; /* EA last in chain, 1 entry */ | ||
| 234 | set_val(v, where, size, val); | ||
| 235 | return PCIBIOS_SUCCESSFUL; | ||
| 236 | } | ||
| 237 | if (where_a >= 0xc0 && where_a < 0xd0) | ||
| 238 | /* EA entry-0. PP=0, BAR0 Size:3 */ | ||
| 239 | return handle_ea_bar(0x80ff0003, | ||
| 240 | 0x10, bus, devfn, where, | ||
| 241 | size, val); | ||
| 242 | if (where_a >= 0xd0 && where_a < 0xe0 && has_msix) | ||
| 243 | /* EA entry-1. PP=0, BAR4 Size:3 */ | ||
| 244 | return handle_ea_bar(0x80ff0043, | ||
| 245 | 0x20, bus, devfn, where, | ||
| 246 | size, val); | ||
| 247 | if (where_a >= 0xe0 && where_a < 0xf0 && is_tns) | ||
| 248 | /* EA entry-2. PP=0, BAR2, Size:3 */ | ||
| 249 | return handle_ea_bar(0x80ff0023, | ||
| 250 | 0x18, bus, devfn, where, | ||
| 251 | size, val); | ||
| 252 | if (where_a >= 0xe0 && where_a < 0xf0 && is_nic) | ||
| 253 | /* EA entry-2. PP=4, VF_BAR0 (9), Size:3 */ | ||
| 254 | return handle_ea_bar(0x80ff0493, | ||
| 255 | 0x1a4, bus, devfn, where, | ||
| 256 | size, val); | ||
| 257 | if (where_a >= 0xf0 && where_a < 0x100 && is_nic) | ||
| 258 | /* EA entry-3. PP=4, VF_BAR4 (d), Size:3 */ | ||
| 259 | return handle_ea_bar(0x80ff04d3, | ||
| 260 | 0x1b4, bus, devfn, where, | ||
| 261 | size, val); | ||
| 262 | } else if (cfg_type == 1) { | ||
| 263 | bool is_rsl_bridge = devfn == 0x08; | ||
| 264 | bool is_rad_bridge = devfn == 0xa0; | ||
| 265 | bool is_zip_bridge = devfn == 0xa8; | ||
| 266 | bool is_dfa_bridge = devfn == 0xb0; | ||
| 267 | bool is_nic_bridge = devfn == 0x10; | ||
| 268 | |||
| 269 | if (where_a == 0x70) { | ||
| 270 | addr = bus->ops->map_bus(bus, devfn, where_a); | ||
| 271 | if (!addr) { | ||
| 272 | *val = ~0; | ||
| 273 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 274 | } | ||
| 275 | v = readl(addr); | ||
| 276 | if (v & 0xff00) | ||
| 277 | pr_err("Bad PCIe cap header: %08x\n", v); | ||
| 278 | v |= 0xbc00; /* next capability is EA at 0xbc */ | ||
| 279 | set_val(v, where, size, val); | ||
| 280 | return PCIBIOS_SUCCESSFUL; | ||
| 281 | } | ||
| 282 | if (where_a == 0xbc) { | ||
| 283 | if (is_nic_bridge) | ||
| 284 | v = 0x10014; /* EA last in chain, 1 entry */ | ||
| 285 | else | ||
| 286 | v = 0x00014; /* EA last in chain, no entries */ | ||
| 287 | set_val(v, where, size, val); | ||
| 288 | return PCIBIOS_SUCCESSFUL; | ||
| 289 | } | ||
| 290 | if (where_a == 0xc0) { | ||
| 291 | if (is_rsl_bridge || is_nic_bridge) | ||
| 292 | v = 0x0101; /* subordinate:secondary = 1:1 */ | ||
| 293 | else if (is_rad_bridge) | ||
| 294 | v = 0x0202; /* subordinate:secondary = 2:2 */ | ||
| 295 | else if (is_zip_bridge) | ||
| 296 | v = 0x0303; /* subordinate:secondary = 3:3 */ | ||
| 297 | else if (is_dfa_bridge) | ||
| 298 | v = 0x0404; /* subordinate:secondary = 4:4 */ | ||
| 299 | set_val(v, where, size, val); | ||
| 300 | return PCIBIOS_SUCCESSFUL; | ||
| 301 | } | ||
| 302 | if (where_a == 0xc4 && is_nic_bridge) { | ||
| 303 | /* Enabled, not-Write, SP=ff, PP=05, BEI=6, ES=4 */ | ||
| 304 | v = 0x80ff0564; | ||
| 305 | set_val(v, where, size, val); | ||
| 306 | return PCIBIOS_SUCCESSFUL; | ||
| 307 | } | ||
| 308 | if (where_a == 0xc8 && is_nic_bridge) { | ||
| 309 | v = 0x00000002; /* Base-L 64-bit */ | ||
| 310 | set_val(v, where, size, val); | ||
| 311 | return PCIBIOS_SUCCESSFUL; | ||
| 312 | } | ||
| 313 | if (where_a == 0xcc && is_nic_bridge) { | ||
| 314 | v = 0xfffffffe; /* MaxOffset-L 64-bit */ | ||
| 315 | set_val(v, where, size, val); | ||
| 316 | return PCIBIOS_SUCCESSFUL; | ||
| 317 | } | ||
| 318 | if (where_a == 0xd0 && is_nic_bridge) { | ||
| 319 | v = 0x00008430; /* NIC Base-H */ | ||
| 320 | set_val(v, where, size, val); | ||
| 321 | return PCIBIOS_SUCCESSFUL; | ||
| 322 | } | ||
| 323 | if (where_a == 0xd4 && is_nic_bridge) { | ||
| 324 | v = 0x0000000f; /* MaxOffset-H */ | ||
| 325 | set_val(v, where, size, val); | ||
| 326 | return PCIBIOS_SUCCESSFUL; | ||
| 327 | } | ||
| 328 | } | ||
| 329 | no_emulation: | ||
| 330 | return pci_generic_config_read(bus, devfn, where, size, val); | ||
| 331 | } | ||
| 332 | |||
| 333 | static int thunder_ecam_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 334 | int where, int size, u32 val) | ||
| 335 | { | ||
| 336 | /* | ||
| 337 | * All BARs have fixed addresses; ignore BAR writes so they | ||
| 338 | * don't get corrupted. | ||
| 339 | */ | ||
| 340 | if ((where >= 0x10 && where < 0x2c) || | ||
| 341 | (where >= 0x1a4 && where < 0x1bc)) | ||
| 342 | /* BAR or SR-IOV BAR */ | ||
| 343 | return PCIBIOS_SUCCESSFUL; | ||
| 344 | |||
| 345 | return pci_generic_config_write(bus, devfn, where, size, val); | ||
| 346 | } | ||
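Read emulation (forcing the BARs to read as zero) and write filtering both key off the same two config-space windows: the type-0 BARs at 0x10-0x2b and the SR-IOV BARs at 0x1a4-0x1bb. Factored into a predicate for clarity; the helper name is ours, not the driver's:

```c
#include <stdio.h>
#include <stdbool.h>

/* hypothetical helper: true for BAR and SR-IOV BAR config offsets */
static bool is_fixed_bar(int where)
{
	return (where >= 0x10 && where < 0x2c) ||
	       (where >= 0x1a4 && where < 0x1bc);
}

int main(void)
{
	printf("0x10: %d, 0x2c: %d, 0x1a4: %d\n",
	       is_fixed_bar(0x10), is_fixed_bar(0x2c), is_fixed_bar(0x1a4));
	return 0;	/* prints: 0x10: 1, 0x2c: 0, 0x1a4: 1 */
}
```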
| 347 | |||
| 348 | struct pci_ecam_ops pci_thunder_ecam_ops = { | ||
| 349 | .bus_shift = 20, | ||
| 350 | .pci_ops = { | ||
| 351 | .map_bus = pci_ecam_map_bus, | ||
| 352 | .read = thunder_ecam_config_read, | ||
| 353 | .write = thunder_ecam_config_write, | ||
| 354 | } | ||
| 355 | }; | ||
| 356 | |||
| 357 | #ifdef CONFIG_PCI_HOST_THUNDER_ECAM | ||
| 358 | |||
| 359 | static const struct of_device_id thunder_ecam_of_match[] = { | ||
| 360 | { .compatible = "cavium,pci-host-thunder-ecam" }, | ||
| 361 | { }, | ||
| 362 | }; | ||
| 363 | |||
| 364 | static int thunder_ecam_probe(struct platform_device *pdev) | ||
| 365 | { | ||
| 366 | return pci_host_common_probe(pdev, &pci_thunder_ecam_ops); | ||
| 367 | } | ||
| 368 | |||
| 369 | static struct platform_driver thunder_ecam_driver = { | ||
| 370 | .driver = { | ||
| 371 | .name = KBUILD_MODNAME, | ||
| 372 | .of_match_table = thunder_ecam_of_match, | ||
| 373 | .suppress_bind_attrs = true, | ||
| 374 | }, | ||
| 375 | .probe = thunder_ecam_probe, | ||
| 376 | }; | ||
| 377 | builtin_platform_driver(thunder_ecam_driver); | ||
| 378 | |||
| 379 | #endif | ||
| 380 | #endif | ||
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c new file mode 100644 index 000000000000..f127ce8bd4ef --- /dev/null +++ b/drivers/pci/controller/pci-thunder-pem.c | |||
| @@ -0,0 +1,473 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2015 - 2016 Cavium, Inc. | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/bitfield.h> | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/init.h> | ||
| 9 | #include <linux/of_address.h> | ||
| 10 | #include <linux/of_pci.h> | ||
| 11 | #include <linux/pci-acpi.h> | ||
| 12 | #include <linux/pci-ecam.h> | ||
| 13 | #include <linux/platform_device.h> | ||
| 14 | #include "../pci.h" | ||
| 15 | |||
| 16 | #if defined(CONFIG_PCI_HOST_THUNDER_PEM) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) | ||
| 17 | |||
| 18 | #define PEM_CFG_WR 0x28 | ||
| 19 | #define PEM_CFG_RD 0x30 | ||
| 20 | |||
| 21 | struct thunder_pem_pci { | ||
| 22 | u32 ea_entry[3]; | ||
| 23 | void __iomem *pem_reg_base; | ||
| 24 | }; | ||
| 25 | |||
| 26 | static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn, | ||
| 27 | int where, int size, u32 *val) | ||
| 28 | { | ||
| 29 | u64 read_val, tmp_val; | ||
| 30 | struct pci_config_window *cfg = bus->sysdata; | ||
| 31 | struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; | ||
| 32 | |||
| 33 | if (devfn != 0 || where >= 2048) { | ||
| 34 | *val = ~0; | ||
| 35 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 36 | } | ||
| 37 | |||
| 38 | /* | ||
| 39 | * 32-bit accesses only. Write the address to the low order | ||
| 40 | * bits of PEM_CFG_RD, then trigger the read by reading back. | ||
| 41 | * The config data lands in the upper 32 bits of PEM_CFG_RD. | ||
| 42 | */ | ||
| 43 | read_val = where & ~3ull; | ||
| 44 | writeq(read_val, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 45 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 46 | read_val >>= 32; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * The config space contains some garbage; fix it up. Also | ||
| 50 | * synthesize an EA capability for the BAR used by MSI-X. | ||
| 51 | */ | ||
| 52 | switch (where & ~3) { | ||
| 53 | case 0x40: | ||
| 54 | read_val &= 0xffff00ff; | ||
| 55 | read_val |= 0x00007000; /* Skip MSI CAP */ | ||
| 56 | break; | ||
| 57 | case 0x70: /* Express Cap */ | ||
| 58 | /* | ||
| 59 | * Change PME interrupt to vector 2 on T88 where it | ||
| 60 | * reads as 0, else leave it alone. | ||
| 61 | */ | ||
| 62 | if (!(read_val & (0x1f << 25))) | ||
| 63 | read_val |= (2u << 25); | ||
| 64 | break; | ||
| 65 | case 0xb0: /* MSI-X Cap */ | ||
| 66 | /* TableSize=2 or 4, Next Cap is EA */ | ||
| 67 | read_val &= 0xc00000ff; | ||
| 68 | /* | ||
| 69 | * If the Express Cap (0x70) raw PME vector reads as 0 we | ||
| 70 | * are on T88, where TableSize is reported as 4; otherwise | ||
| 71 | * TableSize is 2. | ||
| 72 | */ | ||
| 73 | writeq(0x70, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 74 | tmp_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 75 | tmp_val >>= 32; | ||
| 76 | if (!(tmp_val & (0x1f << 25))) | ||
| 77 | read_val |= 0x0003bc00; | ||
| 78 | else | ||
| 79 | read_val |= 0x0001bc00; | ||
| 80 | break; | ||
| 81 | case 0xb4: | ||
| 82 | /* Table offset=0, BIR=0 */ | ||
| 83 | read_val = 0x00000000; | ||
| 84 | break; | ||
| 85 | case 0xb8: | ||
| 86 | /* BPA offset=0xf0000, BIR=0 */ | ||
| 87 | read_val = 0x000f0000; | ||
| 88 | break; | ||
| 89 | case 0xbc: | ||
| 90 | /* EA, 1 entry, no next Cap */ | ||
| 91 | read_val = 0x00010014; | ||
| 92 | break; | ||
| 93 | case 0xc0: | ||
| 94 | /* DW2 for type-1 */ | ||
| 95 | read_val = 0x00000000; | ||
| 96 | break; | ||
| 97 | case 0xc4: | ||
| 98 | /* Entry BEI=0, PP=0x00, SP=0xff, ES=3 */ | ||
| 99 | read_val = 0x80ff0003; | ||
| 100 | break; | ||
| 101 | case 0xc8: | ||
| 102 | read_val = pem_pci->ea_entry[0]; | ||
| 103 | break; | ||
| 104 | case 0xcc: | ||
| 105 | read_val = pem_pci->ea_entry[1]; | ||
| 106 | break; | ||
| 107 | case 0xd0: | ||
| 108 | read_val = pem_pci->ea_entry[2]; | ||
| 109 | break; | ||
| 110 | default: | ||
| 111 | break; | ||
| 112 | } | ||
| 113 | read_val >>= (8 * (where & 3)); | ||
| 114 | switch (size) { | ||
| 115 | case 1: | ||
| 116 | read_val &= 0xff; | ||
| 117 | break; | ||
| 118 | case 2: | ||
| 119 | read_val &= 0xffff; | ||
| 120 | break; | ||
| 121 | default: | ||
| 122 | break; | ||
| 123 | } | ||
| 124 | *val = read_val; | ||
| 125 | return PCIBIOS_SUCCESSFUL; | ||
| 126 | } | ||
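Stripped of the fix-ups, the indirect access protocol used above reduces to the following (a condensed sketch; pem_cfg_read32() does not exist in the driver):

```c
static u32 pem_cfg_read32(void __iomem *pem_reg_base, int where)
{
	u64 v;

	writeq(where & ~3ull, pem_reg_base + PEM_CFG_RD); /* latch address */
	v = readq(pem_reg_base + PEM_CFG_RD);             /* trigger the read */
	return v >> 32;                                   /* data in bits 63:32 */
}
```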
| 127 | |||
| 128 | static int thunder_pem_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 129 | int where, int size, u32 *val) | ||
| 130 | { | ||
| 131 | struct pci_config_window *cfg = bus->sysdata; | ||
| 132 | |||
| 133 | if (bus->number < cfg->busr.start || | ||
| 134 | bus->number > cfg->busr.end) | ||
| 135 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 136 | |||
| 137 | /* | ||
| 138 | * The first device on the bus is the PEM PCIe bridge. | ||
| 139 | * Special-case its config access. | ||
| 140 | */ | ||
| 141 | if (bus->number == cfg->busr.start) | ||
| 142 | return thunder_pem_bridge_read(bus, devfn, where, size, val); | ||
| 143 | |||
| 144 | return pci_generic_config_read(bus, devfn, where, size, val); | ||
| 145 | } | ||
| 146 | |||
| 147 | /* | ||
| 148 | * Some of the w1c_bits below also include read-only or non-writable | ||
| 149 | * reserved bits; this makes the code simpler and is OK, as the bits | ||
| 150 | * are not affected by writing zeros to them. | ||
| 151 | */ | ||
| 152 | static u32 thunder_pem_bridge_w1c_bits(u64 where_aligned) | ||
| 153 | { | ||
| 154 | u32 w1c_bits = 0; | ||
| 155 | |||
| 156 | switch (where_aligned) { | ||
| 157 | case 0x04: /* Command/Status */ | ||
| 158 | case 0x1c: /* I/O Base and I/O Limit/Secondary Status */ | ||
| 159 | w1c_bits = 0xff000000; | ||
| 160 | break; | ||
| 161 | case 0x44: /* Power Management Control and Status */ | ||
| 162 | w1c_bits = 0xfffffe00; | ||
| 163 | break; | ||
| 164 | case 0x78: /* Device Control/Device Status */ | ||
| 165 | case 0x80: /* Link Control/Link Status */ | ||
| 166 | case 0x88: /* Slot Control/Slot Status */ | ||
| 167 | case 0x90: /* Root Status */ | ||
| 168 | case 0xa0: /* Link Control 2 Registers/Link Status 2 */ | ||
| 169 | w1c_bits = 0xffff0000; | ||
| 170 | break; | ||
| 171 | case 0x104: /* Uncorrectable Error Status */ | ||
| 172 | case 0x110: /* Correctable Error Status */ | ||
| 173 | case 0x130: /* Error Status */ | ||
| 174 | case 0x160: /* Link Control 4 */ | ||
| 175 | w1c_bits = 0xffffffff; | ||
| 176 | break; | ||
| 177 | default: | ||
| 178 | break; | ||
| 179 | } | ||
| 180 | return w1c_bits; | ||
| 181 | } | ||
| 182 | |||
| 183 | /* Some bits must always be written as one so they appear to be read-only. */ | ||
| 184 | static u32 thunder_pem_bridge_w1_bits(u64 where_aligned) | ||
| 185 | { | ||
| 186 | u32 w1_bits; | ||
| 187 | |||
| 188 | switch (where_aligned) { | ||
| 189 | case 0x1c: /* I/O Base / I/O Limit, Secondary Status */ | ||
| 190 | /* Force 32-bit I/O addressing. */ | ||
| 191 | w1_bits = 0x0101; | ||
| 192 | break; | ||
| 193 | case 0x24: /* Prefetchable Memory Base / Prefetchable Memory Limit */ | ||
| 194 | /* Force 64-bit addressing */ | ||
| 195 | w1_bits = 0x00010001; | ||
| 196 | break; | ||
| 197 | default: | ||
| 198 | w1_bits = 0; | ||
| 199 | break; | ||
| 200 | } | ||
| 201 | return w1_bits; | ||
| 202 | } | ||
| 203 | |||
| 204 | static int thunder_pem_bridge_write(struct pci_bus *bus, unsigned int devfn, | ||
| 205 | int where, int size, u32 val) | ||
| 206 | { | ||
| 207 | struct pci_config_window *cfg = bus->sysdata; | ||
| 208 | struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv; | ||
| 209 | u64 write_val, read_val; | ||
| 210 | u64 where_aligned = where & ~3ull; | ||
| 211 | u32 mask = 0; | ||
| 212 | |||
| 214 | if (devfn != 0 || where >= 2048) | ||
| 215 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 216 | |||
| 217 | /* | ||
| 218 | * 32-bit accesses only. If the write is for a size smaller | ||
| 219 | * than 32 bits, we must first read the 32-bit value, merge | ||
| 220 | * in the desired bits, and then write the whole 32 bits back | ||
| 221 | * out. | ||
| 222 | */ | ||
| 223 | switch (size) { | ||
| 224 | case 1: | ||
| 225 | writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 226 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 227 | read_val >>= 32; | ||
| 228 | mask = ~(0xff << (8 * (where & 3))); | ||
| 229 | read_val &= mask; | ||
| 230 | val = (val & 0xff) << (8 * (where & 3)); | ||
| 231 | val |= (u32)read_val; | ||
| 232 | break; | ||
| 233 | case 2: | ||
| 234 | writeq(where_aligned, pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 235 | read_val = readq(pem_pci->pem_reg_base + PEM_CFG_RD); | ||
| 236 | read_val >>= 32; | ||
| 237 | mask = ~(0xffff << (8 * (where & 3))); | ||
| 238 | read_val &= mask; | ||
| 239 | val = (val & 0xffff) << (8 * (where & 3)); | ||
| 240 | val |= (u32)read_val; | ||
| 241 | break; | ||
| 242 | default: | ||
| 243 | break; | ||
| 244 | } | ||
| 245 | |||
| 246 | /* | ||
| 247 | * By expanding the write width to 32 bits, we may | ||
| 248 | * inadvertently hit some W1C bits that were not intended to | ||
| 249 | * be written. Calculate the mask that must be applied to the | ||
| 250 | * data to be written to avoid these cases. | ||
| 251 | */ | ||
| 252 | if (mask) { | ||
| 253 | u32 w1c_bits = thunder_pem_bridge_w1c_bits(where_aligned); | ||
| 254 | |||
| 255 | if (w1c_bits) { | ||
| 256 | mask &= w1c_bits; | ||
| 257 | val &= ~mask; | ||
| 258 | } | ||
| 259 | } | ||
| 260 | |||
| 261 | /* | ||
| 262 | * Some bits must be read-only with value of one. Since the | ||
| 263 | * access method allows these to be cleared if a zero is | ||
| 264 | * written, force them to one before writing. | ||
| 265 | */ | ||
| 266 | val |= thunder_pem_bridge_w1_bits(where_aligned); | ||
| 267 | |||
| 268 | /* | ||
| 269 | * Low order bits are the config address, the high order 32 | ||
| 270 | * bits are the data to be written. | ||
| 271 | */ | ||
| 272 | write_val = (((u64)val) << 32) | where_aligned; | ||
| 273 | writeq(write_val, pem_pci->pem_reg_base + PEM_CFG_WR); | ||
| 274 | return PCIBIOS_SUCCESSFUL; | ||
| 275 | } | ||
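A worked example of the W1C masking above (hypothetical access): a 1-byte write to offset 0x05 is widened to a 32-bit write at 0x04, merging bytes 0, 2 and 3 from a read-back. Byte 3 overlaps the Status register's W1C bits, since thunder_pem_bridge_w1c_bits(0x04) = 0xff000000. The preserved-bits mask is ~(0xff << 8) = 0xffff00ff, so mask & w1c_bits = 0xff000000, and val &= ~mask then zeroes bits 31:24, preventing the widened write from acknowledging pending status bits that happened to read back as one.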
| 276 | |||
| 277 | static int thunder_pem_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 278 | int where, int size, u32 val) | ||
| 279 | { | ||
| 280 | struct pci_config_window *cfg = bus->sysdata; | ||
| 281 | |||
| 282 | if (bus->number < cfg->busr.start || | ||
| 283 | bus->number > cfg->busr.end) | ||
| 284 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 285 | /* | ||
| 286 | * The first device on the bus is the PEM PCIe bridge. | ||
| 287 | * Special-case its config access. | ||
| 288 | */ | ||
| 289 | if (bus->number == cfg->busr.start) | ||
| 290 | return thunder_pem_bridge_write(bus, devfn, where, size, val); | ||
| 291 | |||
| 293 | return pci_generic_config_write(bus, devfn, where, size, val); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int thunder_pem_init(struct device *dev, struct pci_config_window *cfg, | ||
| 297 | struct resource *res_pem) | ||
| 298 | { | ||
| 299 | struct thunder_pem_pci *pem_pci; | ||
| 300 | resource_size_t bar4_start; | ||
| 301 | |||
| 302 | pem_pci = devm_kzalloc(dev, sizeof(*pem_pci), GFP_KERNEL); | ||
| 303 | if (!pem_pci) | ||
| 304 | return -ENOMEM; | ||
| 305 | |||
| 306 | pem_pci->pem_reg_base = devm_ioremap(dev, res_pem->start, 0x10000); | ||
| 307 | if (!pem_pci->pem_reg_base) | ||
| 308 | return -ENOMEM; | ||
| 309 | |||
| 310 | /* | ||
| 311 | * The MSI-X BAR for the PEM and AER interrupts is located at | ||
| 312 | * a fixed offset from the PEM register base. Generate a | ||
| 313 | * fragment of the synthesized Enhanced Allocation capability | ||
| 314 | * structure here for the BAR. | ||
| 315 | */ | ||
| 316 | bar4_start = res_pem->start + 0xf00000; | ||
| 317 | pem_pci->ea_entry[0] = (u32)bar4_start | 2; | ||
| 318 | pem_pci->ea_entry[1] = (u32)(res_pem->end - bar4_start) & ~3u; | ||
| 319 | pem_pci->ea_entry[2] = (u32)(bar4_start >> 32); | ||
| 320 | |||
| 321 | cfg->priv = pem_pci; | ||
| 322 | return 0; | ||
| 323 | } | ||
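Plugging in example numbers (assuming a 16 MB PEM resource at 0x87e0c1000000, as the legacy-firmware path further below would produce): bar4_start = 0x87e0c1f00000, so ea_entry[0] = 0xc1f00002 (Base-L with the 64-bit flag), ea_entry[1] = 0x000ffffc (MaxOffset-L), and ea_entry[2] = 0x000087e0 (Base-H).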
| 324 | |||
| 325 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
| 326 | |||
| 327 | #define PEM_RES_BASE 0x87e0c0000000UL | ||
| 328 | #define PEM_NODE_MASK GENMASK(45, 44) | ||
| 329 | #define PEM_INDX_MASK GENMASK(26, 24) | ||
| 330 | #define PEM_MIN_DOM_IN_NODE 4 | ||
| 331 | #define PEM_MAX_DOM_IN_NODE 10 | ||
| 332 | |||
| 333 | static void thunder_pem_reserve_range(struct device *dev, int seg, | ||
| 334 | struct resource *r) | ||
| 335 | { | ||
| 336 | resource_size_t start = r->start, end = r->end; | ||
| 337 | struct resource *res; | ||
| 338 | const char *regionid; | ||
| 339 | |||
| 340 | regionid = kasprintf(GFP_KERNEL, "PEM RC:%d", seg); | ||
| 341 | if (!regionid) | ||
| 342 | return; | ||
| 343 | |||
| 344 | res = request_mem_region(start, end - start + 1, regionid); | ||
| 345 | if (res) | ||
| 346 | res->flags &= ~IORESOURCE_BUSY; | ||
| 347 | else | ||
| 348 | kfree(regionid); | ||
| 349 | |||
| 350 | dev_info(dev, "%pR %s reserved\n", r, | ||
| 351 | res ? "has been" : "could not be"); | ||
| 352 | } | ||
| 353 | |||
| 354 | static void thunder_pem_legacy_fw(struct acpi_pci_root *root, | ||
| 355 | struct resource *res_pem) | ||
| 356 | { | ||
| 357 | int node = acpi_get_node(root->device->handle); | ||
| 358 | int index; | ||
| 359 | |||
| 360 | if (node == NUMA_NO_NODE) | ||
| 361 | node = 0; | ||
| 362 | |||
| 363 | index = root->segment - PEM_MIN_DOM_IN_NODE; | ||
| 364 | index -= node * PEM_MAX_DOM_IN_NODE; | ||
| 365 | res_pem->start = PEM_RES_BASE | FIELD_PREP(PEM_NODE_MASK, node) | | ||
| 366 | FIELD_PREP(PEM_INDX_MASK, index); | ||
| 367 | res_pem->flags = IORESOURCE_MEM; | ||
| 368 | } | ||
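Worked examples of the legacy address computation (values per the macros above): segment 5 on node 0 gives index = 5 - 4 = 1, so res_pem->start = 0x87e0c0000000 | FIELD_PREP(PEM_INDX_MASK, 1) = 0x87e0c1000000; segment 14 on node 1 gives index = 14 - 4 - 10 = 0 and start = 0x87e0c0000000 | FIELD_PREP(PEM_NODE_MASK, 1) = 0x97e0c0000000.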
| 369 | |||
| 370 | static int thunder_pem_acpi_init(struct pci_config_window *cfg) | ||
| 371 | { | ||
| 372 | struct device *dev = cfg->parent; | ||
| 373 | struct acpi_device *adev = to_acpi_device(dev); | ||
| 374 | struct acpi_pci_root *root = acpi_driver_data(adev); | ||
| 375 | struct resource *res_pem; | ||
| 376 | int ret; | ||
| 377 | |||
| 378 | res_pem = devm_kzalloc(&adev->dev, sizeof(*res_pem), GFP_KERNEL); | ||
| 379 | if (!res_pem) | ||
| 380 | return -ENOMEM; | ||
| 381 | |||
| 382 | ret = acpi_get_rc_resources(dev, "CAVA02B", root->segment, res_pem); | ||
| 383 | |||
| 384 | /* | ||
| 385 | * If we fail to gather resources, we are running on old | ||
| 386 | * firmware, so calculate the PEM-specific resources manually. | ||
| 387 | */ | ||
| 388 | if (ret) { | ||
| 389 | thunder_pem_legacy_fw(root, res_pem); | ||
| 390 | /* | ||
| 391 | * Reserve a 64K window of the PEM-specific resources. The full | ||
| 392 | * 16M range is still required for the thunder_pem_init() call. | ||
| 393 | */ | ||
| 394 | res_pem->end = res_pem->start + SZ_64K - 1; | ||
| 395 | thunder_pem_reserve_range(dev, root->segment, res_pem); | ||
| 396 | res_pem->end = res_pem->start + SZ_16M - 1; | ||
| 397 | |||
| 398 | /* Reserve PCI configuration space as well. */ | ||
| 399 | thunder_pem_reserve_range(dev, root->segment, &cfg->res); | ||
| 400 | } | ||
| 401 | |||
| 402 | return thunder_pem_init(dev, cfg, res_pem); | ||
| 403 | } | ||
| 404 | |||
| 405 | struct pci_ecam_ops thunder_pem_ecam_ops = { | ||
| 406 | .bus_shift = 24, | ||
| 407 | .init = thunder_pem_acpi_init, | ||
| 408 | .pci_ops = { | ||
| 409 | .map_bus = pci_ecam_map_bus, | ||
| 410 | .read = thunder_pem_config_read, | ||
| 411 | .write = thunder_pem_config_write, | ||
| 412 | } | ||
| 413 | }; | ||
| 414 | |||
| 415 | #endif | ||
| 416 | |||
| 417 | #ifdef CONFIG_PCI_HOST_THUNDER_PEM | ||
| 418 | |||
| 419 | static int thunder_pem_platform_init(struct pci_config_window *cfg) | ||
| 420 | { | ||
| 421 | struct device *dev = cfg->parent; | ||
| 422 | struct platform_device *pdev = to_platform_device(dev); | ||
| 423 | struct resource *res_pem; | ||
| 424 | |||
| 425 | if (!dev->of_node) | ||
| 426 | return -EINVAL; | ||
| 427 | |||
| 428 | /* | ||
| 429 | * The second register range is the PEM bridge to the PCIe | ||
| 430 | * bus. It has a different config access method than the | ||
| 431 | * devices behind the bridge. | ||
| 432 | */ | ||
| 433 | res_pem = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 434 | if (!res_pem) { | ||
| 435 | dev_err(dev, "missing \"reg[1]\"property\n"); | ||
| 436 | return -EINVAL; | ||
| 437 | } | ||
| 438 | |||
| 439 | return thunder_pem_init(dev, cfg, res_pem); | ||
| 440 | } | ||
| 441 | |||
| 442 | static struct pci_ecam_ops pci_thunder_pem_ops = { | ||
| 443 | .bus_shift = 24, | ||
| 444 | .init = thunder_pem_platform_init, | ||
| 445 | .pci_ops = { | ||
| 446 | .map_bus = pci_ecam_map_bus, | ||
| 447 | .read = thunder_pem_config_read, | ||
| 448 | .write = thunder_pem_config_write, | ||
| 449 | } | ||
| 450 | }; | ||
| 451 | |||
| 452 | static const struct of_device_id thunder_pem_of_match[] = { | ||
| 453 | { .compatible = "cavium,pci-host-thunder-pem" }, | ||
| 454 | { }, | ||
| 455 | }; | ||
| 456 | |||
| 457 | static int thunder_pem_probe(struct platform_device *pdev) | ||
| 458 | { | ||
| 459 | return pci_host_common_probe(pdev, &pci_thunder_pem_ops); | ||
| 460 | } | ||
| 461 | |||
| 462 | static struct platform_driver thunder_pem_driver = { | ||
| 463 | .driver = { | ||
| 464 | .name = KBUILD_MODNAME, | ||
| 465 | .of_match_table = thunder_pem_of_match, | ||
| 466 | .suppress_bind_attrs = true, | ||
| 467 | }, | ||
| 468 | .probe = thunder_pem_probe, | ||
| 469 | }; | ||
| 470 | builtin_platform_driver(thunder_pem_driver); | ||
| 471 | |||
| 472 | #endif | ||
| 473 | #endif | ||
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c new file mode 100644 index 000000000000..68b8bfbdb867 --- /dev/null +++ b/drivers/pci/controller/pci-v3-semi.c | |||
| @@ -0,0 +1,963 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Support for V3 Semiconductor PCI Local Bus to PCI Bridge | ||
| 4 | * Copyright (C) 2017 Linus Walleij <linus.walleij@linaro.org> | ||
| 5 | * | ||
| 6 | * Based on the code from arch/arm/mach-integrator/pci_v3.c | ||
| 7 | * Copyright (C) 1999 ARM Limited | ||
| 8 | * Copyright (C) 2000-2001 Deep Blue Solutions Ltd | ||
| 9 | * | ||
| 10 | * Contributors to the old driver include: | ||
| 11 | * Russell King <linux@armlinux.org.uk> | ||
| 12 | * David A. Rusling <david.rusling@linaro.org> (uHAL, ARM Firmware suite) | ||
| 13 | * Rob Herring <robh@kernel.org> | ||
| 14 | * Liviu Dudau <Liviu.Dudau@arm.com> | ||
| 15 | * Grant Likely <grant.likely@secretlab.ca> | ||
| 16 | * Arnd Bergmann <arnd@arndb.de> | ||
| 17 | * Bjorn Helgaas <bhelgaas@google.com> | ||
| 18 | */ | ||
| 19 | #include <linux/init.h> | ||
| 20 | #include <linux/interrupt.h> | ||
| 21 | #include <linux/io.h> | ||
| 22 | #include <linux/kernel.h> | ||
| 23 | #include <linux/of_address.h> | ||
| 24 | #include <linux/of_device.h> | ||
| 25 | #include <linux/of_irq.h> | ||
| 26 | #include <linux/of_pci.h> | ||
| 27 | #include <linux/pci.h> | ||
| 28 | #include <linux/platform_device.h> | ||
| 29 | #include <linux/slab.h> | ||
| 30 | #include <linux/bitops.h> | ||
| 31 | #include <linux/irq.h> | ||
| 32 | #include <linux/mfd/syscon.h> | ||
| 33 | #include <linux/regmap.h> | ||
| 34 | #include <linux/clk.h> | ||
| 35 | |||
| 36 | #include "../pci.h" | ||
| 37 | |||
| 38 | #define V3_PCI_VENDOR 0x00000000 | ||
| 39 | #define V3_PCI_DEVICE 0x00000002 | ||
| 40 | #define V3_PCI_CMD 0x00000004 | ||
| 41 | #define V3_PCI_STAT 0x00000006 | ||
| 42 | #define V3_PCI_CC_REV 0x00000008 | ||
| 43 | #define V3_PCI_HDR_CFG 0x0000000C | ||
| 44 | #define V3_PCI_IO_BASE 0x00000010 | ||
| 45 | #define V3_PCI_BASE0 0x00000014 | ||
| 46 | #define V3_PCI_BASE1 0x00000018 | ||
| 47 | #define V3_PCI_SUB_VENDOR 0x0000002C | ||
| 48 | #define V3_PCI_SUB_ID 0x0000002E | ||
| 49 | #define V3_PCI_ROM 0x00000030 | ||
| 50 | #define V3_PCI_BPARAM 0x0000003C | ||
| 51 | #define V3_PCI_MAP0 0x00000040 | ||
| 52 | #define V3_PCI_MAP1 0x00000044 | ||
| 53 | #define V3_PCI_INT_STAT 0x00000048 | ||
| 54 | #define V3_PCI_INT_CFG 0x0000004C | ||
| 55 | #define V3_LB_BASE0 0x00000054 | ||
| 56 | #define V3_LB_BASE1 0x00000058 | ||
| 57 | #define V3_LB_MAP0 0x0000005E | ||
| 58 | #define V3_LB_MAP1 0x00000062 | ||
| 59 | #define V3_LB_BASE2 0x00000064 | ||
| 60 | #define V3_LB_MAP2 0x00000066 | ||
| 61 | #define V3_LB_SIZE 0x00000068 | ||
| 62 | #define V3_LB_IO_BASE 0x0000006E | ||
| 63 | #define V3_FIFO_CFG 0x00000070 | ||
| 64 | #define V3_FIFO_PRIORITY 0x00000072 | ||
| 65 | #define V3_FIFO_STAT 0x00000074 | ||
| 66 | #define V3_LB_ISTAT 0x00000076 | ||
| 67 | #define V3_LB_IMASK 0x00000077 | ||
| 68 | #define V3_SYSTEM 0x00000078 | ||
| 69 | #define V3_LB_CFG 0x0000007A | ||
| 70 | #define V3_PCI_CFG 0x0000007C | ||
| 71 | #define V3_DMA_PCI_ADR0 0x00000080 | ||
| 72 | #define V3_DMA_PCI_ADR1 0x00000090 | ||
| 73 | #define V3_DMA_LOCAL_ADR0 0x00000084 | ||
| 74 | #define V3_DMA_LOCAL_ADR1 0x00000094 | ||
| 75 | #define V3_DMA_LENGTH0 0x00000088 | ||
| 76 | #define V3_DMA_LENGTH1 0x00000098 | ||
| 77 | #define V3_DMA_CSR0 0x0000008B | ||
| 78 | #define V3_DMA_CSR1 0x0000009B | ||
| 79 | #define V3_DMA_CTLB_ADR0 0x0000008C | ||
| 80 | #define V3_DMA_CTLB_ADR1 0x0000009C | ||
| 81 | #define V3_DMA_DELAY 0x000000E0 | ||
| 82 | #define V3_MAIL_DATA 0x000000C0 | ||
| 83 | #define V3_PCI_MAIL_IEWR 0x000000D0 | ||
| 84 | #define V3_PCI_MAIL_IERD 0x000000D2 | ||
| 85 | #define V3_LB_MAIL_IEWR 0x000000D4 | ||
| 86 | #define V3_LB_MAIL_IERD 0x000000D6 | ||
| 87 | #define V3_MAIL_WR_STAT 0x000000D8 | ||
| 88 | #define V3_MAIL_RD_STAT 0x000000DA | ||
| 89 | #define V3_QBA_MAP 0x000000DC | ||
| 90 | |||
| 91 | /* PCI STATUS bits */ | ||
| 92 | #define V3_PCI_STAT_PAR_ERR BIT(15) | ||
| 93 | #define V3_PCI_STAT_SYS_ERR BIT(14) | ||
| 94 | #define V3_PCI_STAT_M_ABORT_ERR BIT(13) | ||
| 95 | #define V3_PCI_STAT_T_ABORT_ERR BIT(12) | ||
| 96 | |||
| 97 | /* LB ISTAT bits */ | ||
| 98 | #define V3_LB_ISTAT_MAILBOX BIT(7) | ||
| 99 | #define V3_LB_ISTAT_PCI_RD BIT(6) | ||
| 100 | #define V3_LB_ISTAT_PCI_WR BIT(5) | ||
| 101 | #define V3_LB_ISTAT_PCI_INT BIT(4) | ||
| 102 | #define V3_LB_ISTAT_PCI_PERR BIT(3) | ||
| 103 | #define V3_LB_ISTAT_I2O_QWR BIT(2) | ||
| 104 | #define V3_LB_ISTAT_DMA1 BIT(1) | ||
| 105 | #define V3_LB_ISTAT_DMA0 BIT(0) | ||
| 106 | |||
| 107 | /* PCI COMMAND bits */ | ||
| 108 | #define V3_COMMAND_M_FBB_EN BIT(9) | ||
| 109 | #define V3_COMMAND_M_SERR_EN BIT(8) | ||
| 110 | #define V3_COMMAND_M_PAR_EN BIT(6) | ||
| 111 | #define V3_COMMAND_M_MASTER_EN BIT(2) | ||
| 112 | #define V3_COMMAND_M_MEM_EN BIT(1) | ||
| 113 | #define V3_COMMAND_M_IO_EN BIT(0) | ||
| 114 | |||
| 115 | /* SYSTEM bits */ | ||
| 116 | #define V3_SYSTEM_M_RST_OUT BIT(15) | ||
| 117 | #define V3_SYSTEM_M_LOCK BIT(14) | ||
| 118 | #define V3_SYSTEM_UNLOCK 0xa05f | ||
| 119 | |||
| 120 | /* PCI CFG bits */ | ||
| 121 | #define V3_PCI_CFG_M_I2O_EN BIT(15) | ||
| 122 | #define V3_PCI_CFG_M_IO_REG_DIS BIT(14) | ||
| 123 | #define V3_PCI_CFG_M_IO_DIS BIT(13) | ||
| 124 | #define V3_PCI_CFG_M_EN3V BIT(12) | ||
| 125 | #define V3_PCI_CFG_M_RETRY_EN BIT(10) | ||
| 126 | #define V3_PCI_CFG_M_AD_LOW1 BIT(9) | ||
| 127 | #define V3_PCI_CFG_M_AD_LOW0 BIT(8) | ||
| 128 | /* | ||
| 129 | * This is the value applied to C/BE[3:1], with bit 0 always held 0 | ||
| 130 | * during DMA access. | ||
| 131 | */ | ||
| 132 | #define V3_PCI_CFG_M_RTYPE_SHIFT 5 | ||
| 133 | #define V3_PCI_CFG_M_WTYPE_SHIFT 1 | ||
| 134 | #define V3_PCI_CFG_TYPE_DEFAULT 0x3 | ||
| 135 | |||
| 136 | /* PCI BASE bits (PCI -> Local Bus) */ | ||
| 137 | #define V3_PCI_BASE_M_ADR_BASE 0xFFF00000U | ||
| 138 | #define V3_PCI_BASE_M_ADR_BASEL 0x000FFF00U | ||
| 139 | #define V3_PCI_BASE_M_PREFETCH BIT(3) | ||
| 140 | #define V3_PCI_BASE_M_TYPE (3 << 1) | ||
| 141 | #define V3_PCI_BASE_M_IO BIT(0) | ||
| 142 | |||
| 143 | /* PCI MAP bits (PCI -> Local bus) */ | ||
| 144 | #define V3_PCI_MAP_M_MAP_ADR 0xFFF00000U | ||
| 145 | #define V3_PCI_MAP_M_RD_POST_INH BIT(15) | ||
| 146 | #define V3_PCI_MAP_M_ROM_SIZE (3 << 10) | ||
| 147 | #define V3_PCI_MAP_M_SWAP (3 << 8) | ||
| 148 | #define V3_PCI_MAP_M_ADR_SIZE 0x000000F0U | ||
| 149 | #define V3_PCI_MAP_M_REG_EN BIT(1) | ||
| 150 | #define V3_PCI_MAP_M_ENABLE BIT(0) | ||
| 151 | |||
| 152 | /* LB_BASE0,1 bits (Local bus -> PCI) */ | ||
| 153 | #define V3_LB_BASE_ADR_BASE 0xfff00000U | ||
| 154 | #define V3_LB_BASE_SWAP (3 << 8) | ||
| 155 | #define V3_LB_BASE_ADR_SIZE (15 << 4) | ||
| 156 | #define V3_LB_BASE_PREFETCH BIT(3) | ||
| 157 | #define V3_LB_BASE_ENABLE BIT(0) | ||
| 158 | |||
| 159 | #define V3_LB_BASE_ADR_SIZE_1MB (0 << 4) | ||
| 160 | #define V3_LB_BASE_ADR_SIZE_2MB (1 << 4) | ||
| 161 | #define V3_LB_BASE_ADR_SIZE_4MB (2 << 4) | ||
| 162 | #define V3_LB_BASE_ADR_SIZE_8MB (3 << 4) | ||
| 163 | #define V3_LB_BASE_ADR_SIZE_16MB (4 << 4) | ||
| 164 | #define V3_LB_BASE_ADR_SIZE_32MB (5 << 4) | ||
| 165 | #define V3_LB_BASE_ADR_SIZE_64MB (6 << 4) | ||
| 166 | #define V3_LB_BASE_ADR_SIZE_128MB (7 << 4) | ||
| 167 | #define V3_LB_BASE_ADR_SIZE_256MB (8 << 4) | ||
| 168 | #define V3_LB_BASE_ADR_SIZE_512MB (9 << 4) | ||
| 169 | #define V3_LB_BASE_ADR_SIZE_1GB (10 << 4) | ||
| 170 | #define V3_LB_BASE_ADR_SIZE_2GB (11 << 4) | ||
| 171 | |||
| 172 | #define v3_addr_to_lb_base(a) ((a) & V3_LB_BASE_ADR_BASE) | ||
| 173 | |||
| 174 | /* LB_MAP0,1 bits (Local bus -> PCI) */ | ||
| 175 | #define V3_LB_MAP_MAP_ADR 0xfff0U | ||
| 176 | #define V3_LB_MAP_TYPE (7 << 1) | ||
| 177 | #define V3_LB_MAP_AD_LOW_EN BIT(0) | ||
| 178 | |||
| 179 | #define V3_LB_MAP_TYPE_IACK (0 << 1) | ||
| 180 | #define V3_LB_MAP_TYPE_IO (1 << 1) | ||
| 181 | #define V3_LB_MAP_TYPE_MEM (3 << 1) | ||
| 182 | #define V3_LB_MAP_TYPE_CONFIG (5 << 1) | ||
| 183 | #define V3_LB_MAP_TYPE_MEM_MULTIPLE (6 << 1) | ||
| 184 | |||
| 185 | #define v3_addr_to_lb_map(a) (((a) >> 16) & V3_LB_MAP_MAP_ADR) | ||
| 186 | |||
| 187 | /* LB_BASE2 bits (Local bus -> PCI IO) */ | ||
| 188 | #define V3_LB_BASE2_ADR_BASE 0xff00U | ||
| 189 | #define V3_LB_BASE2_SWAP_AUTO (3 << 6) | ||
| 190 | #define V3_LB_BASE2_ENABLE BIT(0) | ||
| 191 | |||
| 192 | #define v3_addr_to_lb_base2(a) (((a) >> 16) & V3_LB_BASE2_ADR_BASE) | ||
| 193 | |||
| 194 | /* LB_MAP2 bits (Local bus -> PCI IO) */ | ||
| 195 | #define V3_LB_MAP2_MAP_ADR 0xff00U | ||
| 196 | |||
| 197 | #define v3_addr_to_lb_map2(a) (((a) >> 16) & V3_LB_MAP2_MAP_ADR) | ||
| 198 | |||
| 199 | /* FIFO priority bits */ | ||
| 200 | #define V3_FIFO_PRIO_LOCAL BIT(12) | ||
| 201 | #define V3_FIFO_PRIO_LB_RD1_FLUSH_EOB BIT(10) | ||
| 202 | #define V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 BIT(11) | ||
| 203 | #define V3_FIFO_PRIO_LB_RD1_FLUSH_ANY (BIT(10)|BIT(11)) | ||
| 204 | #define V3_FIFO_PRIO_LB_RD0_FLUSH_EOB BIT(8) | ||
| 205 | #define V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 BIT(9) | ||
| 206 | #define V3_FIFO_PRIO_LB_RD0_FLUSH_ANY (BIT(8)|BIT(9)) | ||
| 207 | #define V3_FIFO_PRIO_PCI BIT(4) | ||
| 208 | #define V3_FIFO_PRIO_PCI_RD1_FLUSH_EOB BIT(2) | ||
| 209 | #define V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 BIT(3) | ||
| 210 | #define V3_FIFO_PRIO_PCI_RD1_FLUSH_ANY (BIT(2)|BIT(3)) | ||
| 211 | #define V3_FIFO_PRIO_PCI_RD0_FLUSH_EOB BIT(0) | ||
| 212 | #define V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1 BIT(1) | ||
| 213 | #define V3_FIFO_PRIO_PCI_RD0_FLUSH_ANY (BIT(0)|BIT(1)) | ||
| 214 | |||
| 215 | /* Local bus configuration bits */ | ||
| 216 | #define V3_LB_CFG_LB_TO_64_CYCLES 0x0000 | ||
| 217 | #define V3_LB_CFG_LB_TO_256_CYCLES BIT(13) | ||
| 218 | #define V3_LB_CFG_LB_TO_512_CYCLES BIT(14) | ||
| 219 | #define V3_LB_CFG_LB_TO_1024_CYCLES (BIT(13)|BIT(14)) | ||
| 220 | #define V3_LB_CFG_LB_RST BIT(12) | ||
| 221 | #define V3_LB_CFG_LB_PPC_RDY BIT(11) | ||
| 222 | #define V3_LB_CFG_LB_LB_INT BIT(10) | ||
| 223 | #define V3_LB_CFG_LB_ERR_EN BIT(9) | ||
| 224 | #define V3_LB_CFG_LB_RDY_EN BIT(8) | ||
| 225 | #define V3_LB_CFG_LB_BE_IMODE BIT(7) | ||
| 226 | #define V3_LB_CFG_LB_BE_OMODE BIT(6) | ||
| 227 | #define V3_LB_CFG_LB_ENDIAN BIT(5) | ||
| 228 | #define V3_LB_CFG_LB_PARK_EN BIT(4) | ||
| 229 | #define V3_LB_CFG_LB_FBB_DIS BIT(2) | ||
| 230 | |||
| 231 | /* ARM Integrator-specific extended control registers */ | ||
| 232 | #define INTEGRATOR_SC_PCI_OFFSET 0x18 | ||
| 233 | #define INTEGRATOR_SC_PCI_ENABLE BIT(0) | ||
| 234 | #define INTEGRATOR_SC_PCI_INTCLR BIT(1) | ||
| 235 | #define INTEGRATOR_SC_LBFADDR_OFFSET 0x20 | ||
| 236 | #define INTEGRATOR_SC_LBFCODE_OFFSET 0x24 | ||
| 237 | |||
| 238 | struct v3_pci { | ||
| 239 | struct device *dev; | ||
| 240 | void __iomem *base; | ||
| 241 | void __iomem *config_base; | ||
| 242 | struct pci_bus *bus; | ||
| 243 | u32 config_mem; | ||
| 244 | u32 io_mem; | ||
| 245 | u32 non_pre_mem; | ||
| 246 | u32 pre_mem; | ||
| 247 | phys_addr_t io_bus_addr; | ||
| 248 | phys_addr_t non_pre_bus_addr; | ||
| 249 | phys_addr_t pre_bus_addr; | ||
| 250 | struct regmap *map; | ||
| 251 | }; | ||
| 252 | |||
| 253 | /* | ||
| 254 | * The V3 PCI interface chip in Integrator provides several windows from | ||
| 255 | * local bus memory into the PCI memory areas. Unfortunately, there | ||
| 256 | * are not really enough windows for our usage; therefore, we reuse | ||
| 257 | * one of the windows for access to PCI configuration space. On the | ||
| 258 | * Integrator/AP, the memory map is as follows: | ||
| 259 | * | ||
| 260 | * Local Bus Memory Usage | ||
| 261 | * | ||
| 262 | * 40000000 - 4FFFFFFF PCI memory. 256M non-prefetchable | ||
| 263 | * 50000000 - 5FFFFFFF PCI memory. 256M prefetchable | ||
| 264 | * 60000000 - 60FFFFFF PCI IO. 16M | ||
| 265 | * 61000000 - 61FFFFFF PCI Configuration. 16M | ||
| 266 | * | ||
| 267 | * There are three V3 windows, each described by a pair of V3 registers. | ||
| 268 | * These are LB_BASE0/LB_MAP0, LB_BASE1/LB_MAP1 and LB_BASE2/LB_MAP2. | ||
| 269 | * Base0 and Base1 can be used for any type of PCI memory access. Base2 | ||
| 270 | * can be used either for PCI I/O or for I2O accesses. By default, uHAL | ||
| 271 | * uses this only for PCI I/O space. | ||
| 272 | * | ||
| 273 | * Normally these spaces are mapped using the following base registers: | ||
| 274 | * | ||
| 275 | * Usage Local Bus Memory Base/Map registers used | ||
| 276 | * | ||
| 277 | * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 | ||
| 278 | * Mem 50000000 - 5FFFFFFF LB_BASE1/LB_MAP1 | ||
| 279 | * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 | ||
| 280 | * Cfg 61000000 - 61FFFFFF | ||
| 281 | * | ||
| 282 | * This means that I2O and PCI configuration space accesses will fail. | ||
| 283 | * When PCI configuration accesses are needed (via the uHAL PCI | ||
| 284 | * configuration space primitives) we must remap the spaces as follows: | ||
| 285 | * | ||
| 286 | * Usage Local Bus Memory Base/Map registers used | ||
| 287 | * | ||
| 288 | * Mem 40000000 - 4FFFFFFF LB_BASE0/LB_MAP0 | ||
| 289 | * Mem 50000000 - 5FFFFFFF LB_BASE0/LB_MAP0 | ||
| 290 | * IO 60000000 - 60FFFFFF LB_BASE2/LB_MAP2 | ||
| 291 | * Cfg 61000000 - 61FFFFFF LB_BASE1/LB_MAP1 | ||
| 292 | * | ||
| 293 | * To make this work, the code depends on overlapping windows working. | ||
| 294 | * The V3 chip translates an address by checking its range within | ||
| 295 | * each of the BASE/MAP pairs in turn (in ascending register number | ||
| 296 | * order). It will use the first matching pair. So, for example, | ||
| 297 | * if the same address is mapped by both LB_BASE0/LB_MAP0 and | ||
| 298 | * LB_BASE1/LB_MAP1, the V3 will use the translation from | ||
| 299 | * LB_BASE0/LB_MAP0. | ||
| 300 | * | ||
| 301 | * To allow PCI Configuration space access, the code enlarges the | ||
| 302 | * window mapped by LB_BASE0/LB_MAP0 from 256M to 512M. This occludes | ||
| 303 | * the window currently mapped by LB_BASE1/LB_MAP1 so that it can | ||
| 304 | * be remapped for use by configuration cycles. | ||
| 305 | * | ||
| 306 | * At the end of the PCI Configuration space accesses, | ||
| 307 | * LB_BASE1/LB_MAP1 is reset to map PCI Memory. Finally the window | ||
| 308 | * mapped by LB_BASE0/LB_MAP0 is reduced in size from 512M to 256M to | ||
| 309 | * reveal the now restored LB_BASE1/LB_MAP1 window. | ||
| 310 | * | ||
| 311 | * NOTE: We do not set up I2O mapping. I suspect that this is only | ||
| 312 | * for an intelligent (target) device. Using I2O disables most of | ||
| 313 | * the mappings into PCI memory. | ||
| 314 | */ | ||
| 315 | static void __iomem *v3_map_bus(struct pci_bus *bus, | ||
| 316 | unsigned int devfn, int offset) | ||
| 317 | { | ||
| 318 | struct v3_pci *v3 = bus->sysdata; | ||
| 319 | unsigned int address, mapaddress, busnr; | ||
| 320 | |||
| 321 | busnr = bus->number; | ||
| 322 | if (busnr == 0) { | ||
| 323 | int slot = PCI_SLOT(devfn); | ||
| 324 | |||
| 325 | /* | ||
| 326 | * local bus segment, so we need a type 0 config cycle | ||
| 327 | * | ||
| 328 | * build the PCI configuration "address" with one-hot in | ||
| 329 | * A31-A11 | ||
| 330 | * | ||
| 331 | * mapaddress: | ||
| 332 | * 3:1 = config cycle (101) | ||
| 333 | * 0 = PCI A1 & A0 are 0 (0) | ||
| 334 | */ | ||
| 335 | address = PCI_FUNC(devfn) << 8; | ||
| 336 | mapaddress = V3_LB_MAP_TYPE_CONFIG; | ||
| 337 | |||
| 338 | if (slot > 12) | ||
| 339 | /* | ||
| 340 | * high order bits are handled by the MAP register | ||
| 341 | */ | ||
| 342 | mapaddress |= BIT(slot - 5); | ||
| 343 | else | ||
| 344 | /* | ||
| 345 | * low order bits handled directly in the address | ||
| 346 | */ | ||
| 347 | address |= BIT(slot + 11); | ||
| 348 | } else { | ||
| 349 | /* | ||
| 350 | * not the local bus segment, so we need a type 1 config cycle | ||
| 351 | * | ||
| 352 | * address: | ||
| 353 | * 23:16 = bus number | ||
| 354 | * 15:11 = slot number (7:3 of devfn) | ||
| 355 | * 10:8 = func number (2:0 of devfn) | ||
| 356 | * | ||
| 357 | * mapaddress: | ||
| 358 | * 3:1 = config cycle (101) | ||
| 359 | * 0 = PCI A1 & A0 from host bus (1) | ||
| 360 | */ | ||
| 361 | mapaddress = V3_LB_MAP_TYPE_CONFIG | V3_LB_MAP_AD_LOW_EN; | ||
| 362 | address = (busnr << 16) | (devfn << 8); | ||
| 363 | } | ||
| 364 | |||
| 365 | /* | ||
| 366 | * Set up base0 to see all 512 MB of memory space (not | ||
| 367 | * prefetchable); this frees up base1 for reuse by | ||
| 368 | * configuration memory. | ||
| 369 | */ | ||
| 370 | writel(v3_addr_to_lb_base(v3->non_pre_mem) | | ||
| 371 | V3_LB_BASE_ADR_SIZE_512MB | V3_LB_BASE_ENABLE, | ||
| 372 | v3->base + V3_LB_BASE0); | ||
| 373 | |||
| 374 | /* | ||
| 375 | * Set up base1/map1 to point into configuration space. | ||
| 376 | * The config mem is always 16MB. | ||
| 377 | */ | ||
| 378 | writel(v3_addr_to_lb_base(v3->config_mem) | | ||
| 379 | V3_LB_BASE_ADR_SIZE_16MB | V3_LB_BASE_ENABLE, | ||
| 380 | v3->base + V3_LB_BASE1); | ||
| 381 | writew(mapaddress, v3->base + V3_LB_MAP1); | ||
| 382 | |||
| 383 | return v3->config_base + address + offset; | ||
| 384 | } | ||
| 385 | |||
| 386 | static void v3_unmap_bus(struct v3_pci *v3) | ||
| 387 | { | ||
| 388 | /* | ||
| 389 | * Reassign base1 for use by prefetchable PCI memory | ||
| 390 | */ | ||
| 391 | writel(v3_addr_to_lb_base(v3->pre_mem) | | ||
| 392 | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_PREFETCH | | ||
| 393 | V3_LB_BASE_ENABLE, | ||
| 394 | v3->base + V3_LB_BASE1); | ||
| 395 | writew(v3_addr_to_lb_map(v3->pre_bus_addr) | | ||
| 396 | V3_LB_MAP_TYPE_MEM, /* was V3_LB_MAP_TYPE_MEM_MULTIPLE */ | ||
| 397 | v3->base + V3_LB_MAP1); | ||
| 398 | |||
| 399 | /* | ||
| 400 | * And shrink base0 back to a 256M window (NOTE: MAP0 already correct) | ||
| 401 | */ | ||
| 402 | writel(v3_addr_to_lb_base(v3->non_pre_mem) | | ||
| 403 | V3_LB_BASE_ADR_SIZE_256MB | V3_LB_BASE_ENABLE, | ||
| 404 | v3->base + V3_LB_BASE0); | ||
| 405 | } | ||
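Worked examples of the config-cycle encoding in v3_map_bus() (hypothetical targets): a type 0 cycle to bus 0, slot 3, function 0 gives address = BIT(3 + 11) = 0x4000 and mapaddress = V3_LB_MAP_TYPE_CONFIG = 0xa, i.e. a one-hot assertion on AD14; for slot 13 the one-hot bit no longer fits in the address, so mapaddress |= BIT(13 - 5) moves it into the MAP register. A type 1 cycle to bus 1, slot 2, function 1 (devfn 0x11) gives address = (1 << 16) | (0x11 << 8) = 0x11100 and mapaddress = 0xa | V3_LB_MAP_AD_LOW_EN = 0xb.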
| 406 | |||
| 407 | static int v3_pci_read_config(struct pci_bus *bus, unsigned int fn, | ||
| 408 | int config, int size, u32 *value) | ||
| 409 | { | ||
| 410 | struct v3_pci *v3 = bus->sysdata; | ||
| 411 | int ret; | ||
| 412 | |||
| 413 | ret = pci_generic_config_read(bus, fn, config, size, value); | ||
| 414 | dev_dbg(&bus->dev, | ||
| 415 | "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 416 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); | ||
| 417 | v3_unmap_bus(v3); | ||
| 418 | return ret; | ||
| 419 | } | ||
| 420 | |||
| 421 | static int v3_pci_write_config(struct pci_bus *bus, unsigned int fn, | ||
| 422 | int config, int size, u32 value) | ||
| 423 | { | ||
| 424 | struct v3_pci *v3 = bus->sysdata; | ||
| 425 | int ret; | ||
| 426 | |||
| 427 | dev_dbg(&bus->dev, | ||
| 428 | "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 429 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); | ||
| 430 | ret = pci_generic_config_write(bus, fn, config, size, value); | ||
| 431 | v3_unmap_bus(v3); | ||
| 432 | return ret; | ||
| 433 | } | ||
| 434 | |||
| 435 | static struct pci_ops v3_pci_ops = { | ||
| 436 | .map_bus = v3_map_bus, | ||
| 437 | .read = v3_pci_read_config, | ||
| 438 | .write = v3_pci_write_config, | ||
| 439 | }; | ||
| 440 | |||
| 441 | static irqreturn_t v3_irq(int irq, void *data) | ||
| 442 | { | ||
| 443 | struct v3_pci *v3 = data; | ||
| 444 | struct device *dev = v3->dev; | ||
| 445 | u32 status; | ||
| 446 | |||
| 447 | status = readw(v3->base + V3_PCI_STAT); | ||
| 448 | if (status & V3_PCI_STAT_PAR_ERR) | ||
| 449 | dev_err(dev, "parity error interrupt\n"); | ||
| 450 | if (status & V3_PCI_STAT_SYS_ERR) | ||
| 451 | dev_err(dev, "system error interrupt\n"); | ||
| 452 | if (status & V3_PCI_STAT_M_ABORT_ERR) | ||
| 453 | dev_err(dev, "master abort error interrupt\n"); | ||
| 454 | if (status & V3_PCI_STAT_T_ABORT_ERR) | ||
| 455 | dev_err(dev, "target abort error interrupt\n"); | ||
| 456 | writew(status, v3->base + V3_PCI_STAT); | ||
| 457 | |||
| 458 | status = readb(v3->base + V3_LB_ISTAT); | ||
| 459 | if (status & V3_LB_ISTAT_MAILBOX) | ||
| 460 | dev_info(dev, "PCI mailbox interrupt\n"); | ||
| 461 | if (status & V3_LB_ISTAT_PCI_RD) | ||
| 462 | dev_err(dev, "PCI target LB->PCI READ abort interrupt\n"); | ||
| 463 | if (status & V3_LB_ISTAT_PCI_WR) | ||
| 464 | dev_err(dev, "PCI target LB->PCI WRITE abort interrupt\n"); | ||
| 465 | if (status & V3_LB_ISTAT_PCI_INT) | ||
| 466 | dev_info(dev, "PCI pin interrupt\n"); | ||
| 467 | if (status & V3_LB_ISTAT_PCI_PERR) | ||
| 468 | dev_err(dev, "PCI parity error interrupt\n"); | ||
| 469 | if (status & V3_LB_ISTAT_I2O_QWR) | ||
| 470 | dev_info(dev, "I2O inbound post queue interrupt\n"); | ||
| 471 | if (status & V3_LB_ISTAT_DMA1) | ||
| 472 | dev_info(dev, "DMA channel 1 interrupt\n"); | ||
| 473 | if (status & V3_LB_ISTAT_DMA0) | ||
| 474 | dev_info(dev, "DMA channel 0 interrupt\n"); | ||
| 475 | /* Clear all possible interrupts on the local bus */ | ||
| 476 | writeb(0, v3->base + V3_LB_ISTAT); | ||
| 477 | if (v3->map) | ||
| 478 | regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, | ||
| 479 | INTEGRATOR_SC_PCI_ENABLE | | ||
| 480 | INTEGRATOR_SC_PCI_INTCLR); | ||
| 481 | |||
| 482 | return IRQ_HANDLED; | ||
| 483 | } | ||
| 484 | |||
| 485 | static int v3_integrator_init(struct v3_pci *v3) | ||
| 486 | { | ||
| 487 | unsigned int val; | ||
| 488 | |||
| 489 | v3->map = | ||
| 490 | syscon_regmap_lookup_by_compatible("arm,integrator-ap-syscon"); | ||
| 491 | if (IS_ERR(v3->map)) { | ||
| 492 | dev_err(v3->dev, "no syscon\n"); | ||
| 493 | return -ENODEV; | ||
| 494 | } | ||
| 495 | |||
| 496 | regmap_read(v3->map, INTEGRATOR_SC_PCI_OFFSET, &val); | ||
| 497 | /* Take the PCI bridge out of reset, clear IRQs */ | ||
| 498 | regmap_write(v3->map, INTEGRATOR_SC_PCI_OFFSET, | ||
| 499 | INTEGRATOR_SC_PCI_ENABLE | | ||
| 500 | INTEGRATOR_SC_PCI_INTCLR); | ||
| 501 | |||
| 502 | if (!(val & INTEGRATOR_SC_PCI_ENABLE)) { | ||
| 503 | /* If we were in reset we need to sleep a bit */ | ||
| 504 | msleep(230); | ||
| 505 | |||
| 506 | /* Set the physical base for the controller itself */ | ||
| 507 | writel(0x6200, v3->base + V3_LB_IO_BASE); | ||
| 508 | |||
| 509 | /* Wait for the mailbox to settle after reset */ | ||
| 510 | do { | ||
| 511 | writeb(0xaa, v3->base + V3_MAIL_DATA); | ||
| 512 | writeb(0x55, v3->base + V3_MAIL_DATA + 4); | ||
| 513 | } while (readb(v3->base + V3_MAIL_DATA) != 0xaa && | ||
| 514 | readb(v3->base + V3_MAIL_DATA + 4) != 0x55); | ||
| 515 | } | ||
| 516 | |||
| 517 | dev_info(v3->dev, "initialized PCI V3 Integrator/AP integration\n"); | ||
| 518 | |||
| 519 | return 0; | ||
| 520 | } | ||
| 521 | |||
| 522 | static int v3_pci_setup_resource(struct v3_pci *v3, | ||
| 523 | resource_size_t io_base, | ||
| 524 | struct pci_host_bridge *host, | ||
| 525 | struct resource_entry *win) | ||
| 526 | { | ||
| 527 | struct device *dev = v3->dev; | ||
| 528 | struct resource *mem; | ||
| 529 | struct resource *io; | ||
| 530 | int ret; | ||
| 531 | |||
| 532 | switch (resource_type(win->res)) { | ||
| 533 | case IORESOURCE_IO: | ||
| 534 | io = win->res; | ||
| 535 | io->name = "V3 PCI I/O"; | ||
| 536 | v3->io_mem = io_base; | ||
| 537 | v3->io_bus_addr = io->start - win->offset; | ||
| 538 | dev_dbg(dev, "I/O window %pR, bus addr %pap\n", | ||
| 539 | io, &v3->io_bus_addr); | ||
| 540 | ret = pci_remap_iospace(io, io_base); | ||
| 541 | if (ret) { | ||
| 542 | dev_warn(dev, | ||
| 543 | "error %d: failed to map resource %pR\n", | ||
| 544 | ret, io); | ||
| 545 | return ret; | ||
| 546 | } | ||
| 547 | /* Setup window 2 - PCI I/O */ | ||
| 548 | writel(v3_addr_to_lb_base2(v3->io_mem) | | ||
| 549 | V3_LB_BASE2_ENABLE, | ||
| 550 | v3->base + V3_LB_BASE2); | ||
| 551 | writew(v3_addr_to_lb_map2(v3->io_bus_addr), | ||
| 552 | v3->base + V3_LB_MAP2); | ||
| 553 | break; | ||
| 554 | case IORESOURCE_MEM: | ||
| 555 | mem = win->res; | ||
| 556 | if (mem->flags & IORESOURCE_PREFETCH) { | ||
| 557 | mem->name = "V3 PCI PRE-MEM"; | ||
| 558 | v3->pre_mem = mem->start; | ||
| 559 | v3->pre_bus_addr = mem->start - win->offset; | ||
| 560 | dev_dbg(dev, "PREFETCHABLE MEM window %pR, bus addr %pap\n", | ||
| 561 | mem, &v3->pre_bus_addr); | ||
| 562 | if (resource_size(mem) != SZ_256M) { | ||
| 563 | dev_err(dev, "prefetchable memory range is not 256MB\n"); | ||
| 564 | return -EINVAL; | ||
| 565 | } | ||
| 566 | if (v3->non_pre_mem && | ||
| 567 | (mem->start != v3->non_pre_mem + SZ_256M)) { | ||
| 568 | dev_err(dev, | ||
| 569 | "prefetchable memory is not adjacent to non-prefetchable memory\n"); | ||
| 570 | return -EINVAL; | ||
| 571 | } | ||
| 572 | /* Setup window 1 - PCI prefetchable memory */ | ||
| 573 | writel(v3_addr_to_lb_base(v3->pre_mem) | | ||
| 574 | V3_LB_BASE_ADR_SIZE_256MB | | ||
| 575 | V3_LB_BASE_PREFETCH | | ||
| 576 | V3_LB_BASE_ENABLE, | ||
| 577 | v3->base + V3_LB_BASE1); | ||
| 578 | writew(v3_addr_to_lb_map(v3->pre_bus_addr) | | ||
| 579 | V3_LB_MAP_TYPE_MEM, /* Was V3_LB_MAP_TYPE_MEM_MULTIPLE */ | ||
| 580 | v3->base + V3_LB_MAP1); | ||
| 581 | } else { | ||
| 582 | mem->name = "V3 PCI NON-PRE-MEM"; | ||
| 583 | v3->non_pre_mem = mem->start; | ||
| 584 | v3->non_pre_bus_addr = mem->start - win->offset; | ||
| 585 | dev_dbg(dev, "NON-PREFETCHABLE MEM window %pR, bus addr %pap\n", | ||
| 586 | mem, &v3->non_pre_bus_addr); | ||
| 587 | if (resource_size(mem) != SZ_256M) { | ||
| 588 | dev_err(dev, | ||
| 589 | "non-prefetchable memory range is not 256MB\n"); | ||
| 590 | return -EINVAL; | ||
| 591 | } | ||
| 592 | /* Setup window 0 - PCI non-prefetchable memory */ | ||
| 593 | writel(v3_addr_to_lb_base(v3->non_pre_mem) | | ||
| 594 | V3_LB_BASE_ADR_SIZE_256MB | | ||
| 595 | V3_LB_BASE_ENABLE, | ||
| 596 | v3->base + V3_LB_BASE0); | ||
| 597 | writew(v3_addr_to_lb_map(v3->non_pre_bus_addr) | | ||
| 598 | V3_LB_MAP_TYPE_MEM, | ||
| 599 | v3->base + V3_LB_MAP0); | ||
| 600 | } | ||
| 601 | break; | ||
| 602 | case IORESOURCE_BUS: | ||
| 603 | dev_dbg(dev, "BUS %pR\n", win->res); | ||
| 604 | host->busnr = win->res->start; | ||
| 605 | break; | ||
| 606 | default: | ||
| 607 | dev_info(dev, "Unknown resource type %lu\n", | ||
| 608 | resource_type(win->res)); | ||
| 609 | break; | ||
| 610 | } | ||
| 611 | |||
| 612 | return 0; | ||
| 613 | } | ||
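Plugging the Integrator/AP numbers from the memory-map comment above into the I/O branch (example values): an I/O window at local bus address 0x60000000 gives v3_addr_to_lb_base2(0x60000000) = 0x6000, so LB_BASE2 is written as 0x6001 (enable bit set), while a PCI bus address of 0 leaves LB_MAP2 as 0.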
| 614 | |||
| 615 | static int v3_get_dma_range_config(struct v3_pci *v3, | ||
| 616 | struct of_pci_range *range, | ||
| 617 | u32 *pci_base, u32 *pci_map) | ||
| 618 | { | ||
| 619 | struct device *dev = v3->dev; | ||
| 620 | u64 cpu_end = range->cpu_addr + range->size - 1; | ||
| 621 | u64 pci_end = range->pci_addr + range->size - 1; | ||
| 622 | u32 val; | ||
| 623 | |||
| 624 | if (range->pci_addr & ~V3_PCI_BASE_M_ADR_BASE) { | ||
| 625 | dev_err(dev, "illegal range, only PCI bits 31..20 allowed\n"); | ||
| 626 | return -EINVAL; | ||
| 627 | } | ||
| 628 | val = ((u32)range->pci_addr) & V3_PCI_BASE_M_ADR_BASE; | ||
| 629 | *pci_base = val; | ||
| 630 | |||
| 631 | if (range->cpu_addr & ~V3_PCI_MAP_M_MAP_ADR) { | ||
| 632 | dev_err(dev, "illegal range, only CPU bits 31..20 allowed\n"); | ||
| 633 | return -EINVAL; | ||
| 634 | } | ||
| 635 | val = ((u32)range->cpu_addr) & V3_PCI_MAP_M_MAP_ADR; | ||
| 636 | |||
| 637 | switch (range->size) { | ||
| 638 | case SZ_1M: | ||
| 639 | val |= V3_LB_BASE_ADR_SIZE_1MB; | ||
| 640 | break; | ||
| 641 | case SZ_2M: | ||
| 642 | val |= V3_LB_BASE_ADR_SIZE_2MB; | ||
| 643 | break; | ||
| 644 | case SZ_4M: | ||
| 645 | val |= V3_LB_BASE_ADR_SIZE_4MB; | ||
| 646 | break; | ||
| 647 | case SZ_8M: | ||
| 648 | val |= V3_LB_BASE_ADR_SIZE_8MB; | ||
| 649 | break; | ||
| 650 | case SZ_16M: | ||
| 651 | val |= V3_LB_BASE_ADR_SIZE_16MB; | ||
| 652 | break; | ||
| 653 | case SZ_32M: | ||
| 654 | val |= V3_LB_BASE_ADR_SIZE_32MB; | ||
| 655 | break; | ||
| 656 | case SZ_64M: | ||
| 657 | val |= V3_LB_BASE_ADR_SIZE_64MB; | ||
| 658 | break; | ||
| 659 | case SZ_128M: | ||
| 660 | val |= V3_LB_BASE_ADR_SIZE_128MB; | ||
| 661 | break; | ||
| 662 | case SZ_256M: | ||
| 663 | val |= V3_LB_BASE_ADR_SIZE_256MB; | ||
| 664 | break; | ||
| 665 | case SZ_512M: | ||
| 666 | val |= V3_LB_BASE_ADR_SIZE_512MB; | ||
| 667 | break; | ||
| 668 | case SZ_1G: | ||
| 669 | val |= V3_LB_BASE_ADR_SIZE_1GB; | ||
| 670 | break; | ||
| 671 | case SZ_2G: | ||
| 672 | val |= V3_LB_BASE_ADR_SIZE_2GB; | ||
| 673 | break; | ||
| 674 | default: | ||
| 675 | dev_err(v3->dev, "illegal dma memory chunk size\n"); | ||
| 676 | return -EINVAL; | ||
| 678 | } | ||
| 679 | val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE; | ||
| 680 | *pci_map = val; | ||
| 681 | |||
| 682 | dev_dbg(dev, | ||
| 683 | "DMA MEM CPU: 0x%016llx -> 0x%016llx => " | ||
| 684 | "PCI: 0x%016llx -> 0x%016llx base %08x map %08x\n", | ||
| 685 | range->cpu_addr, cpu_end, | ||
| 686 | range->pci_addr, pci_end, | ||
| 687 | *pci_base, *pci_map); | ||
| 688 | |||
| 689 | return 0; | ||
| 690 | } | ||
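For example, a dma-ranges entry mapping 256 MB of CPU memory at 0x80000000 to PCI address 0x00000000 yields *pci_base = 0x00000000 and *pci_map = 0x80000000 | V3_LB_BASE_ADR_SIZE_256MB | V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE = 0x80000083; v3_pci_parse_map_dma_ranges() below writes the first such pair to V3_PCI_BASE0/V3_PCI_MAP0.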
| 691 | |||
| 692 | static int v3_pci_parse_map_dma_ranges(struct v3_pci *v3, | ||
| 693 | struct device_node *np) | ||
| 694 | { | ||
| 695 | struct of_pci_range range; | ||
| 696 | struct of_pci_range_parser parser; | ||
| 697 | struct device *dev = v3->dev; | ||
| 698 | int i = 0; | ||
| 699 | |||
| 700 | if (of_pci_dma_range_parser_init(&parser, np)) { | ||
| 701 | dev_err(dev, "missing dma-ranges property\n"); | ||
| 702 | return -EINVAL; | ||
| 703 | } | ||
| 704 | |||
| 705 | /* | ||
| 706 | * Get the dma-ranges from the device tree | ||
| 707 | */ | ||
| 708 | for_each_of_pci_range(&parser, &range) { | ||
| 709 | int ret; | ||
| 710 | u32 pci_base, pci_map; | ||
| 711 | |||
| 712 | ret = v3_get_dma_range_config(v3, &range, &pci_base, &pci_map); | ||
| 713 | if (ret) | ||
| 714 | return ret; | ||
| 715 | |||
| 716 | if (i == 0) { | ||
| 717 | writel(pci_base, v3->base + V3_PCI_BASE0); | ||
| 718 | writel(pci_map, v3->base + V3_PCI_MAP0); | ||
| 719 | } else if (i == 1) { | ||
| 720 | writel(pci_base, v3->base + V3_PCI_BASE1); | ||
| 721 | writel(pci_map, v3->base + V3_PCI_MAP1); | ||
| 722 | } else { | ||
| 723 | dev_err(dev, "too many ranges, only two supported\n"); | ||
| 724 | dev_err(dev, "range %d ignored\n", i); | ||
| 725 | } | ||
| 726 | i++; | ||
| 727 | } | ||
| 728 | return 0; | ||
| 729 | } | ||
| 730 | |||
| 731 | static int v3_pci_probe(struct platform_device *pdev) | ||
| 732 | { | ||
| 733 | struct device *dev = &pdev->dev; | ||
| 734 | struct device_node *np = dev->of_node; | ||
| 735 | resource_size_t io_base; | ||
| 736 | struct resource *regs; | ||
| 737 | struct resource_entry *win; | ||
| 738 | struct v3_pci *v3; | ||
| 739 | struct pci_host_bridge *host; | ||
| 740 | struct clk *clk; | ||
| 741 | u16 val; | ||
| 742 | int irq; | ||
| 743 | int ret; | ||
| 744 | LIST_HEAD(res); | ||
| 745 | |||
| 746 | host = pci_alloc_host_bridge(sizeof(*v3)); | ||
| 747 | if (!host) | ||
| 748 | return -ENOMEM; | ||
| 749 | |||
| 750 | host->dev.parent = dev; | ||
| 751 | host->ops = &v3_pci_ops; | ||
| 752 | host->busnr = 0; | ||
| 753 | host->msi = NULL; | ||
| 754 | host->map_irq = of_irq_parse_and_map_pci; | ||
| 755 | host->swizzle_irq = pci_common_swizzle; | ||
| 756 | v3 = pci_host_bridge_priv(host); | ||
| 757 | host->sysdata = v3; | ||
| 758 | v3->dev = dev; | ||
| 759 | |||
| 760 | /* Get and enable host clock */ | ||
| 761 | clk = devm_clk_get(dev, NULL); | ||
| 762 | if (IS_ERR(clk)) { | ||
| 763 | dev_err(dev, "clock not found\n"); | ||
| 764 | return PTR_ERR(clk); | ||
| 765 | } | ||
| 766 | ret = clk_prepare_enable(clk); | ||
| 767 | if (ret) { | ||
| 768 | dev_err(dev, "unable to enable clock\n"); | ||
| 769 | return ret; | ||
| 770 | } | ||
| 771 | |||
| 772 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 773 | v3->base = devm_ioremap_resource(dev, regs); | ||
| 774 | if (IS_ERR(v3->base)) | ||
| 775 | return PTR_ERR(v3->base); | ||
| 776 | /* | ||
| 777 | * The hardware has a register with the physical base address | ||
| 778 | * of the V3 controller itself; verify that this is the same | ||
| 779 | * as the physical memory we've remapped it from. | ||
| 780 | */ | ||
| 781 | if (readl(v3->base + V3_LB_IO_BASE) != (regs->start >> 16)) | ||
| 782 | dev_err(dev, "V3_LB_IO_BASE = %08x but device is @%pR\n", | ||
| 783 | readl(v3->base + V3_LB_IO_BASE), regs); | ||
| 784 | |||
| 785 | /* Configuration space is 16MB directly mapped */ | ||
| 786 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 787 | if (!regs || resource_size(regs) != SZ_16M) { | ||
| 788 | dev_err(dev, "config mem is not 16MB!\n"); | ||
| 789 | return -EINVAL; | ||
| 790 | } | ||
| 791 | v3->config_mem = regs->start; | ||
| 792 | v3->config_base = devm_ioremap_resource(dev, regs); | ||
| 793 | if (IS_ERR(v3->config_base)) | ||
| 794 | return PTR_ERR(v3->config_base); | ||
| 795 | |||
| 796 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, | ||
| 797 | &io_base); | ||
| 798 | if (ret) | ||
| 799 | return ret; | ||
| 800 | |||
| 801 | ret = devm_request_pci_bus_resources(dev, &res); | ||
| 802 | if (ret) | ||
| 803 | return ret; | ||
| 804 | |||
| 805 | /* Get and request error IRQ resource */ | ||
| 806 | irq = platform_get_irq(pdev, 0); | ||
| 807 | if (irq <= 0) { | ||
| 808 | dev_err(dev, "unable to obtain PCIv3 error IRQ\n"); | ||
| 809 | return -ENODEV; | ||
| 810 | } | ||
| 811 | ret = devm_request_irq(dev, irq, v3_irq, 0, | ||
| 812 | "PCIv3 error", v3); | ||
| 813 | if (ret < 0) { | ||
| 814 | dev_err(dev, | ||
| 815 | "unable to request PCIv3 error IRQ %d (%d)\n", | ||
| 816 | irq, ret); | ||
| 817 | return ret; | ||
| 818 | } | ||
| 819 | |||
| 820 | /* | ||
| 821 | * Unlock V3 registers, but only if they were previously locked. | ||
| 822 | */ | ||
| 823 | if (readw(v3->base + V3_SYSTEM) & V3_SYSTEM_M_LOCK) | ||
| 824 | writew(V3_SYSTEM_UNLOCK, v3->base + V3_SYSTEM); | ||
| 825 | |||
| 826 | /* Disable all slave access while we set up the windows */ | ||
| 827 | val = readw(v3->base + V3_PCI_CMD); | ||
| 828 | val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); | ||
| 829 | writew(val, v3->base + V3_PCI_CMD); | ||
| 830 | |||
| 831 | /* Put the PCI bus into reset */ | ||
| 832 | val = readw(v3->base + V3_SYSTEM); | ||
| 833 | val &= ~V3_SYSTEM_M_RST_OUT; | ||
| 834 | writew(val, v3->base + V3_SYSTEM); | ||
| 835 | |||
| 836 | /* Retry until we're ready */ | ||
| 837 | val = readw(v3->base + V3_PCI_CFG); | ||
| 838 | val |= V3_PCI_CFG_M_RETRY_EN; | ||
| 839 | writew(val, v3->base + V3_PCI_CFG); | ||
| 840 | |||
| 841 | /* Set up the local bus protocol */ | ||
| 842 | val = readw(v3->base + V3_LB_CFG); | ||
| 843 | val |= V3_LB_CFG_LB_BE_IMODE; /* Byte enable input */ | ||
| 844 | val |= V3_LB_CFG_LB_BE_OMODE; /* Byte enable output */ | ||
| 845 | val &= ~V3_LB_CFG_LB_ENDIAN; /* Little endian */ | ||
| 846 | val &= ~V3_LB_CFG_LB_PPC_RDY; /* TODO: when using on PPC403Gx, set to 1 */ | ||
| 847 | writew(val, v3->base + V3_LB_CFG); | ||
| 848 | |||
| 849 | /* Enable the PCI bus master */ | ||
| 850 | val = readw(v3->base + V3_PCI_CMD); | ||
| 851 | val |= PCI_COMMAND_MASTER; | ||
| 852 | writew(val, v3->base + V3_PCI_CMD); | ||
| 853 | |||
| 854 | /* Get the I/O and memory ranges from DT */ | ||
| 855 | resource_list_for_each_entry(win, &res) { | ||
| 856 | ret = v3_pci_setup_resource(v3, io_base, host, win); | ||
| 857 | if (ret) { | ||
| 858 | dev_err(dev, "error setting up resources\n"); | ||
| 859 | return ret; | ||
| 860 | } | ||
| 861 | } | ||
| 862 | ret = v3_pci_parse_map_dma_ranges(v3, np); | ||
| 863 | if (ret) | ||
| 864 | return ret; | ||
| 865 | |||
| 866 | /* | ||
| 867 | * Disable PCI-to-host I/O cycles, enable I/O buffers @3.3V, | ||
| 868 | * and set AD_LOW0 to 1 if one of the LB_MAP registers chooses | ||
| 869 | * to use it (should be unused). | ||
| 870 | */ | ||
| 871 | writel(0x00000000, v3->base + V3_PCI_IO_BASE); | ||
| 872 | val = V3_PCI_CFG_M_IO_REG_DIS | V3_PCI_CFG_M_IO_DIS | | ||
| 873 | V3_PCI_CFG_M_EN3V | V3_PCI_CFG_M_AD_LOW0; | ||
| 874 | /* | ||
| 875 | * Command types for DMA reads and writes on the PCI bus | ||
| 876 | */ | ||
| 877 | val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_RTYPE_SHIFT; | ||
| 878 | val |= V3_PCI_CFG_TYPE_DEFAULT << V3_PCI_CFG_M_WTYPE_SHIFT; | ||
| 879 | writew(val, v3->base + V3_PCI_CFG); | ||
| 880 | |||
| 881 | /* | ||
| 882 | * Set the V3 FIFO such that writes have higher priority than | ||
| 883 | * reads, and a local bus write causes a local bus read FIFO | ||
| 884 | * flush on aperture 1. Same for PCI. | ||
| 885 | */ | ||
| 886 | writew(V3_FIFO_PRIO_LB_RD1_FLUSH_AP1 | | ||
| 887 | V3_FIFO_PRIO_LB_RD0_FLUSH_AP1 | | ||
| 888 | V3_FIFO_PRIO_PCI_RD1_FLUSH_AP1 | | ||
| 889 | V3_FIFO_PRIO_PCI_RD0_FLUSH_AP1, | ||
| 890 | v3->base + V3_FIFO_PRIORITY); | ||
| 891 | |||
| 893 | /* | ||
| 894 | * Clear any error interrupts, and enable parity and write error | ||
| 895 | * interrupts | ||
| 896 | */ | ||
| 897 | writeb(0, v3->base + V3_LB_ISTAT); | ||
| 898 | val = readw(v3->base + V3_LB_CFG); | ||
| 899 | val |= V3_LB_CFG_LB_LB_INT; | ||
| 900 | writew(val, v3->base + V3_LB_CFG); | ||
| 901 | writeb(V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, | ||
| 902 | v3->base + V3_LB_IMASK); | ||
| 903 | |||
| 904 | /* Special Integrator initialization */ | ||
| 905 | if (of_device_is_compatible(np, "arm,integrator-ap-pci")) { | ||
| 906 | ret = v3_integrator_init(v3); | ||
| 907 | if (ret) | ||
| 908 | return ret; | ||
| 909 | } | ||
| 910 | |||
| 911 | /* Post-init: enable PCI memory and invalidate (master already on) */ | ||
| 912 | val = readw(v3->base + V3_PCI_CMD); | ||
| 913 | val |= PCI_COMMAND_MEMORY | PCI_COMMAND_INVALIDATE; | ||
| 914 | writew(val, v3->base + V3_PCI_CMD); | ||
| 915 | |||
| 916 | /* Clear pending interrupts */ | ||
| 917 | writeb(0, v3->base + V3_LB_ISTAT); | ||
| 918 | /* Read or write errors and parity errors cause interrupts */ | ||
| 919 | writeb(V3_LB_ISTAT_PCI_RD | V3_LB_ISTAT_PCI_WR | V3_LB_ISTAT_PCI_PERR, | ||
| 920 | v3->base + V3_LB_IMASK); | ||
| 921 | |||
| 922 | /* Take the PCI bus out of reset so devices can initialize */ | ||
| 923 | val = readw(v3->base + V3_SYSTEM); | ||
| 924 | val |= V3_SYSTEM_M_RST_OUT; | ||
| 925 | writew(val, v3->base + V3_SYSTEM); | ||
| 926 | |||
| 927 | /* | ||
| 928 | * Re-lock the system register. | ||
| 929 | */ | ||
| 930 | val = readw(v3->base + V3_SYSTEM); | ||
| 931 | val |= V3_SYSTEM_M_LOCK; | ||
| 932 | writew(val, v3->base + V3_SYSTEM); | ||
| 933 | |||
| 934 | list_splice_init(&res, &host->windows); | ||
| 935 | ret = pci_scan_root_bus_bridge(host); | ||
| 936 | if (ret) { | ||
| 937 | dev_err(dev, "failed to register host: %d\n", ret); | ||
| 938 | return ret; | ||
| 939 | } | ||
| 940 | v3->bus = host->bus; | ||
| 941 | |||
| 942 | pci_bus_assign_resources(v3->bus); | ||
| 943 | pci_bus_add_devices(v3->bus); | ||
| 944 | |||
| 945 | return 0; | ||
| 946 | } | ||
| 947 | |||
| 948 | static const struct of_device_id v3_pci_of_match[] = { | ||
| 949 | { | ||
| 950 | .compatible = "v3,v360epc-pci", | ||
| 951 | }, | ||
| 952 | {}, | ||
| 953 | }; | ||
| 954 | |||
| 955 | static struct platform_driver v3_pci_driver = { | ||
| 956 | .driver = { | ||
| 957 | .name = "pci-v3-semi", | ||
| 958 | .of_match_table = of_match_ptr(v3_pci_of_match), | ||
| 959 | .suppress_bind_attrs = true, | ||
| 960 | }, | ||
| 961 | .probe = v3_pci_probe, | ||
| 962 | }; | ||
| 963 | builtin_platform_driver(v3_pci_driver); | ||
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c new file mode 100644 index 000000000000..994f32061b32 --- /dev/null +++ b/drivers/pci/controller/pci-versatile.c | |||
| @@ -0,0 +1,239 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright 2004 Koninklijke Philips Electronics NV | ||
| 4 | * | ||
| 5 | * Conversion to platform driver and DT: | ||
| 6 | * Copyright 2014 Linaro Ltd. | ||
| 7 | * | ||
| 8 | * 14/04/2005 Initial version, colin.king@philips.com | ||
| 9 | */ | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/of_address.h> | ||
| 13 | #include <linux/of_pci.h> | ||
| 14 | #include <linux/of_platform.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | |||
| 18 | #include "../pci.h" | ||
| 19 | |||
| 20 | static void __iomem *versatile_pci_base; | ||
| 21 | static void __iomem *versatile_cfg_base[2]; | ||
| 22 | |||
| 23 | #define PCI_IMAP(m) (versatile_pci_base + ((m) * 4)) | ||
| 24 | #define PCI_SMAP(m) (versatile_pci_base + 0x14 + ((m) * 4)) | ||
| 25 | #define PCI_SELFID (versatile_pci_base + 0xc) | ||
| 26 | |||
| 27 | #define VP_PCI_DEVICE_ID 0x030010ee | ||
| 28 | #define VP_PCI_CLASS_ID 0x0b400000 | ||
| 29 | |||
| 30 | static u32 pci_slot_ignore; | ||
| 31 | |||
| 32 | static int __init versatile_pci_slot_ignore(char *str) | ||
| 33 | { | ||
| 34 | int retval; | ||
| 35 | int slot; | ||
| 36 | |||
| 37 | while ((retval = get_option(&str, &slot))) { | ||
| 38 | if ((slot < 0) || (slot > 31)) | ||
| 39 | pr_err("Illegal slot value: %d\n", slot); | ||
| 40 | else | ||
| 41 | pci_slot_ignore |= (1 << slot); | ||
| 42 | } | ||
| 43 | return 1; | ||
| 44 | } | ||
| 45 | __setup("pci_slot_ignore=", versatile_pci_slot_ignore); | ||
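The pci_slot_ignore= handler above builds a 32-bit mask of device slots that versatile_map_bus() later refuses to map. Below is a minimal userspace sketch of that bookkeeping, illustrative only and not driver code; the slots 8 and 9 are hypothetical examples (as a kernel command line this would be pci_slot_ignore=8,9).

#include <stdio.h>

#define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)

int main(void)
{
	unsigned int slot_ignore = 0;
	int slots[] = { 8, 9 };		/* hypothetical example slots */
	unsigned int devfn = 0x40;	/* device 8, function 0 */
	int i;

	for (i = 0; i < 2; i++)
		slot_ignore |= 1u << slots[i];

	/* map_bus() would return NULL here, hiding the device */
	printf("slot %d ignored: %d\n", PCI_SLOT(devfn),
	       !!(slot_ignore & (1u << PCI_SLOT(devfn))));
	return 0;
}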
| 46 | |||
| 47 | |||
| 48 | static void __iomem *versatile_map_bus(struct pci_bus *bus, | ||
| 49 | unsigned int devfn, int offset) | ||
| 50 | { | ||
| 51 | unsigned int busnr = bus->number; | ||
| 52 | |||
| 53 | if (pci_slot_ignore & (1 << PCI_SLOT(devfn))) | ||
| 54 | return NULL; | ||
| 55 | |||
| 56 | return versatile_cfg_base[1] + ((busnr << 16) | (devfn << 8) | offset); | ||
| 57 | } | ||
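The config-space offset computed above is an ECAM-like bus/devfn/register encoding. A small sketch of the arithmetic for a hypothetical device 01:03.0 reading register 0x10 (BAR0):

#include <stdio.h>

int main(void)
{
	unsigned int busnr = 1;
	unsigned int devfn = (3 << 3) | 0;	/* slot 3, function 0 */
	unsigned int offset = 0x10;		/* PCI_BASE_ADDRESS_0 */

	/* Matches versatile_map_bus(): (bus << 16) | (devfn << 8) | reg */
	unsigned int off = (busnr << 16) | (devfn << 8) | offset;

	printf("config window offset = %#x\n", off);	/* 0x11810 */
	return 0;
}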
| 58 | |||
| 59 | static struct pci_ops pci_versatile_ops = { | ||
| 60 | .map_bus = versatile_map_bus, | ||
| 61 | .read = pci_generic_config_read32, | ||
| 62 | .write = pci_generic_config_write, | ||
| 63 | }; | ||
| 64 | |||
| 65 | static int versatile_pci_parse_request_of_pci_ranges(struct device *dev, | ||
| 66 | struct list_head *res) | ||
| 67 | { | ||
| 68 | int err, mem = 1, res_valid = 0; | ||
| 69 | resource_size_t iobase; | ||
| 70 | struct resource_entry *win, *tmp; | ||
| 71 | |||
| 72 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, res, &iobase); | ||
| 73 | if (err) | ||
| 74 | return err; | ||
| 75 | |||
| 76 | err = devm_request_pci_bus_resources(dev, res); | ||
| 77 | if (err) | ||
| 78 | goto out_release_res; | ||
| 79 | |||
| 80 | resource_list_for_each_entry_safe(win, tmp, res) { | ||
| 81 | struct resource *res = win->res; | ||
| 82 | |||
| 83 | switch (resource_type(res)) { | ||
| 84 | case IORESOURCE_IO: | ||
| 85 | err = pci_remap_iospace(res, iobase); | ||
| 86 | if (err) { | ||
| 87 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
| 88 | err, res); | ||
| 89 | resource_list_destroy_entry(win); | ||
| 90 | } | ||
| 91 | break; | ||
| 92 | case IORESOURCE_MEM: | ||
| 93 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); | ||
| 94 | |||
| 95 | writel(res->start >> 28, PCI_IMAP(mem)); | ||
| 96 | writel(PHYS_OFFSET >> 28, PCI_SMAP(mem)); | ||
| 97 | mem++; | ||
| 98 | |||
| 99 | break; | ||
| 100 | } | ||
| 101 | } | ||
| 102 | |||
| 103 | if (res_valid) | ||
| 104 | return 0; | ||
| 105 | |||
| 106 | dev_err(dev, "non-prefetchable memory resource required\n"); | ||
| 107 | err = -EINVAL; | ||
| 108 | |||
| 109 | out_release_res: | ||
| 110 | pci_free_resource_list(res); | ||
| 111 | return err; | ||
| 112 | } | ||
| 113 | |||
| 114 | static int versatile_pci_probe(struct platform_device *pdev) | ||
| 115 | { | ||
| 116 | struct device *dev = &pdev->dev; | ||
| 117 | struct resource *res; | ||
| 118 | int ret, i, myslot = -1; | ||
| 119 | u32 val; | ||
| 120 | void __iomem *local_pci_cfg_base; | ||
| 121 | struct pci_bus *bus, *child; | ||
| 122 | struct pci_host_bridge *bridge; | ||
| 123 | LIST_HEAD(pci_res); | ||
| 124 | |||
| 125 | bridge = devm_pci_alloc_host_bridge(dev, 0); | ||
| 126 | if (!bridge) | ||
| 127 | return -ENOMEM; | ||
| 128 | |||
| 129 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 130 | versatile_pci_base = devm_ioremap_resource(dev, res); | ||
| 131 | if (IS_ERR(versatile_pci_base)) | ||
| 132 | return PTR_ERR(versatile_pci_base); | ||
| 133 | |||
| 134 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 135 | versatile_cfg_base[0] = devm_ioremap_resource(dev, res); | ||
| 136 | if (IS_ERR(versatile_cfg_base[0])) | ||
| 137 | return PTR_ERR(versatile_cfg_base[0]); | ||
| 138 | |||
| 139 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | ||
| 140 | versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); | ||
| 141 | if (IS_ERR(versatile_cfg_base[1])) | ||
| 142 | return PTR_ERR(versatile_cfg_base[1]); | ||
| 143 | |||
| 144 | ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res); | ||
| 145 | if (ret) | ||
| 146 | return ret; | ||
| 147 | |||
| 148 | /* | ||
| 149 | * We need to discover the PCI core first so it can configure | ||
| 150 | * itself before the main PCI probing is performed. | ||
| 151 | */ | ||
| 152 | for (i = 0; i < 32; i++) { | ||
| 153 | if ((readl(versatile_cfg_base[0] + (i << 11) + PCI_VENDOR_ID) == VP_PCI_DEVICE_ID) && | ||
| 154 | (readl(versatile_cfg_base[0] + (i << 11) + PCI_CLASS_REVISION) == VP_PCI_CLASS_ID)) { | ||
| 155 | myslot = i; | ||
| 156 | break; | ||
| 157 | } | ||
| 158 | } | ||
| 159 | if (myslot == -1) { | ||
| 160 | dev_err(dev, "Cannot find PCI core!\n"); | ||
| 161 | return -EIO; | ||
| 162 | } | ||
| 163 | /* | ||
| 164 | * Do not map the Versatile FPGA PCI device into memory space | ||
| 165 | */ | ||
| 166 | pci_slot_ignore |= (1 << myslot); | ||
| 167 | |||
| 168 | dev_info(dev, "PCI core found (slot %d)\n", myslot); | ||
| 169 | |||
| 170 | writel(myslot, PCI_SELFID); | ||
| 171 | local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); | ||
| 172 | |||
| 173 | val = readl(local_pci_cfg_base + PCI_COMMAND); | ||
| 174 | val |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE; | ||
| 175 | writel(val, local_pci_cfg_base + PCI_COMMAND); | ||
| 176 | |||
| 177 | /* | ||
| 178 | * Configure the PCI inbound memory windows to be 1:1 mapped to SDRAM | ||
| 179 | */ | ||
| 180 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_0); | ||
| 181 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_1); | ||
| 182 | writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2); | ||
| 183 | |||
| 184 | /* | ||
| 185 | * For many years the kernel and QEMU were symbiotically buggy | ||
| 186 | * in that they both assumed the same broken IRQ mapping. | ||
| 187 | * QEMU therefore attempts to auto-detect old broken kernels | ||
| 188 | * so that they still work on newer QEMU as they did on old | ||
| 189 | * QEMU. Since we now use the correct (i.e. matching-hardware) | ||
| 190 | * IRQ mapping we write a definitely different value to a | ||
| 191 | * PCI_INTERRUPT_LINE register to tell QEMU that we expect | ||
| 192 | * real hardware behaviour and it need not be backwards | ||
| 193 | * compatible for us. This write is harmless on real hardware. | ||
| 194 | */ | ||
| 195 | writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); | ||
| 196 | |||
| 197 | pci_add_flags(PCI_ENABLE_PROC_DOMAINS); | ||
| 198 | pci_add_flags(PCI_REASSIGN_ALL_BUS); | ||
| 199 | |||
| 200 | list_splice_init(&pci_res, &bridge->windows); | ||
| 201 | bridge->dev.parent = dev; | ||
| 202 | bridge->sysdata = NULL; | ||
| 203 | bridge->busnr = 0; | ||
| 204 | bridge->ops = &pci_versatile_ops; | ||
| 205 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 206 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 207 | |||
| 208 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 209 | if (ret < 0) | ||
| 210 | return ret; | ||
| 211 | |||
| 212 | bus = bridge->bus; | ||
| 213 | |||
| 214 | pci_assign_unassigned_bus_resources(bus); | ||
| 215 | list_for_each_entry(child, &bus->children, node) | ||
| 216 | pcie_bus_configure_settings(child); | ||
| 217 | pci_bus_add_devices(bus); | ||
| 218 | |||
| 219 | return 0; | ||
| 220 | } | ||
| 221 | |||
| 222 | static const struct of_device_id versatile_pci_of_match[] = { | ||
| 223 | { .compatible = "arm,versatile-pci", }, | ||
| 224 | { }, | ||
| 225 | }; | ||
| 226 | MODULE_DEVICE_TABLE(of, versatile_pci_of_match); | ||
| 227 | |||
| 228 | static struct platform_driver versatile_pci_driver = { | ||
| 229 | .driver = { | ||
| 230 | .name = "versatile-pci", | ||
| 231 | .of_match_table = versatile_pci_of_match, | ||
| 232 | .suppress_bind_attrs = true, | ||
| 233 | }, | ||
| 234 | .probe = versatile_pci_probe, | ||
| 235 | }; | ||
| 236 | module_platform_driver(versatile_pci_driver); | ||
| 237 | |||
| 238 | MODULE_DESCRIPTION("Versatile PCI driver"); | ||
| 239 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c new file mode 100644 index 000000000000..f4c02da84e59 --- /dev/null +++ b/drivers/pci/controller/pci-xgene-msi.c | |||
| @@ -0,0 +1,543 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * APM X-Gene MSI Driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2014, Applied Micro Circuits Corporation | ||
| 6 | * Author: Tanmay Inamdar <tinamdar@apm.com> | ||
| 7 | * Duc Dang <dhdang@apm.com> | ||
| 8 | */ | ||
| 9 | #include <linux/cpu.h> | ||
| 10 | #include <linux/interrupt.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/msi.h> | ||
| 13 | #include <linux/of_irq.h> | ||
| 14 | #include <linux/irqchip/chained_irq.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/of_pci.h> | ||
| 18 | |||
| 19 | #define MSI_IR0 0x000000 | ||
| 20 | #define MSI_INT0 0x800000 | ||
| 21 | #define IDX_PER_GROUP 8 | ||
| 22 | #define IRQS_PER_IDX 16 | ||
| 23 | #define NR_HW_IRQS 16 | ||
| 24 | #define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) | ||
| 25 | |||
| 26 | struct xgene_msi_group { | ||
| 27 | struct xgene_msi *msi; | ||
| 28 | int gic_irq; | ||
| 29 | u32 msi_grp; | ||
| 30 | }; | ||
| 31 | |||
| 32 | struct xgene_msi { | ||
| 33 | struct device_node *node; | ||
| 34 | struct irq_domain *inner_domain; | ||
| 35 | struct irq_domain *msi_domain; | ||
| 36 | u64 msi_addr; | ||
| 37 | void __iomem *msi_regs; | ||
| 38 | unsigned long *bitmap; | ||
| 39 | struct mutex bitmap_lock; | ||
| 40 | struct xgene_msi_group *msi_groups; | ||
| 41 | int num_cpus; | ||
| 42 | }; | ||
| 43 | |||
| 44 | /* Global data */ | ||
| 45 | static struct xgene_msi xgene_msi_ctrl; | ||
| 46 | |||
| 47 | static struct irq_chip xgene_msi_top_irq_chip = { | ||
| 48 | .name = "X-Gene1 MSI", | ||
| 49 | .irq_enable = pci_msi_unmask_irq, | ||
| 50 | .irq_disable = pci_msi_mask_irq, | ||
| 51 | .irq_mask = pci_msi_mask_irq, | ||
| 52 | .irq_unmask = pci_msi_unmask_irq, | ||
| 53 | }; | ||
| 54 | |||
| 55 | static struct msi_domain_info xgene_msi_domain_info = { | ||
| 56 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 57 | MSI_FLAG_PCI_MSIX), | ||
| 58 | .chip = &xgene_msi_top_irq_chip, | ||
| 59 | }; | ||
| 60 | |||
| 61 | /* | ||
| 62 | * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where | ||
| 63 | * n is the group number (0..F) and x is the register index within each group (0..7). | ||
| 64 | * The register layout is as follows: | ||
| 65 | * MSI0IR0 base_addr | ||
| 66 | * MSI0IR1 base_addr + 0x10000 | ||
| 67 | * ... ... | ||
| 68 | * MSI0IR6 base_addr + 0x60000 | ||
| 69 | * MSI0IR7 base_addr + 0x70000 | ||
| 70 | * MSI1IR0 base_addr + 0x80000 | ||
| 71 | * MSI1IR1 base_addr + 0x90000 | ||
| 72 | * ... ... | ||
| 73 | * MSI1IR7 base_addr + 0xF0000 | ||
| 74 | * MSI2IR0 base_addr + 0x100000 | ||
| 75 | * ... ... | ||
| 76 | * MSIFIR0 base_addr + 0x780000 | ||
| 77 | * MSIFIR1 base_addr + 0x790000 | ||
| 78 | * ... ... | ||
| 79 | * MSIFIR7 base_addr + 0x7F0000 | ||
| 80 | * MSIINT0 base_addr + 0x800000 | ||
| 81 | * MSIINT1 base_addr + 0x810000 | ||
| 82 | * ... ... | ||
| 83 | * MSIINTF base_addr + 0x8F0000 | ||
| 84 | * | ||
| 85 | * Each index register supports 16 MSI vectors (0..15) to generate interrupts. | ||
| 86 | * A total of 16 GIC IRQs are assigned to these 16 groups of MSI termination | ||
| 87 | * registers. | ||
| 88 | * | ||
| 89 | * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate | ||
| 90 | * the MSI pending status caused by 1 of its 8 index registers. | ||
| 91 | */ | ||
| 92 | |||
| 93 | /* MSInIRx read helper */ | ||
| 94 | static u32 xgene_msi_ir_read(struct xgene_msi *msi, | ||
| 95 | u32 msi_grp, u32 msir_idx) | ||
| 96 | { | ||
| 97 | return readl_relaxed(msi->msi_regs + MSI_IR0 + | ||
| 98 | (msi_grp << 19) + (msir_idx << 16)); | ||
| 99 | } | ||
| 100 | |||
| 101 | /* MSIINTn read helper */ | ||
| 102 | static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) | ||
| 103 | { | ||
| 104 | return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); | ||
| 105 | } | ||
| 106 | |||
| 107 | /* | ||
| 108 | * With 2048 MSI vectors supported, the MSI message can be constructed using | ||
| 109 | * the following scheme (a worked example follows xgene_compose_msi_msg()): | ||
| 110 | * - Divide into 8 256-vector groups | ||
| 111 | * Group 0: 0-255 | ||
| 112 | * Group 1: 256-511 | ||
| 113 | * Group 2: 512-767 | ||
| 114 | * ... | ||
| 115 | * Group 7: 1792-2047 | ||
| 116 | * - Each 256-vector group is divided into 16 16-vector groups | ||
| 117 | * As an example, the 16 16-vector groups for 256-vector group 0-255 are | ||
| 118 | * Group 0: 0-15 | ||
| 119 | * Group 1: 16-31 | ||
| 120 | * ... | ||
| 121 | * Group 15: 240-255 | ||
| 122 | * - The termination address of MSI vector in 256-vector group n and 16-vector | ||
| 123 | * group x is the address of MSIxIRn | ||
| 124 | * - The data for MSI vector in 16-vector group x is x | ||
| 125 | */ | ||
| 126 | static u32 hwirq_to_reg_set(unsigned long hwirq) | ||
| 127 | { | ||
| 128 | return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); | ||
| 129 | } | ||
| 130 | |||
| 131 | static u32 hwirq_to_group(unsigned long hwirq) | ||
| 132 | { | ||
| 133 | return (hwirq % NR_HW_IRQS); | ||
| 134 | } | ||
| 135 | |||
| 136 | static u32 hwirq_to_msi_data(unsigned long hwirq) | ||
| 137 | { | ||
| 138 | return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); | ||
| 139 | } | ||
| 140 | |||
| 141 | static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 142 | { | ||
| 143 | struct xgene_msi *msi = irq_data_get_irq_chip_data(data); | ||
| 144 | u32 reg_set = hwirq_to_reg_set(data->hwirq); | ||
| 145 | u32 group = hwirq_to_group(data->hwirq); | ||
| 146 | u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); | ||
| 147 | |||
| 148 | msg->address_hi = upper_32_bits(target_addr); | ||
| 149 | msg->address_lo = lower_32_bits(target_addr); | ||
| 150 | msg->data = hwirq_to_msi_data(data->hwirq); | ||
| 151 | } | ||
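A standalone sketch (plain C, not driver code) working through the decomposition above for the arbitrary example vector hwirq = 300, and checking that the message address computed in xgene_compose_msi_msg() lands on the same MSInIRx register that xgene_msi_ir_read() would poll:

#include <stdio.h>

#define IDX_PER_GROUP	8
#define IRQS_PER_IDX	16
#define NR_HW_IRQS	16

int main(void)
{
	unsigned long hwirq = 300;	/* arbitrary example vector */

	unsigned long reg_set = hwirq / (NR_HW_IRQS * IRQS_PER_IDX);	/* 1 */
	unsigned long group = hwirq % NR_HW_IRQS;			/* 12 */
	unsigned long data = (hwirq / NR_HW_IRQS) % IRQS_PER_IDX;	/* 2 */

	/* Offset encoded into the MSI message address */
	unsigned long msg_off = ((8 * group) + reg_set) << 16;
	/* Offset of the same MSInIRx register per xgene_msi_ir_read() */
	unsigned long reg_off = (group << 19) + (reg_set << 16);

	/* Both print 0x610000, i.e. MSICIR1; the device writes data 2 there */
	printf("msg_off=%#lx reg_off=%#lx data=%lu\n", msg_off, reg_off, data);
	return 0;
}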
| 152 | |||
| 153 | /* | ||
| 154 | * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain | ||
| 155 | * the expected behaviour of .set_affinity for each MSI interrupt, the 16 | ||
| 156 | * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs | ||
| 157 | * for each core). The MSI vector is moved from one MSI GIC IRQ to another | ||
| 158 | * MSI GIC IRQ to steer its MSI interrupt to the correct X-Gene v1 core. As a | ||
| 159 | * consequence, the total number of MSI vectors that X-Gene v1 supports is | ||
| 160 | * reduced to 256 (2048/8) vectors. | ||
| 161 | */ | ||
| 162 | static int hwirq_to_cpu(unsigned long hwirq) | ||
| 163 | { | ||
| 164 | return (hwirq % xgene_msi_ctrl.num_cpus); | ||
| 165 | } | ||
| 166 | |||
| 167 | static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) | ||
| 168 | { | ||
| 169 | return (hwirq - hwirq_to_cpu(hwirq)); | ||
| 170 | } | ||
| 171 | |||
| 172 | static int xgene_msi_set_affinity(struct irq_data *irqdata, | ||
| 173 | const struct cpumask *mask, bool force) | ||
| 174 | { | ||
| 175 | int target_cpu = cpumask_first(mask); | ||
| 176 | int curr_cpu; | ||
| 177 | |||
| 178 | curr_cpu = hwirq_to_cpu(irqdata->hwirq); | ||
| 179 | if (curr_cpu == target_cpu) | ||
| 180 | return IRQ_SET_MASK_OK_DONE; | ||
| 181 | |||
| 182 | /* Update MSI number to target the new CPU */ | ||
| 183 | irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; | ||
| 184 | |||
| 185 | return IRQ_SET_MASK_OK; | ||
| 186 | } | ||
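A sketch of the rebasing that xgene_msi_set_affinity() performs, assuming the 8-core case described above (num_cpus = 8) and a hypothetical vector:

#include <stdio.h>

int main(void)
{
	int num_cpus = 8;		/* assumed X-Gene v1 core count */
	unsigned long hwirq = 42;	/* hypothetical example vector */
	int target_cpu = 5;

	int curr_cpu = hwirq % num_cpus;		/* 2 */
	unsigned long canonical = hwirq - curr_cpu;	/* 40 */
	unsigned long steered = canonical + target_cpu;	/* 45 */

	printf("hwirq %lu: CPU%d -> CPU%d as hwirq %lu\n",
	       hwirq, curr_cpu, target_cpu, steered);
	return 0;
}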
| 187 | |||
| 188 | static struct irq_chip xgene_msi_bottom_irq_chip = { | ||
| 189 | .name = "MSI", | ||
| 190 | .irq_set_affinity = xgene_msi_set_affinity, | ||
| 191 | .irq_compose_msi_msg = xgene_compose_msi_msg, | ||
| 192 | }; | ||
| 193 | |||
| 194 | static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
| 195 | unsigned int nr_irqs, void *args) | ||
| 196 | { | ||
| 197 | struct xgene_msi *msi = domain->host_data; | ||
| 198 | int msi_irq; | ||
| 199 | |||
| 200 | mutex_lock(&msi->bitmap_lock); | ||
| 201 | |||
| 202 | msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, | ||
| 203 | msi->num_cpus, 0); | ||
| 204 | if (msi_irq < NR_MSI_VEC) | ||
| 205 | bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); | ||
| 206 | else | ||
| 207 | msi_irq = -ENOSPC; | ||
| 208 | |||
| 209 | mutex_unlock(&msi->bitmap_lock); | ||
| 210 | |||
| 211 | if (msi_irq < 0) | ||
| 212 | return msi_irq; | ||
| 213 | |||
| 214 | irq_domain_set_info(domain, virq, msi_irq, | ||
| 215 | &xgene_msi_bottom_irq_chip, domain->host_data, | ||
| 216 | handle_simple_irq, NULL, NULL); | ||
| 217 | |||
| 218 | return 0; | ||
| 219 | } | ||
| 220 | |||
| 221 | static void xgene_irq_domain_free(struct irq_domain *domain, | ||
| 222 | unsigned int virq, unsigned int nr_irqs) | ||
| 223 | { | ||
| 224 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
| 225 | struct xgene_msi *msi = irq_data_get_irq_chip_data(d); | ||
| 226 | u32 hwirq; | ||
| 227 | |||
| 228 | mutex_lock(&msi->bitmap_lock); | ||
| 229 | |||
| 230 | hwirq = hwirq_to_canonical_hwirq(d->hwirq); | ||
| 231 | bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); | ||
| 232 | |||
| 233 | mutex_unlock(&msi->bitmap_lock); | ||
| 234 | |||
| 235 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | ||
| 236 | } | ||
| 237 | |||
| 238 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 239 | .alloc = xgene_irq_domain_alloc, | ||
| 240 | .free = xgene_irq_domain_free, | ||
| 241 | }; | ||
| 242 | |||
| 243 | static int xgene_allocate_domains(struct xgene_msi *msi) | ||
| 244 | { | ||
| 245 | msi->inner_domain = irq_domain_add_linear(NULL, NR_MSI_VEC, | ||
| 246 | &msi_domain_ops, msi); | ||
| 247 | if (!msi->inner_domain) | ||
| 248 | return -ENOMEM; | ||
| 249 | |||
| 250 | msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(msi->node), | ||
| 251 | &xgene_msi_domain_info, | ||
| 252 | msi->inner_domain); | ||
| 253 | |||
| 254 | if (!msi->msi_domain) { | ||
| 255 | irq_domain_remove(msi->inner_domain); | ||
| 256 | return -ENOMEM; | ||
| 257 | } | ||
| 258 | |||
| 259 | return 0; | ||
| 260 | } | ||
| 261 | |||
| 262 | static void xgene_free_domains(struct xgene_msi *msi) | ||
| 263 | { | ||
| 264 | if (msi->msi_domain) | ||
| 265 | irq_domain_remove(msi->msi_domain); | ||
| 266 | if (msi->inner_domain) | ||
| 267 | irq_domain_remove(msi->inner_domain); | ||
| 268 | } | ||
| 269 | |||
| 270 | static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) | ||
| 271 | { | ||
| 272 | int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); | ||
| 273 | |||
| 274 | xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); | ||
| 275 | if (!xgene_msi->bitmap) | ||
| 276 | return -ENOMEM; | ||
| 277 | |||
| 278 | mutex_init(&xgene_msi->bitmap_lock); | ||
| 279 | |||
| 280 | xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, | ||
| 281 | sizeof(struct xgene_msi_group), | ||
| 282 | GFP_KERNEL); | ||
| 283 | if (!xgene_msi->msi_groups) | ||
| 284 | return -ENOMEM; | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | |||
| 289 | static void xgene_msi_isr(struct irq_desc *desc) | ||
| 290 | { | ||
| 291 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 292 | struct xgene_msi_group *msi_groups; | ||
| 293 | struct xgene_msi *xgene_msi; | ||
| 294 | unsigned int virq; | ||
| 295 | int msir_index, msir_val, hw_irq; | ||
| 296 | u32 intr_index, grp_select, msi_grp; | ||
| 297 | |||
| 298 | chained_irq_enter(chip, desc); | ||
| 299 | |||
| 300 | msi_groups = irq_desc_get_handler_data(desc); | ||
| 301 | xgene_msi = msi_groups->msi; | ||
| 302 | msi_grp = msi_groups->msi_grp; | ||
| 303 | |||
| 304 | /* | ||
| 305 | * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt. | ||
| 306 | * If bit x of this register is set (x is 0..7), one or more interrupts | ||
| 307 | * corresponding to MSInIRx are pending. | ||
| 308 | */ | ||
| 309 | grp_select = xgene_msi_int_read(xgene_msi, msi_grp); | ||
| 310 | while (grp_select) { | ||
| 311 | msir_index = ffs(grp_select) - 1; | ||
| 312 | /* | ||
| 313 | * Calculate MSInIRx address to read to check for interrupts | ||
| 314 | * (refer to termination address and data assignment | ||
| 315 | * described in xgene_compose_msi_msg()) | ||
| 316 | */ | ||
| 317 | msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); | ||
| 318 | while (msir_val) { | ||
| 319 | intr_index = ffs(msir_val) - 1; | ||
| 320 | /* | ||
| 321 | * Calculate MSI vector number (refer to the termination | ||
| 322 | * address and data assignment described in | ||
| 323 | * xgene_compose_msi_msg function) | ||
| 324 | */ | ||
| 325 | hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * | ||
| 326 | NR_HW_IRQS) + msi_grp; | ||
| 327 | /* | ||
| 328 | * As multiple hw_irqs map to a single MSI, | ||
| 329 | * always look up the virq using the hw_irq as seen from | ||
| 330 | * CPU0. | ||
| 331 | */ | ||
| 332 | hw_irq = hwirq_to_canonical_hwirq(hw_irq); | ||
| 333 | virq = irq_find_mapping(xgene_msi->inner_domain, hw_irq); | ||
| 334 | WARN_ON(!virq); | ||
| 335 | if (virq != 0) | ||
| 336 | generic_handle_irq(virq); | ||
| 337 | msir_val &= ~(1 << intr_index); | ||
| 338 | } | ||
| 339 | grp_select &= ~(1 << msir_index); | ||
| 340 | |||
| 341 | if (!grp_select) { | ||
| 342 | /* | ||
| 343 | * We have handled all interrupts in this group; | ||
| 344 | * resample the group's MSIINTn register in case | ||
| 345 | * something else became pending in the meantime. | ||
| 346 | */ | ||
| 347 | grp_select = xgene_msi_int_read(xgene_msi, msi_grp); | ||
| 348 | } | ||
| 349 | } | ||
| 350 | |||
| 351 | chained_irq_exit(chip, desc); | ||
| 352 | } | ||
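The hw_irq computation in the ISR inverts the message encoding shown earlier. Continuing the hwirq = 300 sketch from above: the interrupt arrives on the GIC IRQ for group 12, with bit 1 set in MSIINTC and bit 2 set in MSICIR1 (all values illustrative):

#include <stdio.h>

#define IRQS_PER_IDX	16
#define NR_HW_IRQS	16

int main(void)
{
	int msi_grp = 12;	/* GIC IRQ group that fired */
	int msir_index = 1;	/* bit set in MSIINTn */
	int intr_index = 2;	/* bit set in MSInIRx */

	int hw_irq = ((msir_index * IRQS_PER_IDX) + intr_index) * NR_HW_IRQS
		     + msi_grp;

	printf("recovered hwirq = %d\n", hw_irq);	/* 300 */
	return 0;
}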
| 353 | |||
| 354 | static enum cpuhp_state pci_xgene_online; | ||
| 355 | |||
| 356 | static int xgene_msi_remove(struct platform_device *pdev) | ||
| 357 | { | ||
| 358 | struct xgene_msi *msi = platform_get_drvdata(pdev); | ||
| 359 | |||
| 360 | if (pci_xgene_online) | ||
| 361 | cpuhp_remove_state(pci_xgene_online); | ||
| 362 | cpuhp_remove_state(CPUHP_PCI_XGENE_DEAD); | ||
| 363 | |||
| 364 | kfree(msi->msi_groups); | ||
| 365 | |||
| 366 | kfree(msi->bitmap); | ||
| 367 | msi->bitmap = NULL; | ||
| 368 | |||
| 369 | xgene_free_domains(msi); | ||
| 370 | |||
| 371 | return 0; | ||
| 372 | } | ||
| 373 | |||
| 374 | static int xgene_msi_hwirq_alloc(unsigned int cpu) | ||
| 375 | { | ||
| 376 | struct xgene_msi *msi = &xgene_msi_ctrl; | ||
| 377 | struct xgene_msi_group *msi_group; | ||
| 378 | cpumask_var_t mask; | ||
| 379 | int i; | ||
| 380 | int err; | ||
| 381 | |||
| 382 | for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { | ||
| 383 | msi_group = &msi->msi_groups[i]; | ||
| 384 | if (!msi_group->gic_irq) | ||
| 385 | continue; | ||
| 386 | |||
| 387 | irq_set_chained_handler(msi_group->gic_irq, | ||
| 388 | xgene_msi_isr); | ||
| 389 | err = irq_set_handler_data(msi_group->gic_irq, msi_group); | ||
| 390 | if (err) { | ||
| 391 | pr_err("failed to register GIC IRQ handler\n"); | ||
| 392 | return -EINVAL; | ||
| 393 | } | ||
| 394 | /* | ||
| 395 | * Statically allocate MSI GIC IRQs to each CPU core. | ||
| 396 | * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated | ||
| 397 | * to each core. | ||
| 398 | */ | ||
| 399 | if (alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
| 400 | cpumask_clear(mask); | ||
| 401 | cpumask_set_cpu(cpu, mask); | ||
| 402 | err = irq_set_affinity(msi_group->gic_irq, mask); | ||
| 403 | if (err) | ||
| 404 | pr_err("failed to set affinity for GIC IRQ"); | ||
| 405 | free_cpumask_var(mask); | ||
| 406 | } else { | ||
| 407 | pr_err("failed to alloc CPU mask for affinity\n"); | ||
| 408 | err = -EINVAL; | ||
| 409 | } | ||
| 410 | |||
| 411 | if (err) { | ||
| 412 | irq_set_chained_handler_and_data(msi_group->gic_irq, | ||
| 413 | NULL, NULL); | ||
| 414 | return err; | ||
| 415 | } | ||
| 416 | } | ||
| 417 | |||
| 418 | return 0; | ||
| 419 | } | ||
| 420 | |||
| 421 | static int xgene_msi_hwirq_free(unsigned int cpu) | ||
| 422 | { | ||
| 423 | struct xgene_msi *msi = &xgene_msi_ctrl; | ||
| 424 | struct xgene_msi_group *msi_group; | ||
| 425 | int i; | ||
| 426 | |||
| 427 | for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { | ||
| 428 | msi_group = &msi->msi_groups[i]; | ||
| 429 | if (!msi_group->gic_irq) | ||
| 430 | continue; | ||
| 431 | |||
| 432 | irq_set_chained_handler_and_data(msi_group->gic_irq, NULL, | ||
| 433 | NULL); | ||
| 434 | } | ||
| 435 | return 0; | ||
| 436 | } | ||
| 437 | |||
| 438 | static const struct of_device_id xgene_msi_match_table[] = { | ||
| 439 | {.compatible = "apm,xgene1-msi"}, | ||
| 440 | {}, | ||
| 441 | }; | ||
| 442 | |||
| 443 | static int xgene_msi_probe(struct platform_device *pdev) | ||
| 444 | { | ||
| 445 | struct resource *res; | ||
| 446 | int rc, irq_index; | ||
| 447 | struct xgene_msi *xgene_msi; | ||
| 448 | int virt_msir; | ||
| 449 | u32 msi_val, msi_idx; | ||
| 450 | |||
| 451 | xgene_msi = &xgene_msi_ctrl; | ||
| 452 | |||
| 453 | platform_set_drvdata(pdev, xgene_msi); | ||
| 454 | |||
| 455 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 456 | xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); | ||
| 457 | if (IS_ERR(xgene_msi->msi_regs)) { | ||
| 458 | dev_err(&pdev->dev, "no reg space\n"); | ||
| 459 | rc = PTR_ERR(xgene_msi->msi_regs); | ||
| 460 | goto error; | ||
| 461 | } | ||
| 462 | xgene_msi->msi_addr = res->start; | ||
| 463 | xgene_msi->node = pdev->dev.of_node; | ||
| 464 | xgene_msi->num_cpus = num_possible_cpus(); | ||
| 465 | |||
| 466 | rc = xgene_msi_init_allocator(xgene_msi); | ||
| 467 | if (rc) { | ||
| 468 | dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); | ||
| 469 | goto error; | ||
| 470 | } | ||
| 471 | |||
| 472 | rc = xgene_allocate_domains(xgene_msi); | ||
| 473 | if (rc) { | ||
| 474 | dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); | ||
| 475 | goto error; | ||
| 476 | } | ||
| 477 | |||
| 478 | for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { | ||
| 479 | virt_msir = platform_get_irq(pdev, irq_index); | ||
| 480 | if (virt_msir < 0) { | ||
| 481 | dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", | ||
| 482 | irq_index); | ||
| 483 | rc = virt_msir; | ||
| 484 | goto error; | ||
| 485 | } | ||
| 486 | xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; | ||
| 487 | xgene_msi->msi_groups[irq_index].msi_grp = irq_index; | ||
| 488 | xgene_msi->msi_groups[irq_index].msi = xgene_msi; | ||
| 489 | } | ||
| 490 | |||
| 491 | /* | ||
| 492 | * MSInIRx registers are read-to-clear; before registering | ||
| 493 | * interrupt handlers, read all of them to clear spurious | ||
| 494 | * interrupts that may occur before the driver is probed. | ||
| 495 | */ | ||
| 496 | for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { | ||
| 497 | for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) | ||
| 498 | msi_val = xgene_msi_ir_read(xgene_msi, irq_index, | ||
| 499 | msi_idx); | ||
| 500 | /* Read MSIINTn to confirm */ | ||
| 501 | msi_val = xgene_msi_int_read(xgene_msi, irq_index); | ||
| 502 | if (msi_val) { | ||
| 503 | dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); | ||
| 504 | rc = -EINVAL; | ||
| 505 | goto error; | ||
| 506 | } | ||
| 507 | } | ||
| 508 | |||
| 509 | rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online", | ||
| 510 | xgene_msi_hwirq_alloc, NULL); | ||
| 511 | if (rc < 0) | ||
| 512 | goto err_cpuhp; | ||
| 513 | pci_xgene_online = rc; | ||
| 514 | rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL, | ||
| 515 | xgene_msi_hwirq_free); | ||
| 516 | if (rc) | ||
| 517 | goto err_cpuhp; | ||
| 518 | |||
| 519 | dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); | ||
| 520 | |||
| 521 | return 0; | ||
| 522 | |||
| 523 | err_cpuhp: | ||
| 524 | dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); | ||
| 525 | error: | ||
| 526 | xgene_msi_remove(pdev); | ||
| 527 | return rc; | ||
| 528 | } | ||
| 529 | |||
| 530 | static struct platform_driver xgene_msi_driver = { | ||
| 531 | .driver = { | ||
| 532 | .name = "xgene-msi", | ||
| 533 | .of_match_table = xgene_msi_match_table, | ||
| 534 | }, | ||
| 535 | .probe = xgene_msi_probe, | ||
| 536 | .remove = xgene_msi_remove, | ||
| 537 | }; | ||
| 538 | |||
| 539 | static int __init xgene_pcie_msi_init(void) | ||
| 540 | { | ||
| 541 | return platform_driver_register(&xgene_msi_driver); | ||
| 542 | } | ||
| 543 | subsys_initcall(xgene_pcie_msi_init); | ||
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c new file mode 100644 index 000000000000..d854d67e873c --- /dev/null +++ b/drivers/pci/controller/pci-xgene.c | |||
| @@ -0,0 +1,689 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * APM X-Gene PCIe Driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2014 Applied Micro Circuits Corporation. | ||
| 6 | * | ||
| 7 | * Author: Tanmay Inamdar <tinamdar@apm.com>. | ||
| 8 | */ | ||
| 9 | #include <linux/clk.h> | ||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/io.h> | ||
| 12 | #include <linux/jiffies.h> | ||
| 13 | #include <linux/memblock.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/of.h> | ||
| 16 | #include <linux/of_address.h> | ||
| 17 | #include <linux/of_irq.h> | ||
| 18 | #include <linux/of_pci.h> | ||
| 19 | #include <linux/pci.h> | ||
| 20 | #include <linux/pci-acpi.h> | ||
| 21 | #include <linux/pci-ecam.h> | ||
| 22 | #include <linux/platform_device.h> | ||
| 23 | #include <linux/slab.h> | ||
| 24 | |||
| 25 | #include "../pci.h" | ||
| 26 | |||
| 27 | #define PCIECORE_CTLANDSTATUS 0x50 | ||
| 28 | #define PIM1_1L 0x80 | ||
| 29 | #define IBAR2 0x98 | ||
| 30 | #define IR2MSK 0x9c | ||
| 31 | #define PIM2_1L 0xa0 | ||
| 32 | #define IBAR3L 0xb4 | ||
| 33 | #define IR3MSKL 0xbc | ||
| 34 | #define PIM3_1L 0xc4 | ||
| 35 | #define OMR1BARL 0x100 | ||
| 36 | #define OMR2BARL 0x118 | ||
| 37 | #define OMR3BARL 0x130 | ||
| 38 | #define CFGBARL 0x154 | ||
| 39 | #define CFGBARH 0x158 | ||
| 40 | #define CFGCTL 0x15c | ||
| 41 | #define RTDID 0x160 | ||
| 42 | #define BRIDGE_CFG_0 0x2000 | ||
| 43 | #define BRIDGE_CFG_4 0x2010 | ||
| 44 | #define BRIDGE_STATUS_0 0x2600 | ||
| 45 | |||
| 46 | #define LINK_UP_MASK 0x00000100 | ||
| 47 | #define AXI_EP_CFG_ACCESS 0x10000 | ||
| 48 | #define EN_COHERENCY 0xF0000000 | ||
| 49 | #define EN_REG 0x00000001 | ||
| 50 | #define OB_LO_IO 0x00000002 | ||
| 51 | #define XGENE_PCIE_VENDORID 0x10E8 | ||
| 52 | #define XGENE_PCIE_DEVICEID 0xE004 | ||
| 53 | #define SZ_1T (SZ_1G*1024ULL) | ||
| 54 | #define PIPE_PHY_RATE_RD(src) ((0xc000 & (u32)(src)) >> 0xe) | ||
| 55 | |||
| 56 | #define XGENE_V1_PCI_EXP_CAP 0x40 | ||
| 57 | |||
| 58 | /* PCIe IP version */ | ||
| 59 | #define XGENE_PCIE_IP_VER_UNKN 0 | ||
| 60 | #define XGENE_PCIE_IP_VER_1 1 | ||
| 61 | #define XGENE_PCIE_IP_VER_2 2 | ||
| 62 | |||
| 63 | #if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) | ||
| 64 | struct xgene_pcie_port { | ||
| 65 | struct device_node *node; | ||
| 66 | struct device *dev; | ||
| 67 | struct clk *clk; | ||
| 68 | void __iomem *csr_base; | ||
| 69 | void __iomem *cfg_base; | ||
| 70 | unsigned long cfg_addr; | ||
| 71 | bool link_up; | ||
| 72 | u32 version; | ||
| 73 | }; | ||
| 74 | |||
| 75 | static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg) | ||
| 76 | { | ||
| 77 | return readl(port->csr_base + reg); | ||
| 78 | } | ||
| 79 | |||
| 80 | static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val) | ||
| 81 | { | ||
| 82 | writel(val, port->csr_base + reg); | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline u32 pcie_bar_low_val(u32 addr, u32 flags) | ||
| 86 | { | ||
| 87 | return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags; | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus) | ||
| 91 | { | ||
| 92 | struct pci_config_window *cfg; | ||
| 93 | |||
| 94 | if (acpi_disabled) | ||
| 95 | return (struct xgene_pcie_port *)(bus->sysdata); | ||
| 96 | |||
| 97 | cfg = bus->sysdata; | ||
| 98 | return (struct xgene_pcie_port *)(cfg->priv); | ||
| 99 | } | ||
| 100 | |||
| 101 | /* | ||
| 102 | * When address bits [17:16] are 2'b01, the Configuration access will be | ||
| 103 | * treated as Type 1 and will be forwarded to the external PCIe device. | ||
| 104 | */ | ||
| 105 | static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus) | ||
| 106 | { | ||
| 107 | struct xgene_pcie_port *port = pcie_bus_to_port(bus); | ||
| 108 | |||
| 109 | if (bus->number >= (bus->primary + 1)) | ||
| 110 | return port->cfg_base + AXI_EP_CFG_ACCESS; | ||
| 111 | |||
| 112 | return port->cfg_base; | ||
| 113 | } | ||
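The 0x10000 offset added above sets address bit 16, which is what the [17:16] = 2'b01 note refers to. A one-liner sketch of the bit arithmetic:

#include <stdio.h>

#define AXI_EP_CFG_ACCESS 0x10000	/* address bit 16 -> [17:16] = 2'b01 */

int main(void)
{
	unsigned long root_off = 0x0;		/* Type 0, [17:16] = 2'b00 */
	unsigned long ep_off = root_off + AXI_EP_CFG_ACCESS;

	printf("[17:16]: root=%lu ep=%lu\n",
	       (root_off >> 16) & 0x3, (ep_off >> 16) & 0x3);
	return 0;
}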
| 114 | |||
| 115 | /* | ||
| 116 | * For a Configuration request, the RTDID register supplies the Bus, | ||
| 117 | * Device and Function numbers of the header fields. | ||
| 118 | */ | ||
| 119 | static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn) | ||
| 120 | { | ||
| 121 | struct xgene_pcie_port *port = pcie_bus_to_port(bus); | ||
| 122 | unsigned int b, d, f; | ||
| 123 | u32 rtdid_val = 0; | ||
| 124 | |||
| 125 | b = bus->number; | ||
| 126 | d = PCI_SLOT(devfn); | ||
| 127 | f = PCI_FUNC(devfn); | ||
| 128 | |||
| 129 | if (!pci_is_root_bus(bus)) | ||
| 130 | rtdid_val = (b << 8) | (d << 3) | f; | ||
| 131 | |||
| 132 | xgene_pcie_writel(port, RTDID, rtdid_val); | ||
| 133 | /* read the register back to ensure flush */ | ||
| 134 | xgene_pcie_readl(port, RTDID); | ||
| 135 | } | ||
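A sketch of the RTDID encoding for a hypothetical downstream device at 01:03.2 (on the root bus the register is simply written as 0):

#include <stdio.h>

int main(void)
{
	unsigned int b = 1, d = 3, f = 2;	/* hypothetical BDF 01:03.2 */
	unsigned int rtdid = (b << 8) | (d << 3) | f;

	printf("RTDID = %#x\n", rtdid);		/* 0x11a */
	return 0;
}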
| 136 | |||
| 137 | /* | ||
| 138 | * The X-Gene PCIe port uses BAR0-BAR1 of the RC's configuration space | ||
| 139 | * for the translation from the PCI bus to the native bus. The entire DDR | ||
| 140 | * region is mapped into PCIe space using these registers, so it can be | ||
| 141 | * reached by DMA from EP devices. BAR0/1 of the bridge must be | ||
| 142 | * hidden during enumeration to avoid sizing and resource allocation | ||
| 143 | * by the PCIe core. | ||
| 144 | */ | ||
| 145 | static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) | ||
| 146 | { | ||
| 147 | if (pci_is_root_bus(bus) && ((offset == PCI_BASE_ADDRESS_0) || | ||
| 148 | (offset == PCI_BASE_ADDRESS_1))) | ||
| 149 | return true; | ||
| 150 | |||
| 151 | return false; | ||
| 152 | } | ||
| 153 | |||
| 154 | static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
| 155 | int offset) | ||
| 156 | { | ||
| 157 | if ((pci_is_root_bus(bus) && devfn != 0) || | ||
| 158 | xgene_pcie_hide_rc_bars(bus, offset)) | ||
| 159 | return NULL; | ||
| 160 | |||
| 161 | xgene_pcie_set_rtdid_reg(bus, devfn); | ||
| 162 | return xgene_pcie_get_cfg_base(bus) + offset; | ||
| 163 | } | ||
| 164 | |||
| 165 | static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, | ||
| 166 | int where, int size, u32 *val) | ||
| 167 | { | ||
| 168 | struct xgene_pcie_port *port = pcie_bus_to_port(bus); | ||
| 169 | |||
| 170 | if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) != | ||
| 171 | PCIBIOS_SUCCESSFUL) | ||
| 172 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 173 | |||
| 174 | /* | ||
| 175 | * The v1 controller has a bug in its Configuration Request | ||
| 176 | * Retry Status (CRS) logic: when CRS is enabled and we read the | ||
| 177 | * Vendor and Device ID of a non-existent device, the controller | ||
| 178 | * fabricates return data of 0xFFFF0001 ("device exists but is not | ||
| 179 | * ready") instead of 0xFFFFFFFF ("device does not exist"). This | ||
| 180 | * causes the PCI core to retry the read until it times out. | ||
| 181 | * Avoid this by not claiming to support CRS. | ||
| 182 | */ | ||
| 183 | if (pci_is_root_bus(bus) && (port->version == XGENE_PCIE_IP_VER_1) && | ||
| 184 | ((where & ~0x3) == XGENE_V1_PCI_EXP_CAP + PCI_EXP_RTCTL)) | ||
| 185 | *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); | ||
| 186 | |||
| 187 | if (size <= 2) | ||
| 188 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | ||
| 189 | |||
| 190 | return PCIBIOS_SUCCESSFUL; | ||
| 191 | } | ||
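A sketch of the sub-word extraction at the end of xgene_pcie_config_read32(), pulling a hypothetical 16-bit Device ID out of a 32-bit read of dword 0 (the size <= 2 guard keeps the (1 << (size * 8)) shift well-defined):

#include <stdio.h>

int main(void)
{
	unsigned int val = 0xE00410E8;	/* hypothetical: device << 16 | vendor */
	int where = 2, size = 2;	/* 16-bit read at config offset 2 */

	val = (val >> (8 * (where & 3))) & ((1u << (size * 8)) - 1);
	printf("%#x\n", val);		/* 0xe004 */
	return 0;
}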
| 192 | #endif | ||
| 193 | |||
| 194 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
| 195 | static int xgene_get_csr_resource(struct acpi_device *adev, | ||
| 196 | struct resource *res) | ||
| 197 | { | ||
| 198 | struct device *dev = &adev->dev; | ||
| 199 | struct resource_entry *entry; | ||
| 200 | struct list_head list; | ||
| 201 | unsigned long flags; | ||
| 202 | int ret; | ||
| 203 | |||
| 204 | INIT_LIST_HEAD(&list); | ||
| 205 | flags = IORESOURCE_MEM; | ||
| 206 | ret = acpi_dev_get_resources(adev, &list, | ||
| 207 | acpi_dev_filter_resource_type_cb, | ||
| 208 | (void *) flags); | ||
| 209 | if (ret < 0) { | ||
| 210 | dev_err(dev, "failed to parse _CRS method, error code %d\n", | ||
| 211 | ret); | ||
| 212 | return ret; | ||
| 213 | } | ||
| 214 | |||
| 215 | if (ret == 0) { | ||
| 216 | dev_err(dev, "no IO and memory resources present in _CRS\n"); | ||
| 217 | return -EINVAL; | ||
| 218 | } | ||
| 219 | |||
| 220 | entry = list_first_entry(&list, struct resource_entry, node); | ||
| 221 | *res = *entry->res; | ||
| 222 | acpi_dev_free_resource_list(&list); | ||
| 223 | return 0; | ||
| 224 | } | ||
| 225 | |||
| 226 | static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion) | ||
| 227 | { | ||
| 228 | struct device *dev = cfg->parent; | ||
| 229 | struct acpi_device *adev = to_acpi_device(dev); | ||
| 230 | struct xgene_pcie_port *port; | ||
| 231 | struct resource csr; | ||
| 232 | int ret; | ||
| 233 | |||
| 234 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | ||
| 235 | if (!port) | ||
| 236 | return -ENOMEM; | ||
| 237 | |||
| 238 | ret = xgene_get_csr_resource(adev, &csr); | ||
| 239 | if (ret) { | ||
| 240 | dev_err(dev, "can't get CSR resource\n"); | ||
| 241 | return ret; | ||
| 242 | } | ||
| 243 | port->csr_base = devm_pci_remap_cfg_resource(dev, &csr); | ||
| 244 | if (IS_ERR(port->csr_base)) | ||
| 245 | return PTR_ERR(port->csr_base); | ||
| 246 | |||
| 247 | port->cfg_base = cfg->win; | ||
| 248 | port->version = ipversion; | ||
| 249 | |||
| 250 | cfg->priv = port; | ||
| 251 | return 0; | ||
| 252 | } | ||
| 253 | |||
| 254 | static int xgene_v1_pcie_ecam_init(struct pci_config_window *cfg) | ||
| 255 | { | ||
| 256 | return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_1); | ||
| 257 | } | ||
| 258 | |||
| 259 | struct pci_ecam_ops xgene_v1_pcie_ecam_ops = { | ||
| 260 | .bus_shift = 16, | ||
| 261 | .init = xgene_v1_pcie_ecam_init, | ||
| 262 | .pci_ops = { | ||
| 263 | .map_bus = xgene_pcie_map_bus, | ||
| 264 | .read = xgene_pcie_config_read32, | ||
| 265 | .write = pci_generic_config_write, | ||
| 266 | } | ||
| 267 | }; | ||
| 268 | |||
| 269 | static int xgene_v2_pcie_ecam_init(struct pci_config_window *cfg) | ||
| 270 | { | ||
| 271 | return xgene_pcie_ecam_init(cfg, XGENE_PCIE_IP_VER_2); | ||
| 272 | } | ||
| 273 | |||
| 274 | struct pci_ecam_ops xgene_v2_pcie_ecam_ops = { | ||
| 275 | .bus_shift = 16, | ||
| 276 | .init = xgene_v2_pcie_ecam_init, | ||
| 277 | .pci_ops = { | ||
| 278 | .map_bus = xgene_pcie_map_bus, | ||
| 279 | .read = xgene_pcie_config_read32, | ||
| 280 | .write = pci_generic_config_write, | ||
| 281 | } | ||
| 282 | }; | ||
| 283 | #endif | ||
| 284 | |||
| 285 | #if defined(CONFIG_PCI_XGENE) | ||
| 286 | static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr, | ||
| 287 | u32 flags, u64 size) | ||
| 288 | { | ||
| 289 | u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags; | ||
| 290 | u32 val32 = 0; | ||
| 291 | u32 val; | ||
| 292 | |||
| 293 | val32 = xgene_pcie_readl(port, addr); | ||
| 294 | val = (val32 & 0x0000ffff) | (lower_32_bits(mask) << 16); | ||
| 295 | xgene_pcie_writel(port, addr, val); | ||
| 296 | |||
| 297 | val32 = xgene_pcie_readl(port, addr + 0x04); | ||
| 298 | val = (val32 & 0xffff0000) | (lower_32_bits(mask) >> 16); | ||
| 299 | xgene_pcie_writel(port, addr + 0x04, val); | ||
| 300 | |||
| 301 | val32 = xgene_pcie_readl(port, addr + 0x04); | ||
| 302 | val = (val32 & 0x0000ffff) | (upper_32_bits(mask) << 16); | ||
| 303 | xgene_pcie_writel(port, addr + 0x04, val); | ||
| 304 | |||
| 305 | val32 = xgene_pcie_readl(port, addr + 0x08); | ||
| 306 | val = (val32 & 0xffff0000) | (upper_32_bits(mask) >> 16); | ||
| 307 | xgene_pcie_writel(port, addr + 0x08, val); | ||
| 308 | |||
| 309 | return mask; | ||
| 310 | } | ||
| 311 | |||
| 312 | static void xgene_pcie_linkup(struct xgene_pcie_port *port, | ||
| 313 | u32 *lanes, u32 *speed) | ||
| 314 | { | ||
| 315 | u32 val32; | ||
| 316 | |||
| 317 | port->link_up = false; | ||
| 318 | val32 = xgene_pcie_readl(port, PCIECORE_CTLANDSTATUS); | ||
| 319 | if (val32 & LINK_UP_MASK) { | ||
| 320 | port->link_up = true; | ||
| 321 | *speed = PIPE_PHY_RATE_RD(val32); | ||
| 322 | val32 = xgene_pcie_readl(port, BRIDGE_STATUS_0); | ||
| 323 | *lanes = val32 >> 26; | ||
| 324 | } | ||
| 325 | } | ||
| 326 | |||
| 327 | static int xgene_pcie_init_port(struct xgene_pcie_port *port) | ||
| 328 | { | ||
| 329 | struct device *dev = port->dev; | ||
| 330 | int rc; | ||
| 331 | |||
| 332 | port->clk = clk_get(dev, NULL); | ||
| 333 | if (IS_ERR(port->clk)) { | ||
| 334 | dev_err(dev, "clock not available\n"); | ||
| 335 | return -ENODEV; | ||
| 336 | } | ||
| 337 | |||
| 338 | rc = clk_prepare_enable(port->clk); | ||
| 339 | if (rc) { | ||
| 340 | dev_err(dev, "clock enable failed\n"); | ||
| 341 | return rc; | ||
| 342 | } | ||
| 343 | |||
| 344 | return 0; | ||
| 345 | } | ||
| 346 | |||
| 347 | static int xgene_pcie_map_reg(struct xgene_pcie_port *port, | ||
| 348 | struct platform_device *pdev) | ||
| 349 | { | ||
| 350 | struct device *dev = port->dev; | ||
| 351 | struct resource *res; | ||
| 352 | |||
| 353 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); | ||
| 354 | port->csr_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 355 | if (IS_ERR(port->csr_base)) | ||
| 356 | return PTR_ERR(port->csr_base); | ||
| 357 | |||
| 358 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); | ||
| 359 | port->cfg_base = devm_ioremap_resource(dev, res); | ||
| 360 | if (IS_ERR(port->cfg_base)) | ||
| 361 | return PTR_ERR(port->cfg_base); | ||
| 362 | port->cfg_addr = res->start; | ||
| 363 | |||
| 364 | return 0; | ||
| 365 | } | ||
| 366 | |||
| 367 | static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port, | ||
| 368 | struct resource *res, u32 offset, | ||
| 369 | u64 cpu_addr, u64 pci_addr) | ||
| 370 | { | ||
| 371 | struct device *dev = port->dev; | ||
| 372 | resource_size_t size = resource_size(res); | ||
| 373 | u64 restype = resource_type(res); | ||
| 374 | u64 mask = 0; | ||
| 375 | u32 min_size; | ||
| 376 | u32 flag = EN_REG; | ||
| 377 | |||
| 378 | if (restype == IORESOURCE_MEM) { | ||
| 379 | min_size = SZ_128M; | ||
| 380 | } else { | ||
| 381 | min_size = 128; | ||
| 382 | flag |= OB_LO_IO; | ||
| 383 | } | ||
| 384 | |||
| 385 | if (size >= min_size) | ||
| 386 | mask = ~(size - 1) | flag; | ||
| 387 | else | ||
| 388 | dev_warn(dev, "res size 0x%llx less than minimum 0x%x\n", | ||
| 389 | (u64)size, min_size); | ||
| 390 | |||
| 391 | xgene_pcie_writel(port, offset, lower_32_bits(cpu_addr)); | ||
| 392 | xgene_pcie_writel(port, offset + 0x04, upper_32_bits(cpu_addr)); | ||
| 393 | xgene_pcie_writel(port, offset + 0x08, lower_32_bits(mask)); | ||
| 394 | xgene_pcie_writel(port, offset + 0x0c, upper_32_bits(mask)); | ||
| 395 | xgene_pcie_writel(port, offset + 0x10, lower_32_bits(pci_addr)); | ||
| 396 | xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr)); | ||
| 397 | } | ||
| 398 | |||
| 399 | static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port) | ||
| 400 | { | ||
| 401 | u64 addr = port->cfg_addr; | ||
| 402 | |||
| 403 | xgene_pcie_writel(port, CFGBARL, lower_32_bits(addr)); | ||
| 404 | xgene_pcie_writel(port, CFGBARH, upper_32_bits(addr)); | ||
| 405 | xgene_pcie_writel(port, CFGCTL, EN_REG); | ||
| 406 | } | ||
| 407 | |||
| 408 | static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, | ||
| 409 | struct list_head *res, | ||
| 410 | resource_size_t io_base) | ||
| 411 | { | ||
| 412 | struct resource_entry *window; | ||
| 413 | struct device *dev = port->dev; | ||
| 414 | int ret; | ||
| 415 | |||
| 416 | resource_list_for_each_entry(window, res) { | ||
| 417 | struct resource *res = window->res; | ||
| 418 | u64 restype = resource_type(res); | ||
| 419 | |||
| 420 | dev_dbg(dev, "%pR\n", res); | ||
| 421 | |||
| 422 | switch (restype) { | ||
| 423 | case IORESOURCE_IO: | ||
| 424 | xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base, | ||
| 425 | res->start - window->offset); | ||
| 426 | ret = pci_remap_iospace(res, io_base); | ||
| 427 | if (ret < 0) | ||
| 428 | return ret; | ||
| 429 | break; | ||
| 430 | case IORESOURCE_MEM: | ||
| 431 | if (res->flags & IORESOURCE_PREFETCH) | ||
| 432 | xgene_pcie_setup_ob_reg(port, res, OMR2BARL, | ||
| 433 | res->start, | ||
| 434 | res->start - | ||
| 435 | window->offset); | ||
| 436 | else | ||
| 437 | xgene_pcie_setup_ob_reg(port, res, OMR1BARL, | ||
| 438 | res->start, | ||
| 439 | res->start - | ||
| 440 | window->offset); | ||
| 441 | break; | ||
| 442 | case IORESOURCE_BUS: | ||
| 443 | break; | ||
| 444 | default: | ||
| 445 | dev_err(dev, "invalid resource %pR\n", res); | ||
| 446 | return -EINVAL; | ||
| 447 | } | ||
| 448 | } | ||
| 449 | xgene_pcie_setup_cfg_reg(port); | ||
| 450 | return 0; | ||
| 451 | } | ||
| 452 | |||
| 453 | static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg, | ||
| 454 | u64 pim, u64 size) | ||
| 455 | { | ||
| 456 | xgene_pcie_writel(port, pim_reg, lower_32_bits(pim)); | ||
| 457 | xgene_pcie_writel(port, pim_reg + 0x04, | ||
| 458 | upper_32_bits(pim) | EN_COHERENCY); | ||
| 459 | xgene_pcie_writel(port, pim_reg + 0x10, lower_32_bits(size)); | ||
| 460 | xgene_pcie_writel(port, pim_reg + 0x14, upper_32_bits(size)); | ||
| 461 | } | ||
| 462 | |||
| 463 | /* | ||
| 464 | * X-Gene PCIe supports a maximum of 3 inbound memory regions. | ||
| 465 | * This function selects a region based on its size. | ||
| 466 | */ | ||
| 467 | static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size) | ||
| 468 | { | ||
| 469 | if ((size > 4) && (size < SZ_16M) && !(*ib_reg_mask & (1 << 1))) { | ||
| 470 | *ib_reg_mask |= (1 << 1); | ||
| 471 | return 1; | ||
| 472 | } | ||
| 473 | |||
| 474 | if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) { | ||
| 475 | *ib_reg_mask |= (1 << 0); | ||
| 476 | return 0; | ||
| 477 | } | ||
| 478 | |||
| 479 | if ((size > SZ_1M) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 2))) { | ||
| 480 | *ib_reg_mask |= (1 << 2); | ||
| 481 | return 2; | ||
| 482 | } | ||
| 483 | |||
| 484 | return -EINVAL; | ||
| 485 | } | ||
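A userspace sketch of the selection policy above, run over three hypothetical dma-ranges sizes: the 8 MiB window takes region 1, the 64 GiB window takes region 0, and a further 2 GiB window falls through to region 2.

#include <stdio.h>

#define SZ_1K	0x400ULL
#define SZ_1M	0x100000ULL
#define SZ_16M	0x1000000ULL
#define SZ_1G	0x40000000ULL
#define SZ_1T	(SZ_1G * 1024)

/* Mirrors xgene_pcie_select_ib_reg(), with -1 in place of -EINVAL */
static int select_ib_reg(unsigned char *mask, unsigned long long size)
{
	if (size > 4 && size < SZ_16M && !(*mask & (1 << 1))) {
		*mask |= 1 << 1;
		return 1;
	}
	if (size > SZ_1K && size < SZ_1T && !(*mask & (1 << 0))) {
		*mask |= 1 << 0;
		return 0;
	}
	if (size > SZ_1M && size < SZ_1T && !(*mask & (1 << 2))) {
		*mask |= 1 << 2;
		return 2;
	}
	return -1;
}

int main(void)
{
	unsigned char mask = 0;
	int r1 = select_ib_reg(&mask, 8 * SZ_1M);
	int r2 = select_ib_reg(&mask, 64 * SZ_1G);
	int r3 = select_ib_reg(&mask, 2 * SZ_1G);

	printf("regions: %d %d %d\n", r1, r2, r3);	/* 1 0 2 */
	return 0;
}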
| 486 | |||
| 487 | static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port, | ||
| 488 | struct of_pci_range *range, u8 *ib_reg_mask) | ||
| 489 | { | ||
| 490 | void __iomem *cfg_base = port->cfg_base; | ||
| 491 | struct device *dev = port->dev; | ||
| 492 | void *bar_addr; | ||
| 493 | u32 pim_reg; | ||
| 494 | u64 cpu_addr = range->cpu_addr; | ||
| 495 | u64 pci_addr = range->pci_addr; | ||
| 496 | u64 size = range->size; | ||
| 497 | u64 mask = ~(size - 1) | EN_REG; | ||
| 498 | u32 flags = PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
| 499 | u32 bar_low; | ||
| 500 | int region; | ||
| 501 | |||
| 502 | region = xgene_pcie_select_ib_reg(ib_reg_mask, range->size); | ||
| 503 | if (region < 0) { | ||
| 504 | dev_warn(dev, "invalid pcie dma-range config\n"); | ||
| 505 | return; | ||
| 506 | } | ||
| 507 | |||
| 508 | if (range->flags & IORESOURCE_PREFETCH) | ||
| 509 | flags |= PCI_BASE_ADDRESS_MEM_PREFETCH; | ||
| 510 | |||
| 511 | bar_low = pcie_bar_low_val((u32)cpu_addr, flags); | ||
| 512 | switch (region) { | ||
| 513 | case 0: | ||
| 514 | xgene_pcie_set_ib_mask(port, BRIDGE_CFG_4, flags, size); | ||
| 515 | bar_addr = cfg_base + PCI_BASE_ADDRESS_0; | ||
| 516 | writel(bar_low, bar_addr); | ||
| 517 | writel(upper_32_bits(cpu_addr), bar_addr + 0x4); | ||
| 518 | pim_reg = PIM1_1L; | ||
| 519 | break; | ||
| 520 | case 1: | ||
| 521 | xgene_pcie_writel(port, IBAR2, bar_low); | ||
| 522 | xgene_pcie_writel(port, IR2MSK, lower_32_bits(mask)); | ||
| 523 | pim_reg = PIM2_1L; | ||
| 524 | break; | ||
| 525 | case 2: | ||
| 526 | xgene_pcie_writel(port, IBAR3L, bar_low); | ||
| 527 | xgene_pcie_writel(port, IBAR3L + 0x4, upper_32_bits(cpu_addr)); | ||
| 528 | xgene_pcie_writel(port, IR3MSKL, lower_32_bits(mask)); | ||
| 529 | xgene_pcie_writel(port, IR3MSKL + 0x4, upper_32_bits(mask)); | ||
| 530 | pim_reg = PIM3_1L; | ||
| 531 | break; | ||
| 532 | } | ||
| 533 | |||
| 534 | xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1)); | ||
| 535 | } | ||
| 536 | |||
| 537 | static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port) | ||
| 538 | { | ||
| 539 | struct device_node *np = port->node; | ||
| 540 | struct of_pci_range range; | ||
| 541 | struct of_pci_range_parser parser; | ||
| 542 | struct device *dev = port->dev; | ||
| 543 | u8 ib_reg_mask = 0; | ||
| 544 | |||
| 545 | if (of_pci_dma_range_parser_init(&parser, np)) { | ||
| 546 | dev_err(dev, "missing dma-ranges property\n"); | ||
| 547 | return -EINVAL; | ||
| 548 | } | ||
| 549 | |||
| 550 | /* Get the dma-ranges from DT */ | ||
| 551 | for_each_of_pci_range(&parser, &range) { | ||
| 552 | u64 end = range.cpu_addr + range.size - 1; | ||
| 553 | |||
| 554 | dev_dbg(dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", | ||
| 555 | range.flags, range.cpu_addr, end, range.pci_addr); | ||
| 556 | xgene_pcie_setup_ib_reg(port, &range, &ib_reg_mask); | ||
| 557 | } | ||
| 558 | return 0; | ||
| 559 | } | ||
| 560 | |||
| 561 | /* Clear the BAR configuration that was done by firmware */ | ||
| 562 | static void xgene_pcie_clear_config(struct xgene_pcie_port *port) | ||
| 563 | { | ||
| 564 | int i; | ||
| 565 | |||
| 566 | for (i = PIM1_1L; i <= CFGCTL; i += 4) | ||
| 567 | xgene_pcie_writel(port, i, 0); | ||
| 568 | } | ||
| 569 | |||
| 570 | static int xgene_pcie_setup(struct xgene_pcie_port *port, struct list_head *res, | ||
| 571 | resource_size_t io_base) | ||
| 572 | { | ||
| 573 | struct device *dev = port->dev; | ||
| 574 | u32 val, lanes = 0, speed = 0; | ||
| 575 | int ret; | ||
| 576 | |||
| 577 | xgene_pcie_clear_config(port); | ||
| 578 | |||
| 579 | /* setup the vendor and device IDs correctly */ | ||
| 580 | val = (XGENE_PCIE_DEVICEID << 16) | XGENE_PCIE_VENDORID; | ||
| 581 | xgene_pcie_writel(port, BRIDGE_CFG_0, val); | ||
| 582 | |||
| 583 | ret = xgene_pcie_map_ranges(port, res, io_base); | ||
| 584 | if (ret) | ||
| 585 | return ret; | ||
| 586 | |||
| 587 | ret = xgene_pcie_parse_map_dma_ranges(port); | ||
| 588 | if (ret) | ||
| 589 | return ret; | ||
| 590 | |||
| 591 | xgene_pcie_linkup(port, &lanes, &speed); | ||
| 592 | if (!port->link_up) | ||
| 593 | dev_info(dev, "(rc) link down\n"); | ||
| 594 | else | ||
| 595 | dev_info(dev, "(rc) x%d gen-%d link up\n", lanes, speed + 1); | ||
| 596 | return 0; | ||
| 597 | } | ||
| 598 | |||
| 599 | static struct pci_ops xgene_pcie_ops = { | ||
| 600 | .map_bus = xgene_pcie_map_bus, | ||
| 601 | .read = xgene_pcie_config_read32, | ||
| 602 | .write = pci_generic_config_write32, | ||
| 603 | }; | ||
| 604 | |||
| 605 | static int xgene_pcie_probe(struct platform_device *pdev) | ||
| 606 | { | ||
| 607 | struct device *dev = &pdev->dev; | ||
| 608 | struct device_node *dn = dev->of_node; | ||
| 609 | struct xgene_pcie_port *port; | ||
| 610 | resource_size_t iobase = 0; | ||
| 611 | struct pci_bus *bus, *child; | ||
| 612 | struct pci_host_bridge *bridge; | ||
| 613 | int ret; | ||
| 614 | LIST_HEAD(res); | ||
| 615 | |||
| 616 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); | ||
| 617 | if (!bridge) | ||
| 618 | return -ENOMEM; | ||
| 619 | |||
| 620 | port = pci_host_bridge_priv(bridge); | ||
| 621 | |||
| 622 | port->node = of_node_get(dn); | ||
| 623 | port->dev = dev; | ||
| 624 | |||
| 625 | port->version = XGENE_PCIE_IP_VER_UNKN; | ||
| 626 | if (of_device_is_compatible(port->node, "apm,xgene-pcie")) | ||
| 627 | port->version = XGENE_PCIE_IP_VER_1; | ||
| 628 | |||
| 629 | ret = xgene_pcie_map_reg(port, pdev); | ||
| 630 | if (ret) | ||
| 631 | return ret; | ||
| 632 | |||
| 633 | ret = xgene_pcie_init_port(port); | ||
| 634 | if (ret) | ||
| 635 | return ret; | ||
| 636 | |||
| 637 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, | ||
| 638 | &iobase); | ||
| 639 | if (ret) | ||
| 640 | return ret; | ||
| 641 | |||
| 642 | ret = devm_request_pci_bus_resources(dev, &res); | ||
| 643 | if (ret) | ||
| 644 | goto error; | ||
| 645 | |||
| 646 | ret = xgene_pcie_setup(port, &res, iobase); | ||
| 647 | if (ret) | ||
| 648 | goto error; | ||
| 649 | |||
| 650 | list_splice_init(&res, &bridge->windows); | ||
| 651 | bridge->dev.parent = dev; | ||
| 652 | bridge->sysdata = port; | ||
| 653 | bridge->busnr = 0; | ||
| 654 | bridge->ops = &xgene_pcie_ops; | ||
| 655 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 656 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 657 | |||
| 658 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 659 | if (ret < 0) | ||
| 660 | goto error; | ||
| 661 | |||
| 662 | bus = bridge->bus; | ||
| 663 | |||
| 664 | pci_assign_unassigned_bus_resources(bus); | ||
| 665 | list_for_each_entry(child, &bus->children, node) | ||
| 666 | pcie_bus_configure_settings(child); | ||
| 667 | pci_bus_add_devices(bus); | ||
| 668 | return 0; | ||
| 669 | |||
| 670 | error: | ||
| 671 | pci_free_resource_list(&res); | ||
| 672 | return ret; | ||
| 673 | } | ||
| 674 | |||
| 675 | static const struct of_device_id xgene_pcie_match_table[] = { | ||
| 676 | {.compatible = "apm,xgene-pcie",}, | ||
| 677 | {}, | ||
| 678 | }; | ||
| 679 | |||
| 680 | static struct platform_driver xgene_pcie_driver = { | ||
| 681 | .driver = { | ||
| 682 | .name = "xgene-pcie", | ||
| 683 | .of_match_table = of_match_ptr(xgene_pcie_match_table), | ||
| 684 | .suppress_bind_attrs = true, | ||
| 685 | }, | ||
| 686 | .probe = xgene_pcie_probe, | ||
| 687 | }; | ||
| 688 | builtin_platform_driver(xgene_pcie_driver); | ||
| 689 | #endif | ||
diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c new file mode 100644 index 000000000000..025ef7d9a046 --- /dev/null +++ b/drivers/pci/controller/pcie-altera-msi.c | |||
| @@ -0,0 +1,291 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Altera PCIe MSI support | ||
| 4 | * | ||
| 5 | * Author: Ley Foon Tan <lftan@altera.com> | ||
| 6 | * | ||
| 7 | * Copyright Altera Corporation (C) 2013-2015. All rights reserved | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/interrupt.h> | ||
| 11 | #include <linux/irqchip/chained_irq.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/msi.h> | ||
| 14 | #include <linux/of_address.h> | ||
| 15 | #include <linux/of_irq.h> | ||
| 16 | #include <linux/of_pci.h> | ||
| 17 | #include <linux/pci.h> | ||
| 18 | #include <linux/platform_device.h> | ||
| 19 | #include <linux/slab.h> | ||
| 20 | |||
| 21 | #define MSI_STATUS 0x0 | ||
| 22 | #define MSI_ERROR 0x4 | ||
| 23 | #define MSI_INTMASK 0x8 | ||
| 24 | |||
| 25 | #define MAX_MSI_VECTORS 32 | ||
| 26 | |||
| 27 | struct altera_msi { | ||
| 28 | DECLARE_BITMAP(used, MAX_MSI_VECTORS); | ||
| 29 | struct mutex lock; /* protect "used" bitmap */ | ||
| 30 | struct platform_device *pdev; | ||
| 31 | struct irq_domain *msi_domain; | ||
| 32 | struct irq_domain *inner_domain; | ||
| 33 | void __iomem *csr_base; | ||
| 34 | void __iomem *vector_base; | ||
| 35 | phys_addr_t vector_phy; | ||
| 36 | u32 num_of_vectors; | ||
| 37 | int irq; | ||
| 38 | }; | ||
| 39 | |||
| 40 | static inline void msi_writel(struct altera_msi *msi, const u32 value, | ||
| 41 | const u32 reg) | ||
| 42 | { | ||
| 43 | writel_relaxed(value, msi->csr_base + reg); | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) | ||
| 47 | { | ||
| 48 | return readl_relaxed(msi->csr_base + reg); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void altera_msi_isr(struct irq_desc *desc) | ||
| 52 | { | ||
| 53 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 54 | struct altera_msi *msi; | ||
| 55 | unsigned long status; | ||
| 56 | u32 bit; | ||
| 57 | u32 virq; | ||
| 58 | |||
| 59 | chained_irq_enter(chip, desc); | ||
| 60 | msi = irq_desc_get_handler_data(desc); | ||
| 61 | |||
| 62 | while ((status = msi_readl(msi, MSI_STATUS)) != 0) { | ||
| 63 | for_each_set_bit(bit, &status, msi->num_of_vectors) { | ||
| 64 | /* Dummy read from vector to clear the interrupt */ | ||
| 65 | readl_relaxed(msi->vector_base + (bit * sizeof(u32))); | ||
| 66 | |||
| 67 | virq = irq_find_mapping(msi->inner_domain, bit); | ||
| 68 | if (virq) | ||
| 69 | generic_handle_irq(virq); | ||
| 70 | else | ||
| 71 | dev_err(&msi->pdev->dev, "unexpected MSI\n"); | ||
| 72 | } | ||
| 73 | } | ||
| 74 | |||
| 75 | chained_irq_exit(chip, desc); | ||
| 76 | } | ||
| 77 | |||
| 78 | static struct irq_chip altera_msi_irq_chip = { | ||
| 79 | .name = "Altera PCIe MSI", | ||
| 80 | .irq_mask = pci_msi_mask_irq, | ||
| 81 | .irq_unmask = pci_msi_unmask_irq, | ||
| 82 | }; | ||
| 83 | |||
| 84 | static struct msi_domain_info altera_msi_domain_info = { | ||
| 85 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 86 | MSI_FLAG_PCI_MSIX), | ||
| 87 | .chip = &altera_msi_irq_chip, | ||
| 88 | }; | ||
| 89 | |||
| 90 | static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 91 | { | ||
| 92 | struct altera_msi *msi = irq_data_get_irq_chip_data(data); | ||
| 93 | phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32)); | ||
| 94 | |||
| 95 | msg->address_lo = lower_32_bits(addr); | ||
| 96 | msg->address_hi = upper_32_bits(addr); | ||
| 97 | msg->data = data->hwirq; | ||
| 98 | |||
| 99 | dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
| 100 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
| 101 | } | ||
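The composed message gives each hwirq its own 32-bit doorbell slot, so the write address alone identifies the vector. A minimal standalone sketch of that address math follows; the base address is an assumed example, not taken from real hardware.

```c
/*
 * Sketch of the doorbell math in altera_compose_msi_msg(): an EP
 * write to vector_phy + 4 * hwirq raises exactly that MSI.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vector_phy = 0xf9000000;	/* assumed example base */
	uint32_t hwirq = 5;			/* vector number */
	uint64_t addr = vector_phy + hwirq * sizeof(uint32_t);

	printf("msi#%u address_hi %#x address_lo %#x data %u\n",
	       hwirq, (unsigned)(addr >> 32), (unsigned)addr, hwirq);
	return 0;
}
```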
| 102 | |||
| 103 | static int altera_msi_set_affinity(struct irq_data *irq_data, | ||
| 104 | const struct cpumask *mask, bool force) | ||
| 105 | { | ||
| 106 | return -EINVAL; | ||
| 107 | } | ||
| 108 | |||
| 109 | static struct irq_chip altera_msi_bottom_irq_chip = { | ||
| 110 | .name = "Altera MSI", | ||
| 111 | .irq_compose_msi_msg = altera_compose_msi_msg, | ||
| 112 | .irq_set_affinity = altera_msi_set_affinity, | ||
| 113 | }; | ||
| 114 | |||
| 115 | static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
| 116 | unsigned int nr_irqs, void *args) | ||
| 117 | { | ||
| 118 | struct altera_msi *msi = domain->host_data; | ||
| 119 | unsigned long bit; | ||
| 120 | u32 mask; | ||
| 121 | |||
| 122 | WARN_ON(nr_irqs != 1); | ||
| 123 | mutex_lock(&msi->lock); | ||
| 124 | |||
| 125 | bit = find_first_zero_bit(msi->used, msi->num_of_vectors); | ||
| 126 | if (bit >= msi->num_of_vectors) { | ||
| 127 | mutex_unlock(&msi->lock); | ||
| 128 | return -ENOSPC; | ||
| 129 | } | ||
| 130 | |||
| 131 | set_bit(bit, msi->used); | ||
| 132 | |||
| 133 | mutex_unlock(&msi->lock); | ||
| 134 | |||
| 135 | irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, | ||
| 136 | domain->host_data, handle_simple_irq, | ||
| 137 | NULL, NULL); | ||
| 138 | |||
| 139 | mask = msi_readl(msi, MSI_INTMASK); | ||
| 140 | mask |= 1 << bit; | ||
| 141 | msi_writel(msi, mask, MSI_INTMASK); | ||
| 142 | |||
| 143 | return 0; | ||
| 144 | } | ||
| 145 | |||
| 146 | static void altera_irq_domain_free(struct irq_domain *domain, | ||
| 147 | unsigned int virq, unsigned int nr_irqs) | ||
| 148 | { | ||
| 149 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
| 150 | struct altera_msi *msi = irq_data_get_irq_chip_data(d); | ||
| 151 | u32 mask; | ||
| 152 | |||
| 153 | mutex_lock(&msi->lock); | ||
| 154 | |||
| 155 | if (!test_bit(d->hwirq, msi->used)) { | ||
| 156 | dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", | ||
| 157 | d->hwirq); | ||
| 158 | } else { | ||
| 159 | __clear_bit(d->hwirq, msi->used); | ||
| 160 | mask = msi_readl(msi, MSI_INTMASK); | ||
| 161 | mask &= ~(1 << d->hwirq); | ||
| 162 | msi_writel(msi, mask, MSI_INTMASK); | ||
| 163 | } | ||
| 164 | |||
| 165 | mutex_unlock(&msi->lock); | ||
| 166 | } | ||
| 167 | |||
| 168 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 169 | .alloc = altera_irq_domain_alloc, | ||
| 170 | .free = altera_irq_domain_free, | ||
| 171 | }; | ||
| 172 | |||
| 173 | static int altera_allocate_domains(struct altera_msi *msi) | ||
| 174 | { | ||
| 175 | struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); | ||
| 176 | |||
| 177 | msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, | ||
| 178 | &msi_domain_ops, msi); | ||
| 179 | if (!msi->inner_domain) { | ||
| 180 | dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); | ||
| 181 | return -ENOMEM; | ||
| 182 | } | ||
| 183 | |||
| 184 | msi->msi_domain = pci_msi_create_irq_domain(fwnode, | ||
| 185 | &altera_msi_domain_info, msi->inner_domain); | ||
| 186 | if (!msi->msi_domain) { | ||
| 187 | dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); | ||
| 188 | irq_domain_remove(msi->inner_domain); | ||
| 189 | return -ENOMEM; | ||
| 190 | } | ||
| 191 | |||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | static void altera_free_domains(struct altera_msi *msi) | ||
| 196 | { | ||
| 197 | irq_domain_remove(msi->msi_domain); | ||
| 198 | irq_domain_remove(msi->inner_domain); | ||
| 199 | } | ||
| 200 | |||
| 201 | static int altera_msi_remove(struct platform_device *pdev) | ||
| 202 | { | ||
| 203 | struct altera_msi *msi = platform_get_drvdata(pdev); | ||
| 204 | |||
| 205 | msi_writel(msi, 0, MSI_INTMASK); | ||
| 206 | irq_set_chained_handler(msi->irq, NULL); | ||
| 207 | irq_set_handler_data(msi->irq, NULL); | ||
| 208 | |||
| 209 | altera_free_domains(msi); | ||
| 210 | |||
| 211 | platform_set_drvdata(pdev, NULL); | ||
| 212 | return 0; | ||
| 213 | } | ||
| 214 | |||
| 215 | static int altera_msi_probe(struct platform_device *pdev) | ||
| 216 | { | ||
| 217 | struct altera_msi *msi; | ||
| 218 | struct device_node *np = pdev->dev.of_node; | ||
| 219 | struct resource *res; | ||
| 220 | int ret; | ||
| 221 | |||
| 222 | msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), | ||
| 223 | GFP_KERNEL); | ||
| 224 | if (!msi) | ||
| 225 | return -ENOMEM; | ||
| 226 | |||
| 227 | mutex_init(&msi->lock); | ||
| 228 | msi->pdev = pdev; | ||
| 229 | |||
| 230 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); | ||
| 231 | msi->csr_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 232 | if (IS_ERR(msi->csr_base)) { | ||
| 233 | dev_err(&pdev->dev, "failed to map csr memory\n"); | ||
| 234 | return PTR_ERR(msi->csr_base); | ||
| 235 | } | ||
| 236 | |||
| 237 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
| 238 | "vector_slave"); | ||
| 239 | msi->vector_base = devm_ioremap_resource(&pdev->dev, res); | ||
| 240 | if (IS_ERR(msi->vector_base)) { | ||
| 241 | dev_err(&pdev->dev, "failed to map vector_slave memory\n"); | ||
| 242 | return PTR_ERR(msi->vector_base); | ||
| 243 | } | ||
| 244 | |||
| 245 | msi->vector_phy = res->start; | ||
| 246 | |||
| 247 | if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { | ||
| 248 | dev_err(&pdev->dev, "failed to parse the number of vectors\n"); | ||
| 249 | return -EINVAL; | ||
| 250 | } | ||
| 251 | |||
| 252 | ret = altera_allocate_domains(msi); | ||
| 253 | if (ret) | ||
| 254 | return ret; | ||
| 255 | |||
| 256 | msi->irq = platform_get_irq(pdev, 0); | ||
| 257 | if (msi->irq < 0) { | ||
| 258 | dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); | ||
| 259 | ret = msi->irq; | ||
| 260 | goto err; | ||
| 261 | } | ||
| 262 | |||
| 263 | irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); | ||
| 264 | platform_set_drvdata(pdev, msi); | ||
| 265 | |||
| 266 | return 0; | ||
| 267 | |||
| 268 | err: | ||
| 269 | altera_msi_remove(pdev); | ||
| 270 | return ret; | ||
| 271 | } | ||
| 272 | |||
| 273 | static const struct of_device_id altera_msi_of_match[] = { | ||
| 274 | { .compatible = "altr,msi-1.0", NULL }, | ||
| 275 | { }, | ||
| 276 | }; | ||
| 277 | |||
| 278 | static struct platform_driver altera_msi_driver = { | ||
| 279 | .driver = { | ||
| 280 | .name = "altera-msi", | ||
| 281 | .of_match_table = altera_msi_of_match, | ||
| 282 | }, | ||
| 283 | .probe = altera_msi_probe, | ||
| 284 | .remove = altera_msi_remove, | ||
| 285 | }; | ||
| 286 | |||
| 287 | static int __init altera_msi_init(void) | ||
| 288 | { | ||
| 289 | return platform_driver_register(&altera_msi_driver); | ||
| 290 | } | ||
| 291 | subsys_initcall(altera_msi_init); | ||
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c new file mode 100644 index 000000000000..7d05e51205b3 --- /dev/null +++ b/drivers/pci/controller/pcie-altera.c | |||
| @@ -0,0 +1,645 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright Altera Corporation (C) 2013-2015. All rights reserved | ||
| 4 | * | ||
| 5 | * Author: Ley Foon Tan <lftan@altera.com> | ||
| 6 | * Description: Altera PCIe host controller driver | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/delay.h> | ||
| 10 | #include <linux/interrupt.h> | ||
| 11 | #include <linux/irqchip/chained_irq.h> | ||
| 12 | #include <linux/init.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/of_irq.h> | ||
| 15 | #include <linux/of_pci.h> | ||
| 16 | #include <linux/pci.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/slab.h> | ||
| 19 | |||
| 20 | #include "../pci.h" | ||
| 21 | |||
| 22 | #define RP_TX_REG0 0x2000 | ||
| 23 | #define RP_TX_REG1 0x2004 | ||
| 24 | #define RP_TX_CNTRL 0x2008 | ||
| 25 | #define RP_TX_EOP 0x2 | ||
| 26 | #define RP_TX_SOP 0x1 | ||
| 27 | #define RP_RXCPL_STATUS 0x2010 | ||
| 28 | #define RP_RXCPL_EOP 0x2 | ||
| 29 | #define RP_RXCPL_SOP 0x1 | ||
| 30 | #define RP_RXCPL_REG0 0x2014 | ||
| 31 | #define RP_RXCPL_REG1 0x2018 | ||
| 32 | #define P2A_INT_STATUS 0x3060 | ||
| 33 | #define P2A_INT_STS_ALL 0xf | ||
| 34 | #define P2A_INT_ENABLE 0x3070 | ||
| 35 | #define P2A_INT_ENA_ALL 0xf | ||
| 36 | #define RP_LTSSM 0x3c64 | ||
| 37 | #define RP_LTSSM_MASK 0x1f | ||
| 38 | #define LTSSM_L0 0xf | ||
| 39 | |||
| 40 | #define PCIE_CAP_OFFSET 0x80 | ||
| 41 | /* TLP configuration types 0 and 1 */ | ||
| 42 | #define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ | ||
| 43 | #define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ | ||
| 44 | #define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ | ||
| 45 | #define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ | ||
| 46 | #define TLP_PAYLOAD_SIZE 0x01 | ||
| 47 | #define TLP_READ_TAG 0x1d | ||
| 48 | #define TLP_WRITE_TAG 0x10 | ||
| 49 | #define RP_DEVFN 0 | ||
| 50 | #define TLP_REQ_ID(bus, devfn) (((bus) << 8) | (devfn)) | ||
| 51 | #define TLP_CFGRD_DW0(pcie, bus) \ | ||
| 52 | ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGRD0 \ | ||
| 53 | : TLP_FMTTYPE_CFGRD1) << 24) | \ | ||
| 54 | TLP_PAYLOAD_SIZE) | ||
| 55 | #define TLP_CFGWR_DW0(pcie, bus) \ | ||
| 56 | ((((bus == pcie->root_bus_nr) ? TLP_FMTTYPE_CFGWR0 \ | ||
| 57 | : TLP_FMTTYPE_CFGWR1) << 24) | \ | ||
| 58 | TLP_PAYLOAD_SIZE) | ||
| 59 | #define TLP_CFG_DW1(pcie, tag, be) \ | ||
| 60 | (((TLP_REQ_ID(pcie->root_bus_nr, RP_DEVFN)) << 16) | (tag << 8) | (be)) | ||
| 61 | #define TLP_CFG_DW2(bus, devfn, offset) \ | ||
| 62 | (((bus) << 24) | ((devfn) << 16) | (offset)) | ||
| 63 | #define TLP_COMP_STATUS(s) (((s) >> 13) & 7) | ||
| 64 | #define TLP_HDR_SIZE 3 | ||
| 65 | #define TLP_LOOP 500 | ||
| 66 | |||
| 67 | #define LINK_UP_TIMEOUT HZ | ||
| 68 | #define LINK_RETRAIN_TIMEOUT HZ | ||
| 69 | |||
| 70 | #define DWORD_MASK 3 | ||
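To make the header macros above concrete, this standalone worked example computes the three dwords for a Type 1 configuration read of bus 2, devfn 0, offset 0x10, assuming root_bus_nr == 0. The macro logic is inlined here because the real macros dereference the driver's pcie struct.

```c
/* Sketch of TLP_CFGRD_DW0 / TLP_CFG_DW1 / TLP_CFG_DW2 construction. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t root_bus = 0, bus = 2, devfn = 0, where = 0x10;
	uint32_t hdr0 = (0x05u << 24) | 0x01;		/* CfgRd1 | 1 DW */
	uint32_t hdr1 = (((root_bus << 8) | 0) << 16) |	/* requester ID */
			(0x1du << 8) | 0xf;		/* read tag | BE */
	uint32_t hdr2 = (bus << 24) | (devfn << 16) | (where & ~3u);

	/* prints: 05000001 00001d0f 02000010 */
	printf("%08x %08x %08x\n", hdr0, hdr1, hdr2);
	return 0;
}
```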
| 71 | |||
| 72 | struct altera_pcie { | ||
| 73 | struct platform_device *pdev; | ||
| 74 | void __iomem *cra_base; /* DT Cra */ | ||
| 75 | int irq; | ||
| 76 | u8 root_bus_nr; | ||
| 77 | struct irq_domain *irq_domain; | ||
| 78 | struct resource bus_range; | ||
| 79 | struct list_head resources; | ||
| 80 | }; | ||
| 81 | |||
| 82 | struct tlp_rp_regpair_t { | ||
| 83 | u32 ctrl; | ||
| 84 | u32 reg0; | ||
| 85 | u32 reg1; | ||
| 86 | }; | ||
| 87 | |||
| 88 | static inline void cra_writel(struct altera_pcie *pcie, const u32 value, | ||
| 89 | const u32 reg) | ||
| 90 | { | ||
| 91 | writel_relaxed(value, pcie->cra_base + reg); | ||
| 92 | } | ||
| 93 | |||
| 94 | static inline u32 cra_readl(struct altera_pcie *pcie, const u32 reg) | ||
| 95 | { | ||
| 96 | return readl_relaxed(pcie->cra_base + reg); | ||
| 97 | } | ||
| 98 | |||
| 99 | static bool altera_pcie_link_up(struct altera_pcie *pcie) | ||
| 100 | { | ||
| 101 | return !!((cra_readl(pcie, RP_LTSSM) & RP_LTSSM_MASK) == LTSSM_L0); | ||
| 102 | } | ||
| 103 | |||
| 104 | /* | ||
| 105 | * The Altera PCIe port uses BAR0 of the RC's configuration space to | ||
| 106 | * translate from the PCI bus to the native bus. The entire DDR region is | ||
| 107 | * mapped into PCIe space through these registers, so it can be reached by | ||
| 108 | * DMA from EP devices. BAR0 is also used to reach the MSI vector memory | ||
| 109 | * when an MSI/MSI-X interrupt arrives from an EP device, eventually | ||
| 110 | * triggering an interrupt to the GIC. The bridge's BAR0 should be hidden | ||
| 111 | * during enumeration to avoid sizing and resource allocation by the PCI core. | ||
| 112 | */ | ||
| 113 | static bool altera_pcie_hide_rc_bar(struct pci_bus *bus, unsigned int devfn, | ||
| 114 | int offset) | ||
| 115 | { | ||
| 116 | if (pci_is_root_bus(bus) && (devfn == 0) && | ||
| 117 | (offset == PCI_BASE_ADDRESS_0)) | ||
| 118 | return true; | ||
| 119 | |||
| 120 | return false; | ||
| 121 | } | ||
| 122 | |||
| 123 | static void tlp_write_tx(struct altera_pcie *pcie, | ||
| 124 | struct tlp_rp_regpair_t *tlp_rp_regdata) | ||
| 125 | { | ||
| 126 | cra_writel(pcie, tlp_rp_regdata->reg0, RP_TX_REG0); | ||
| 127 | cra_writel(pcie, tlp_rp_regdata->reg1, RP_TX_REG1); | ||
| 128 | cra_writel(pcie, tlp_rp_regdata->ctrl, RP_TX_CNTRL); | ||
| 129 | } | ||
| 130 | |||
| 131 | static bool altera_pcie_valid_device(struct altera_pcie *pcie, | ||
| 132 | struct pci_bus *bus, int dev) | ||
| 133 | { | ||
| 134 | /* If there is no link, then there is no device */ | ||
| 135 | if (bus->number != pcie->root_bus_nr) { | ||
| 136 | if (!altera_pcie_link_up(pcie)) | ||
| 137 | return false; | ||
| 138 | } | ||
| 139 | |||
| 140 | /* access only one slot on each root port */ | ||
| 141 | if (bus->number == pcie->root_bus_nr && dev > 0) | ||
| 142 | return false; | ||
| 143 | |||
| 144 | return true; | ||
| 145 | } | ||
| 146 | |||
| 147 | static int tlp_read_packet(struct altera_pcie *pcie, u32 *value) | ||
| 148 | { | ||
| 149 | int i; | ||
| 150 | bool sop = false; | ||
| 151 | u32 ctrl; | ||
| 152 | u32 reg0, reg1; | ||
| 153 | u32 comp_status = 1; | ||
| 154 | |||
| 155 | /* | ||
| 156 | * A minimum of 2 loops is needed to read the TLP headers, plus 1 loop | ||
| 157 | * to read the data payload. | ||
| 158 | */ | ||
| 159 | for (i = 0; i < TLP_LOOP; i++) { | ||
| 160 | ctrl = cra_readl(pcie, RP_RXCPL_STATUS); | ||
| 161 | if ((ctrl & RP_RXCPL_SOP) || (ctrl & RP_RXCPL_EOP) || sop) { | ||
| 162 | reg0 = cra_readl(pcie, RP_RXCPL_REG0); | ||
| 163 | reg1 = cra_readl(pcie, RP_RXCPL_REG1); | ||
| 164 | |||
| 165 | if (ctrl & RP_RXCPL_SOP) { | ||
| 166 | sop = true; | ||
| 167 | comp_status = TLP_COMP_STATUS(reg1); | ||
| 168 | } | ||
| 169 | |||
| 170 | if (ctrl & RP_RXCPL_EOP) { | ||
| 171 | if (comp_status) | ||
| 172 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 173 | |||
| 174 | if (value) | ||
| 175 | *value = reg0; | ||
| 176 | |||
| 177 | return PCIBIOS_SUCCESSFUL; | ||
| 178 | } | ||
| 179 | } | ||
| 180 | udelay(5); | ||
| 181 | } | ||
| 182 | |||
| 183 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 184 | } | ||
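tlp_read_packet() treats any non-zero completion status as a failed access. A tiny standalone sketch of the TLP_COMP_STATUS() extraction, with assumed register values:

```c
/*
 * The 3-bit completion status sits in bits 15:13 of the second
 * completion header dword; non-zero means the access failed.
 */
#include <stdint.h>
#include <stdio.h>

#define TLP_COMP_STATUS(s) (((s) >> 13) & 7)

int main(void)
{
	uint32_t reg1_ok = 0x00000004;	/* status 0: successful */
	uint32_t reg1_ur = 0x00002004;	/* status 1: unsupported request */

	printf("ok=%u ur=%u\n", TLP_COMP_STATUS(reg1_ok),
	       TLP_COMP_STATUS(reg1_ur));
	return 0;
}
```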
| 185 | |||
| 186 | static void tlp_write_packet(struct altera_pcie *pcie, u32 *headers, | ||
| 187 | u32 data, bool align) | ||
| 188 | { | ||
| 189 | struct tlp_rp_regpair_t tlp_rp_regdata; | ||
| 190 | |||
| 191 | tlp_rp_regdata.reg0 = headers[0]; | ||
| 192 | tlp_rp_regdata.reg1 = headers[1]; | ||
| 193 | tlp_rp_regdata.ctrl = RP_TX_SOP; | ||
| 194 | tlp_write_tx(pcie, &tlp_rp_regdata); | ||
| 195 | |||
| 196 | if (align) { | ||
| 197 | tlp_rp_regdata.reg0 = headers[2]; | ||
| 198 | tlp_rp_regdata.reg1 = 0; | ||
| 199 | tlp_rp_regdata.ctrl = 0; | ||
| 200 | tlp_write_tx(pcie, &tlp_rp_regdata); | ||
| 201 | |||
| 202 | tlp_rp_regdata.reg0 = data; | ||
| 203 | tlp_rp_regdata.reg1 = 0; | ||
| 204 | } else { | ||
| 205 | tlp_rp_regdata.reg0 = headers[2]; | ||
| 206 | tlp_rp_regdata.reg1 = data; | ||
| 207 | } | ||
| 208 | |||
| 209 | tlp_rp_regdata.ctrl = RP_TX_EOP; | ||
| 210 | tlp_write_tx(pcie, &tlp_rp_regdata); | ||
| 211 | } | ||
| 212 | |||
| 213 | static int tlp_cfg_dword_read(struct altera_pcie *pcie, u8 bus, u32 devfn, | ||
| 214 | int where, u8 byte_en, u32 *value) | ||
| 215 | { | ||
| 216 | u32 headers[TLP_HDR_SIZE]; | ||
| 217 | |||
| 218 | headers[0] = TLP_CFGRD_DW0(pcie, bus); | ||
| 219 | headers[1] = TLP_CFG_DW1(pcie, TLP_READ_TAG, byte_en); | ||
| 220 | headers[2] = TLP_CFG_DW2(bus, devfn, where); | ||
| 221 | |||
| 222 | tlp_write_packet(pcie, headers, 0, false); | ||
| 223 | |||
| 224 | return tlp_read_packet(pcie, value); | ||
| 225 | } | ||
| 226 | |||
| 227 | static int tlp_cfg_dword_write(struct altera_pcie *pcie, u8 bus, u32 devfn, | ||
| 228 | int where, u8 byte_en, u32 value) | ||
| 229 | { | ||
| 230 | u32 headers[TLP_HDR_SIZE]; | ||
| 231 | int ret; | ||
| 232 | |||
| 233 | headers[0] = TLP_CFGWR_DW0(pcie, bus); | ||
| 234 | headers[1] = TLP_CFG_DW1(pcie, TLP_WRITE_TAG, byte_en); | ||
| 235 | headers[2] = TLP_CFG_DW2(bus, devfn, where); | ||
| 236 | |||
| 237 | /* Check alignment to a qword (64-bit) boundary. */ | ||
| 238 | if ((where & 0x7) == 0) | ||
| 239 | tlp_write_packet(pcie, headers, value, true); | ||
| 240 | else | ||
| 241 | tlp_write_packet(pcie, headers, value, false); | ||
| 242 | |||
| 243 | ret = tlp_read_packet(pcie, NULL); | ||
| 244 | if (ret != PCIBIOS_SUCCESSFUL) | ||
| 245 | return ret; | ||
| 246 | |||
| 247 | /* | ||
| 248 | * Monitor changes to the PCI_PRIMARY_BUS register on the root port | ||
| 249 | * and update the local copy of the root bus number accordingly. | ||
| 250 | */ | ||
| 251 | if ((bus == pcie->root_bus_nr) && (where == PCI_PRIMARY_BUS)) | ||
| 252 | pcie->root_bus_nr = (u8)(value); | ||
| 253 | |||
| 254 | return PCIBIOS_SUCCESSFUL; | ||
| 255 | } | ||
| 256 | |||
| 257 | static int _altera_pcie_cfg_read(struct altera_pcie *pcie, u8 busno, | ||
| 258 | unsigned int devfn, int where, int size, | ||
| 259 | u32 *value) | ||
| 260 | { | ||
| 261 | int ret; | ||
| 262 | u32 data; | ||
| 263 | u8 byte_en; | ||
| 264 | |||
| 265 | switch (size) { | ||
| 266 | case 1: | ||
| 267 | byte_en = 1 << (where & 3); | ||
| 268 | break; | ||
| 269 | case 2: | ||
| 270 | byte_en = 3 << (where & 3); | ||
| 271 | break; | ||
| 272 | default: | ||
| 273 | byte_en = 0xf; | ||
| 274 | break; | ||
| 275 | } | ||
| 276 | |||
| 277 | ret = tlp_cfg_dword_read(pcie, busno, devfn, | ||
| 278 | (where & ~DWORD_MASK), byte_en, &data); | ||
| 279 | if (ret != PCIBIOS_SUCCESSFUL) | ||
| 280 | return ret; | ||
| 281 | |||
| 282 | switch (size) { | ||
| 283 | case 1: | ||
| 284 | *value = (data >> (8 * (where & 0x3))) & 0xff; | ||
| 285 | break; | ||
| 286 | case 2: | ||
| 287 | *value = (data >> (8 * (where & 0x2))) & 0xffff; | ||
| 288 | break; | ||
| 289 | default: | ||
| 290 | *value = data; | ||
| 291 | break; | ||
| 292 | } | ||
| 293 | |||
| 294 | return PCIBIOS_SUCCESSFUL; | ||
| 295 | } | ||
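The byte-enable and shift arithmetic above is easiest to see with numbers. A standalone sketch, using an equivalent closed form for the byte-enable switch and an assumed dword value: a 16-bit read at config offset 0x06 becomes a dword read at 0x04 with byte enables 0xc, and the value comes from bits 31:16.

```c
/* Byte-lane math mirroring _altera_pcie_cfg_read() for size == 2. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int where = 0x06, size = 2;
	uint32_t data = 0x00100006;			/* assumed dword at 0x04 */
	uint32_t byte_en = ((1u << size) - 1) << (where & 3);
	uint32_t value = (data >> (8 * (where & 2))) & 0xffff;

	/* prints: dword 0x4 byte_en 0xc value 0x0010 */
	printf("dword %#x byte_en %#x value %#06x\n",
	       where & ~3, byte_en, value);
	return 0;
}
```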
| 296 | |||
| 297 | static int _altera_pcie_cfg_write(struct altera_pcie *pcie, u8 busno, | ||
| 298 | unsigned int devfn, int where, int size, | ||
| 299 | u32 value) | ||
| 300 | { | ||
| 301 | u32 data32; | ||
| 302 | u32 shift = 8 * (where & 3); | ||
| 303 | u8 byte_en; | ||
| 304 | |||
| 305 | switch (size) { | ||
| 306 | case 1: | ||
| 307 | data32 = (value & 0xff) << shift; | ||
| 308 | byte_en = 1 << (where & 3); | ||
| 309 | break; | ||
| 310 | case 2: | ||
| 311 | data32 = (value & 0xffff) << shift; | ||
| 312 | byte_en = 3 << (where & 3); | ||
| 313 | break; | ||
| 314 | default: | ||
| 315 | data32 = value; | ||
| 316 | byte_en = 0xf; | ||
| 317 | break; | ||
| 318 | } | ||
| 319 | |||
| 320 | return tlp_cfg_dword_write(pcie, busno, devfn, (where & ~DWORD_MASK), | ||
| 321 | byte_en, data32); | ||
| 322 | } | ||
| 323 | |||
| 324 | static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn, | ||
| 325 | int where, int size, u32 *value) | ||
| 326 | { | ||
| 327 | struct altera_pcie *pcie = bus->sysdata; | ||
| 328 | |||
| 329 | if (altera_pcie_hide_rc_bar(bus, devfn, where)) | ||
| 330 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 331 | |||
| 332 | if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) { | ||
| 333 | *value = 0xffffffff; | ||
| 334 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 335 | } | ||
| 336 | |||
| 337 | return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size, | ||
| 338 | value); | ||
| 339 | } | ||
| 340 | |||
| 341 | static int altera_pcie_cfg_write(struct pci_bus *bus, unsigned int devfn, | ||
| 342 | int where, int size, u32 value) | ||
| 343 | { | ||
| 344 | struct altera_pcie *pcie = bus->sysdata; | ||
| 345 | |||
| 346 | if (altera_pcie_hide_rc_bar(bus, devfn, where)) | ||
| 347 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 348 | |||
| 349 | if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) | ||
| 350 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 351 | |||
| 352 | return _altera_pcie_cfg_write(pcie, bus->number, devfn, where, size, | ||
| 353 | value); | ||
| 354 | } | ||
| 355 | |||
| 356 | static struct pci_ops altera_pcie_ops = { | ||
| 357 | .read = altera_pcie_cfg_read, | ||
| 358 | .write = altera_pcie_cfg_write, | ||
| 359 | }; | ||
| 360 | |||
| 361 | static int altera_read_cap_word(struct altera_pcie *pcie, u8 busno, | ||
| 362 | unsigned int devfn, int offset, u16 *value) | ||
| 363 | { | ||
| 364 | u32 data; | ||
| 365 | int ret; | ||
| 366 | |||
| 367 | ret = _altera_pcie_cfg_read(pcie, busno, devfn, | ||
| 368 | PCIE_CAP_OFFSET + offset, sizeof(*value), | ||
| 369 | &data); | ||
| 370 | *value = data; | ||
| 371 | return ret; | ||
| 372 | } | ||
| 373 | |||
| 374 | static int altera_write_cap_word(struct altera_pcie *pcie, u8 busno, | ||
| 375 | unsigned int devfn, int offset, u16 value) | ||
| 376 | { | ||
| 377 | return _altera_pcie_cfg_write(pcie, busno, devfn, | ||
| 378 | PCIE_CAP_OFFSET + offset, sizeof(value), | ||
| 379 | value); | ||
| 380 | } | ||
| 381 | |||
| 382 | static void altera_wait_link_retrain(struct altera_pcie *pcie) | ||
| 383 | { | ||
| 384 | struct device *dev = &pcie->pdev->dev; | ||
| 385 | u16 reg16; | ||
| 386 | unsigned long start_jiffies; | ||
| 387 | |||
| 388 | /* Wait for link training to end. */ | ||
| 389 | start_jiffies = jiffies; | ||
| 390 | for (;;) { | ||
| 391 | altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, | ||
| 392 | PCI_EXP_LNKSTA, ®16); | ||
| 393 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | ||
| 394 | break; | ||
| 395 | |||
| 396 | if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) { | ||
| 397 | dev_err(dev, "link retrain timeout\n"); | ||
| 398 | break; | ||
| 399 | } | ||
| 400 | udelay(100); | ||
| 401 | } | ||
| 402 | |||
| 403 | /* Wait for the link to come up. */ | ||
| 404 | start_jiffies = jiffies; | ||
| 405 | for (;;) { | ||
| 406 | if (altera_pcie_link_up(pcie)) | ||
| 407 | break; | ||
| 408 | |||
| 409 | if (time_after(jiffies, start_jiffies + LINK_UP_TIMEOUT)) { | ||
| 410 | dev_err(dev, "link up timeout\n"); | ||
| 411 | break; | ||
| 412 | } | ||
| 413 | udelay(100); | ||
| 414 | } | ||
| 415 | } | ||
| 416 | |||
| 417 | static void altera_pcie_retrain(struct altera_pcie *pcie) | ||
| 418 | { | ||
| 419 | u16 linkcap, linkstat, linkctl; | ||
| 420 | |||
| 421 | if (!altera_pcie_link_up(pcie)) | ||
| 422 | return; | ||
| 423 | |||
| 424 | /* | ||
| 425 | * Set the retrain bit if the PCIe root port supports more than | ||
| 426 | * 2.5 GT/s but the current link speed is 2.5 GT/s. | ||
| 427 | */ | ||
| 428 | altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKCAP, | ||
| 429 | &linkcap); | ||
| 430 | if ((linkcap & PCI_EXP_LNKCAP_SLS) <= PCI_EXP_LNKCAP_SLS_2_5GB) | ||
| 431 | return; | ||
| 432 | |||
| 433 | altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, PCI_EXP_LNKSTA, | ||
| 434 | &linkstat); | ||
| 435 | if ((linkstat & PCI_EXP_LNKSTA_CLS) == PCI_EXP_LNKSTA_CLS_2_5GB) { | ||
| 436 | altera_read_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, | ||
| 437 | PCI_EXP_LNKCTL, &linkctl); | ||
| 438 | linkctl |= PCI_EXP_LNKCTL_RL; | ||
| 439 | altera_write_cap_word(pcie, pcie->root_bus_nr, RP_DEVFN, | ||
| 440 | PCI_EXP_LNKCTL, linkctl); | ||
| 441 | |||
| 442 | altera_wait_link_retrain(pcie); | ||
| 443 | } | ||
| 444 | } | ||
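The retrain gate above reduces to two field tests. A standalone sketch with locally defined masks (mirroring the PCI_EXP_LNK* definitions in the kernel's pci_regs.h) and assumed register contents:

```c
/* Retrain only if capable of > 2.5 GT/s but trained at 2.5 GT/s. */
#include <stdint.h>
#include <stdio.h>

#define LNKCAP_SLS	0x000f	/* supported link speeds field */
#define LNKSTA_CLS	0x000f	/* current link speed field */
#define SLS_2_5GB	0x0001

int main(void)
{
	uint16_t linkcap = 0x0002;	/* assumed: capable of 5.0 GT/s */
	uint16_t linksta = 0x0001;	/* assumed: trained at 2.5 GT/s */
	int retrain = (linkcap & LNKCAP_SLS) > SLS_2_5GB &&
		      (linksta & LNKSTA_CLS) == SLS_2_5GB;

	printf("retrain needed: %s\n", retrain ? "yes" : "no");
	return 0;
}
```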
| 445 | |||
| 446 | static int altera_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 447 | irq_hw_number_t hwirq) | ||
| 448 | { | ||
| 449 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | ||
| 450 | irq_set_chip_data(irq, domain->host_data); | ||
| 451 | return 0; | ||
| 452 | } | ||
| 453 | |||
| 454 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 455 | .map = altera_pcie_intx_map, | ||
| 456 | .xlate = pci_irqd_intx_xlate, | ||
| 457 | }; | ||
| 458 | |||
| 459 | static void altera_pcie_isr(struct irq_desc *desc) | ||
| 460 | { | ||
| 461 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 462 | struct altera_pcie *pcie; | ||
| 463 | struct device *dev; | ||
| 464 | unsigned long status; | ||
| 465 | u32 bit; | ||
| 466 | u32 virq; | ||
| 467 | |||
| 468 | chained_irq_enter(chip, desc); | ||
| 469 | pcie = irq_desc_get_handler_data(desc); | ||
| 470 | dev = &pcie->pdev->dev; | ||
| 471 | |||
| 472 | while ((status = cra_readl(pcie, P2A_INT_STATUS) | ||
| 473 | & P2A_INT_STS_ALL) != 0) { | ||
| 474 | for_each_set_bit(bit, &status, PCI_NUM_INTX) { | ||
| 475 | /* clear interrupts */ | ||
| 476 | cra_writel(pcie, 1 << bit, P2A_INT_STATUS); | ||
| 477 | |||
| 478 | virq = irq_find_mapping(pcie->irq_domain, bit); | ||
| 479 | if (virq) | ||
| 480 | generic_handle_irq(virq); | ||
| 481 | else | ||
| 482 | dev_err(dev, "unexpected IRQ, INT%d\n", bit); | ||
| 483 | } | ||
| 484 | } | ||
| 485 | |||
| 486 | chained_irq_exit(chip, desc); | ||
| 487 | } | ||
| 488 | |||
| 489 | static int altera_pcie_parse_request_of_pci_ranges(struct altera_pcie *pcie) | ||
| 490 | { | ||
| 491 | int err, res_valid = 0; | ||
| 492 | struct device *dev = &pcie->pdev->dev; | ||
| 493 | struct resource_entry *win; | ||
| 494 | |||
| 495 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 496 | &pcie->resources, NULL); | ||
| 497 | if (err) | ||
| 498 | return err; | ||
| 499 | |||
| 500 | err = devm_request_pci_bus_resources(dev, &pcie->resources); | ||
| 501 | if (err) | ||
| 502 | goto out_release_res; | ||
| 503 | |||
| 504 | resource_list_for_each_entry(win, &pcie->resources) { | ||
| 505 | struct resource *res = win->res; | ||
| 506 | |||
| 507 | if (resource_type(res) == IORESOURCE_MEM) | ||
| 508 | res_valid |= !(res->flags & IORESOURCE_PREFETCH); | ||
| 509 | } | ||
| 510 | |||
| 511 | if (res_valid) | ||
| 512 | return 0; | ||
| 513 | |||
| 514 | dev_err(dev, "non-prefetchable memory resource required\n"); | ||
| 515 | err = -EINVAL; | ||
| 516 | |||
| 517 | out_release_res: | ||
| 518 | pci_free_resource_list(&pcie->resources); | ||
| 519 | return err; | ||
| 520 | } | ||
| 521 | |||
| 522 | static int altera_pcie_init_irq_domain(struct altera_pcie *pcie) | ||
| 523 | { | ||
| 524 | struct device *dev = &pcie->pdev->dev; | ||
| 525 | struct device_node *node = dev->of_node; | ||
| 526 | |||
| 527 | /* Set up INTx */ | ||
| 528 | pcie->irq_domain = irq_domain_add_linear(node, PCI_NUM_INTX, | ||
| 529 | &intx_domain_ops, pcie); | ||
| 530 | if (!pcie->irq_domain) { | ||
| 531 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
| 532 | return -ENOMEM; | ||
| 533 | } | ||
| 534 | |||
| 535 | return 0; | ||
| 536 | } | ||
| 537 | |||
| 538 | static int altera_pcie_parse_dt(struct altera_pcie *pcie) | ||
| 539 | { | ||
| 540 | struct device *dev = &pcie->pdev->dev; | ||
| 541 | struct platform_device *pdev = pcie->pdev; | ||
| 542 | struct resource *cra; | ||
| 543 | |||
| 544 | cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); | ||
| 545 | pcie->cra_base = devm_ioremap_resource(dev, cra); | ||
| 546 | if (IS_ERR(pcie->cra_base)) | ||
| 547 | return PTR_ERR(pcie->cra_base); | ||
| 548 | |||
| 549 | /* Set up the IRQ */ | ||
| 550 | pcie->irq = platform_get_irq(pdev, 0); | ||
| 551 | if (pcie->irq < 0) { | ||
| 552 | dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); | ||
| 553 | return pcie->irq; | ||
| 554 | } | ||
| 555 | |||
| 556 | irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); | ||
| 557 | return 0; | ||
| 558 | } | ||
| 559 | |||
| 560 | static void altera_pcie_host_init(struct altera_pcie *pcie) | ||
| 561 | { | ||
| 562 | altera_pcie_retrain(pcie); | ||
| 563 | } | ||
| 564 | |||
| 565 | static int altera_pcie_probe(struct platform_device *pdev) | ||
| 566 | { | ||
| 567 | struct device *dev = &pdev->dev; | ||
| 568 | struct altera_pcie *pcie; | ||
| 569 | struct pci_bus *bus; | ||
| 570 | struct pci_bus *child; | ||
| 571 | struct pci_host_bridge *bridge; | ||
| 572 | int ret; | ||
| 573 | |||
| 574 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 575 | if (!bridge) | ||
| 576 | return -ENOMEM; | ||
| 577 | |||
| 578 | pcie = pci_host_bridge_priv(bridge); | ||
| 579 | pcie->pdev = pdev; | ||
| 580 | |||
| 581 | ret = altera_pcie_parse_dt(pcie); | ||
| 582 | if (ret) { | ||
| 583 | dev_err(dev, "Parsing DT failed\n"); | ||
| 584 | return ret; | ||
| 585 | } | ||
| 586 | |||
| 587 | INIT_LIST_HEAD(&pcie->resources); | ||
| 588 | |||
| 589 | ret = altera_pcie_parse_request_of_pci_ranges(pcie); | ||
| 590 | if (ret) { | ||
| 591 | dev_err(dev, "Failed add resources\n"); | ||
| 592 | return ret; | ||
| 593 | } | ||
| 594 | |||
| 595 | ret = altera_pcie_init_irq_domain(pcie); | ||
| 596 | if (ret) { | ||
| 597 | dev_err(dev, "Failed creating IRQ Domain\n"); | ||
| 598 | return ret; | ||
| 599 | } | ||
| 600 | |||
| 601 | /* clear all interrupts */ | ||
| 602 | cra_writel(pcie, P2A_INT_STS_ALL, P2A_INT_STATUS); | ||
| 603 | /* enable all interrupts */ | ||
| 604 | cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); | ||
| 605 | altera_pcie_host_init(pcie); | ||
| 606 | |||
| 607 | list_splice_init(&pcie->resources, &bridge->windows); | ||
| 608 | bridge->dev.parent = dev; | ||
| 609 | bridge->sysdata = pcie; | ||
| 610 | bridge->busnr = pcie->root_bus_nr; | ||
| 611 | bridge->ops = &altera_pcie_ops; | ||
| 612 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 613 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 614 | |||
| 615 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 616 | if (ret < 0) | ||
| 617 | return ret; | ||
| 618 | |||
| 619 | bus = bridge->bus; | ||
| 620 | |||
| 621 | pci_assign_unassigned_bus_resources(bus); | ||
| 622 | |||
| 623 | /* Configure PCI Express settings. */ | ||
| 624 | list_for_each_entry(child, &bus->children, node) | ||
| 625 | pcie_bus_configure_settings(child); | ||
| 626 | |||
| 627 | pci_bus_add_devices(bus); | ||
| 628 | return ret; | ||
| 629 | } | ||
| 630 | |||
| 631 | static const struct of_device_id altera_pcie_of_match[] = { | ||
| 632 | { .compatible = "altr,pcie-root-port-1.0", }, | ||
| 633 | {}, | ||
| 634 | }; | ||
| 635 | |||
| 636 | static struct platform_driver altera_pcie_driver = { | ||
| 637 | .probe = altera_pcie_probe, | ||
| 638 | .driver = { | ||
| 639 | .name = "altera-pcie", | ||
| 640 | .of_match_table = altera_pcie_of_match, | ||
| 641 | .suppress_bind_attrs = true, | ||
| 642 | }, | ||
| 643 | }; | ||
| 644 | |||
| 645 | builtin_platform_driver(altera_pcie_driver); | ||
diff --git a/drivers/pci/controller/pcie-cadence-ep.c b/drivers/pci/controller/pcie-cadence-ep.c new file mode 100644 index 000000000000..3d8283e450a9 --- /dev/null +++ b/drivers/pci/controller/pcie-cadence-ep.c | |||
| @@ -0,0 +1,549 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2017 Cadence | ||
| 3 | // Cadence PCIe endpoint controller driver. | ||
| 4 | // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> | ||
| 5 | |||
| 6 | #include <linux/delay.h> | ||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/of.h> | ||
| 9 | #include <linux/pci-epc.h> | ||
| 10 | #include <linux/platform_device.h> | ||
| 11 | #include <linux/pm_runtime.h> | ||
| 12 | #include <linux/sizes.h> | ||
| 13 | |||
| 14 | #include "pcie-cadence.h" | ||
| 15 | |||
| 16 | #define CDNS_PCIE_EP_MIN_APERTURE 128 /* 128 bytes */ | ||
| 17 | #define CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE 0x1 | ||
| 18 | #define CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY 0x3 | ||
| 19 | |||
| 20 | /** | ||
| 21 | * struct cdns_pcie_ep - private data for this PCIe endpoint controller driver | ||
| 22 | * @pcie: Cadence PCIe controller | ||
| 23 | * @max_regions: maximum number of regions supported by hardware | ||
| 24 | * @ob_region_map: bitmask of mapped outbound regions | ||
| 25 | * @ob_addr: base addresses in the AXI bus where the outbound regions start | ||
| 26 | * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ | ||
| 27 | * dedicated outbound region is mapped. | ||
| 28 | * @irq_cpu_addr: base address in the CPU space where a write access triggers | ||
| 29 | * the sending of a memory write (MSI) / normal message (legacy | ||
| 30 | * IRQ) TLP through the PCIe bus. | ||
| 31 | * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ | ||
| 32 | * dedicated outbound region. | ||
| 33 | * @irq_pci_fn: the latest PCI function that has updated the mapping of | ||
| 34 | * the MSI/legacy IRQ dedicated outbound region. | ||
| 35 | * @irq_pending: bitmask of asserted legacy IRQs. | ||
| 36 | */ | ||
| 37 | struct cdns_pcie_ep { | ||
| 38 | struct cdns_pcie pcie; | ||
| 39 | u32 max_regions; | ||
| 40 | unsigned long ob_region_map; | ||
| 41 | phys_addr_t *ob_addr; | ||
| 42 | phys_addr_t irq_phys_addr; | ||
| 43 | void __iomem *irq_cpu_addr; | ||
| 44 | u64 irq_pci_addr; | ||
| 45 | u8 irq_pci_fn; | ||
| 46 | u8 irq_pending; | ||
| 47 | }; | ||
| 48 | |||
| 49 | static int cdns_pcie_ep_write_header(struct pci_epc *epc, u8 fn, | ||
| 50 | struct pci_epf_header *hdr) | ||
| 51 | { | ||
| 52 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 53 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 54 | |||
| 55 | cdns_pcie_ep_fn_writew(pcie, fn, PCI_DEVICE_ID, hdr->deviceid); | ||
| 56 | cdns_pcie_ep_fn_writeb(pcie, fn, PCI_REVISION_ID, hdr->revid); | ||
| 57 | cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CLASS_PROG, hdr->progif_code); | ||
| 58 | cdns_pcie_ep_fn_writew(pcie, fn, PCI_CLASS_DEVICE, | ||
| 59 | hdr->subclass_code | hdr->baseclass_code << 8); | ||
| 60 | cdns_pcie_ep_fn_writeb(pcie, fn, PCI_CACHE_LINE_SIZE, | ||
| 61 | hdr->cache_line_size); | ||
| 62 | cdns_pcie_ep_fn_writew(pcie, fn, PCI_SUBSYSTEM_ID, hdr->subsys_id); | ||
| 63 | cdns_pcie_ep_fn_writeb(pcie, fn, PCI_INTERRUPT_PIN, hdr->interrupt_pin); | ||
| 64 | |||
| 65 | /* | ||
| 66 | * Vendor ID can only be modified from function 0, all other functions | ||
| 67 | * use the same vendor ID as function 0. | ||
| 68 | */ | ||
| 69 | if (fn == 0) { | ||
| 70 | /* Update the vendor IDs. */ | ||
| 71 | u32 id = CDNS_PCIE_LM_ID_VENDOR(hdr->vendorid) | | ||
| 72 | CDNS_PCIE_LM_ID_SUBSYS(hdr->subsys_vendor_id); | ||
| 73 | |||
| 74 | cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); | ||
| 75 | } | ||
| 76 | |||
| 77 | return 0; | ||
| 78 | } | ||
| 79 | |||
| 80 | static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, | ||
| 81 | struct pci_epf_bar *epf_bar) | ||
| 82 | { | ||
| 83 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 84 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 85 | dma_addr_t bar_phys = epf_bar->phys_addr; | ||
| 86 | enum pci_barno bar = epf_bar->barno; | ||
| 87 | int flags = epf_bar->flags; | ||
| 88 | u32 addr0, addr1, reg, cfg, b, aperture, ctrl; | ||
| 89 | u64 sz; | ||
| 90 | |||
| 91 | /* BAR size is 2^(aperture + 7) */ | ||
| 92 | sz = max_t(size_t, epf_bar->size, CDNS_PCIE_EP_MIN_APERTURE); | ||
| 93 | /* | ||
| 94 | * roundup_pow_of_two() returns an unsigned long, which is not suited | ||
| 95 | * for 64-bit values. | ||
| 96 | */ | ||
| 97 | sz = 1ULL << fls64(sz - 1); | ||
| 98 | aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ | ||
| 99 | |||
| 100 | if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { | ||
| 101 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS; | ||
| 102 | } else { | ||
| 103 | bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); | ||
| 104 | bool is_64bits = sz > SZ_2G; | ||
| 105 | |||
| 106 | if (is_64bits && (bar & 1)) | ||
| 107 | return -EINVAL; | ||
| 108 | |||
| 109 | if (is_64bits && !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)) | ||
| 110 | epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; | ||
| 111 | |||
| 112 | if (is_64bits && is_prefetch) | ||
| 113 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; | ||
| 114 | else if (is_prefetch) | ||
| 115 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; | ||
| 116 | else if (is_64bits) | ||
| 117 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS; | ||
| 118 | else | ||
| 119 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS; | ||
| 120 | } | ||
| 121 | |||
| 122 | addr0 = lower_32_bits(bar_phys); | ||
| 123 | addr1 = upper_32_bits(bar_phys); | ||
| 124 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), | ||
| 125 | addr0); | ||
| 126 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), | ||
| 127 | addr1); | ||
| 128 | |||
| 129 | if (bar < BAR_4) { | ||
| 130 | reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); | ||
| 131 | b = bar; | ||
| 132 | } else { | ||
| 133 | reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); | ||
| 134 | b = bar - BAR_4; | ||
| 135 | } | ||
| 136 | |||
| 137 | cfg = cdns_pcie_readl(pcie, reg); | ||
| 138 | cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | | ||
| 139 | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); | ||
| 140 | cfg |= (CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | | ||
| 141 | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); | ||
| 142 | cdns_pcie_writel(pcie, reg, cfg); | ||
| 143 | |||
| 144 | return 0; | ||
| 145 | } | ||
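The aperture field written above encodes the BAR size as log2(size) - 7 after rounding up to a power of two, with a 128-byte minimum. A standalone sketch of that encoding:

```c
/* BAR aperture encoding: 128B -> 0, 256B -> 1, 512B -> 2, ... */
#include <stdint.h>
#include <stdio.h>

static unsigned int cdns_aperture(uint64_t size)
{
	uint64_t sz = size < 128 ? 128 : size;
	unsigned int bits = 0;

	sz--;				/* round up to a power of two */
	while (sz) {
		sz >>= 1;
		bits++;
	}
	return bits - 7;
}

int main(void)
{
	printf("1 MiB BAR -> aperture %u\n", cdns_aperture(1 << 20)); /* 13 */
	printf("200 B BAR -> aperture %u\n", cdns_aperture(200));     /* 1 */
	return 0;
}
```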
| 146 | |||
| 147 | static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, | ||
| 148 | struct pci_epf_bar *epf_bar) | ||
| 149 | { | ||
| 150 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 151 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 152 | enum pci_barno bar = epf_bar->barno; | ||
| 153 | u32 reg, cfg, b, ctrl; | ||
| 154 | |||
| 155 | if (bar < BAR_4) { | ||
| 156 | reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn); | ||
| 157 | b = bar; | ||
| 158 | } else { | ||
| 159 | reg = CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn); | ||
| 160 | b = bar - BAR_4; | ||
| 161 | } | ||
| 162 | |||
| 163 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; | ||
| 164 | cfg = cdns_pcie_readl(pcie, reg); | ||
| 165 | cfg &= ~(CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | | ||
| 166 | CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); | ||
| 167 | cfg |= CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); | ||
| 168 | cdns_pcie_writel(pcie, reg, cfg); | ||
| 169 | |||
| 170 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); | ||
| 171 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); | ||
| 172 | } | ||
| 173 | |||
| 174 | static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, | ||
| 175 | u64 pci_addr, size_t size) | ||
| 176 | { | ||
| 177 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 178 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 179 | u32 r; | ||
| 180 | |||
| 181 | r = find_first_zero_bit(&ep->ob_region_map, | ||
| 182 | sizeof(ep->ob_region_map) * BITS_PER_LONG); | ||
| 183 | if (r >= ep->max_regions - 1) { | ||
| 184 | dev_err(&epc->dev, "no free outbound region\n"); | ||
| 185 | return -EINVAL; | ||
| 186 | } | ||
| 187 | |||
| 188 | cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); | ||
| 189 | |||
| 190 | set_bit(r, &ep->ob_region_map); | ||
| 191 | ep->ob_addr[r] = addr; | ||
| 192 | |||
| 193 | return 0; | ||
| 194 | } | ||
| 195 | |||
| 196 | static void cdns_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, | ||
| 197 | phys_addr_t addr) | ||
| 198 | { | ||
| 199 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 200 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 201 | u32 r; | ||
| 202 | |||
| 203 | for (r = 0; r < ep->max_regions - 1; r++) | ||
| 204 | if (ep->ob_addr[r] == addr) | ||
| 205 | break; | ||
| 206 | |||
| 207 | if (r == ep->max_regions - 1) | ||
| 208 | return; | ||
| 209 | |||
| 210 | cdns_pcie_reset_outbound_region(pcie, r); | ||
| 211 | |||
| 212 | ep->ob_addr[r] = 0; | ||
| 213 | clear_bit(r, &ep->ob_region_map); | ||
| 214 | } | ||
| 215 | |||
| 216 | static int cdns_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, u8 mmc) | ||
| 217 | { | ||
| 218 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 219 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 220 | u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; | ||
| 221 | u16 flags; | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Set the Multiple Message Capable bitfield of the Message Control | ||
| 225 | * register. | ||
| 226 | */ | ||
| 227 | flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); | ||
| 228 | flags = (flags & ~PCI_MSI_FLAGS_QMASK) | (mmc << 1); | ||
| 229 | flags |= PCI_MSI_FLAGS_64BIT; | ||
| 230 | flags &= ~PCI_MSI_FLAGS_MASKBIT; | ||
| 231 | cdns_pcie_ep_fn_writew(pcie, fn, cap + PCI_MSI_FLAGS, flags); | ||
| 232 | |||
| 233 | return 0; | ||
| 234 | } | ||
| 235 | |||
| 236 | static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) | ||
| 237 | { | ||
| 238 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 239 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 240 | u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; | ||
| 241 | u16 flags, mmc, mme; | ||
| 242 | |||
| 243 | /* Validate that the MSI feature is actually enabled. */ | ||
| 244 | flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); | ||
| 245 | if (!(flags & PCI_MSI_FLAGS_ENABLE)) | ||
| 246 | return -EINVAL; | ||
| 247 | |||
| 248 | /* | ||
| 249 | * Get the Multiple Message Enable bitfield from the Message Control | ||
| 250 | * register. | ||
| 251 | */ | ||
| 252 | mmc = (flags & PCI_MSI_FLAGS_QMASK) >> 1; | ||
| 253 | mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; | ||
| 254 | |||
| 255 | return mme; | ||
| 256 | } | ||
| 257 | |||
| 258 | static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, | ||
| 259 | u8 intx, bool is_asserted) | ||
| 260 | { | ||
| 261 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 262 | u32 r = ep->max_regions - 1; | ||
| 263 | u32 offset; | ||
| 264 | u16 status; | ||
| 265 | u8 msg_code; | ||
| 266 | |||
| 267 | intx &= 3; | ||
| 268 | |||
| 269 | /* Set the outbound region if needed. */ | ||
| 270 | if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || | ||
| 271 | ep->irq_pci_fn != fn)) { | ||
| 272 | /* Last region was reserved for IRQ writes. */ | ||
| 273 | cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, r, | ||
| 274 | ep->irq_phys_addr); | ||
| 275 | ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; | ||
| 276 | ep->irq_pci_fn = fn; | ||
| 277 | } | ||
| 278 | |||
| 279 | if (is_asserted) { | ||
| 280 | ep->irq_pending |= BIT(intx); | ||
| 281 | msg_code = MSG_CODE_ASSERT_INTA + intx; | ||
| 282 | } else { | ||
| 283 | ep->irq_pending &= ~BIT(intx); | ||
| 284 | msg_code = MSG_CODE_DEASSERT_INTA + intx; | ||
| 285 | } | ||
| 286 | |||
| 287 | status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); | ||
| 288 | if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { | ||
| 289 | status ^= PCI_STATUS_INTERRUPT; | ||
| 290 | cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); | ||
| 291 | } | ||
| 292 | |||
| 293 | offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | | ||
| 294 | CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | | ||
| 295 | CDNS_PCIE_MSG_NO_DATA; | ||
| 296 | writel(0, ep->irq_cpu_addr + offset); | ||
| 297 | } | ||
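The XOR test above keeps the read-only Interrupt Status bit of PCI_STATUS consistent with the pending-INTx bitmask, flipping it only when the two disagree. A standalone sketch, where STATUS_INTERRUPT mirrors the kernel's PCI_STATUS_INTERRUPT (bit 3):

```c
/* Interrupt Status must read 1 exactly while some INTx is pending. */
#include <stdint.h>
#include <stdio.h>

#define STATUS_INTERRUPT 0x0008

static uint16_t sync_status(uint16_t status, uint8_t irq_pending)
{
	if (((status & STATUS_INTERRUPT) != 0) ^ (irq_pending != 0))
		status ^= STATUS_INTERRUPT;	/* flip only on mismatch */
	return status;
}

int main(void)
{
	printf("%#06x\n", sync_status(0x0000, 0x02)); /* -> 0x0008 */
	printf("%#06x\n", sync_status(0x0008, 0x00)); /* -> 0x0000 */
	printf("%#06x\n", sync_status(0x0008, 0x01)); /* unchanged */
	return 0;
}
```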
| 298 | |||
| 299 | static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx) | ||
| 300 | { | ||
| 301 | u16 cmd; | ||
| 302 | |||
| 303 | cmd = cdns_pcie_ep_fn_readw(&ep->pcie, fn, PCI_COMMAND); | ||
| 304 | if (cmd & PCI_COMMAND_INTX_DISABLE) | ||
| 305 | return -EINVAL; | ||
| 306 | |||
| 307 | cdns_pcie_ep_assert_intx(ep, fn, intx, true); | ||
| 308 | /* | ||
| 309 | * The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq() | ||
| 310 | * from drivers/pci/dwc/pci-dra7xx.c | ||
| 311 | */ | ||
| 312 | mdelay(1); | ||
| 313 | cdns_pcie_ep_assert_intx(ep, fn, intx, false); | ||
| 314 | return 0; | ||
| 315 | } | ||
| 316 | |||
| 317 | static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, | ||
| 318 | u8 interrupt_num) | ||
| 319 | { | ||
| 320 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 321 | u32 cap = CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET; | ||
| 322 | u16 flags, mme, data, data_mask; | ||
| 323 | u8 msi_count; | ||
| 324 | u64 pci_addr, pci_addr_mask = 0xff; | ||
| 325 | |||
| 326 | /* Check whether the MSI feature has been enabled by the PCI host. */ | ||
| 327 | flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_FLAGS); | ||
| 328 | if (!(flags & PCI_MSI_FLAGS_ENABLE)) | ||
| 329 | return -EINVAL; | ||
| 330 | |||
| 331 | /* Get the number of enabled MSIs */ | ||
| 332 | mme = (flags & PCI_MSI_FLAGS_QSIZE) >> 4; | ||
| 333 | msi_count = 1 << mme; | ||
| 334 | if (!interrupt_num || interrupt_num > msi_count) | ||
| 335 | return -EINVAL; | ||
| 336 | |||
| 337 | /* Compute the data value to be written. */ | ||
| 338 | data_mask = msi_count - 1; | ||
| 339 | data = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSI_DATA_64); | ||
| 340 | data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); | ||
| 341 | |||
| 342 | /* Get the PCI address where to write the data into. */ | ||
| 343 | pci_addr = cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_HI); | ||
| 344 | pci_addr <<= 32; | ||
| 345 | pci_addr |= cdns_pcie_ep_fn_readl(pcie, fn, cap + PCI_MSI_ADDRESS_LO); | ||
| 346 | pci_addr &= GENMASK_ULL(63, 2); | ||
| 347 | |||
| 348 | /* Set the outbound region if needed. */ | ||
| 349 | if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || | ||
| 350 | ep->irq_pci_fn != fn)) { | ||
| 351 | /* Last region was reserved for IRQ writes. */ | ||
| 352 | cdns_pcie_set_outbound_region(pcie, fn, ep->max_regions - 1, | ||
| 353 | false, | ||
| 354 | ep->irq_phys_addr, | ||
| 355 | pci_addr & ~pci_addr_mask, | ||
| 356 | pci_addr_mask + 1); | ||
| 357 | ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); | ||
| 358 | ep->irq_pci_fn = fn; | ||
| 359 | } | ||
| 360 | writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); | ||
| 361 | |||
| 362 | return 0; | ||
| 363 | } | ||
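The data merge above keeps the host-provided MSI data word intact except for its low log2(msi_count) bits, which select the vector. A standalone sketch with assumed values:

```c
/* MSI data merge: data = (data & ~mask) | ((irq_num - 1) & mask). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int mme = 3;			/* assumed: 8 vectors enabled */
	uint16_t msi_count = 1u << mme;
	uint16_t data_mask = msi_count - 1;
	uint16_t host_data = 0x4560;		/* assumed host MSI data */
	uint8_t interrupt_num = 5;		/* 1-based vector number */
	uint16_t data = (host_data & ~data_mask) |
			((interrupt_num - 1) & data_mask);

	printf("MSI write data: %#06x\n", data);	/* 0x4564 */
	return 0;
}
```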
| 364 | |||
| 365 | static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, | ||
| 366 | enum pci_epc_irq_type type, u8 interrupt_num) | ||
| 367 | { | ||
| 368 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 369 | |||
| 370 | switch (type) { | ||
| 371 | case PCI_EPC_IRQ_LEGACY: | ||
| 372 | return cdns_pcie_ep_send_legacy_irq(ep, fn, 0); | ||
| 373 | |||
| 374 | case PCI_EPC_IRQ_MSI: | ||
| 375 | return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); | ||
| 376 | |||
| 377 | default: | ||
| 378 | break; | ||
| 379 | } | ||
| 380 | |||
| 381 | return -EINVAL; | ||
| 382 | } | ||
| 383 | |||
| 384 | static int cdns_pcie_ep_start(struct pci_epc *epc) | ||
| 385 | { | ||
| 386 | struct cdns_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 387 | struct cdns_pcie *pcie = &ep->pcie; | ||
| 388 | struct pci_epf *epf; | ||
| 389 | u32 cfg; | ||
| 390 | |||
| 391 | /* | ||
| 392 | * BIT(0) is hardwired to 1, hence function 0 is always enabled | ||
| 393 | * and can't be disabled anyway. | ||
| 394 | */ | ||
| 395 | cfg = BIT(0); | ||
| 396 | list_for_each_entry(epf, &epc->pci_epf, list) | ||
| 397 | cfg |= BIT(epf->func_no); | ||
| 398 | cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); | ||
| 399 | |||
| 400 | /* | ||
| 401 | * The PCIe links are established automatically by the controller | ||
| 402 | * once and for all at power-up: software can neither start nor stop | ||
| 403 | * those links later at runtime. | ||
| 404 | * | ||
| 405 | * So we only have to notify the EP core that our links are already | ||
| 406 | * established. However, we don't call pci_epc_linkup() directly because | ||
| 407 | * we've already locked epc->lock. | ||
| 408 | */ | ||
| 409 | list_for_each_entry(epf, &epc->pci_epf, list) | ||
| 410 | pci_epf_linkup(epf); | ||
| 411 | |||
| 412 | return 0; | ||
| 413 | } | ||
| 414 | |||
| 415 | static const struct pci_epc_ops cdns_pcie_epc_ops = { | ||
| 416 | .write_header = cdns_pcie_ep_write_header, | ||
| 417 | .set_bar = cdns_pcie_ep_set_bar, | ||
| 418 | .clear_bar = cdns_pcie_ep_clear_bar, | ||
| 419 | .map_addr = cdns_pcie_ep_map_addr, | ||
| 420 | .unmap_addr = cdns_pcie_ep_unmap_addr, | ||
| 421 | .set_msi = cdns_pcie_ep_set_msi, | ||
| 422 | .get_msi = cdns_pcie_ep_get_msi, | ||
| 423 | .raise_irq = cdns_pcie_ep_raise_irq, | ||
| 424 | .start = cdns_pcie_ep_start, | ||
| 425 | }; | ||
| 426 | |||
| 427 | static const struct of_device_id cdns_pcie_ep_of_match[] = { | ||
| 428 | { .compatible = "cdns,cdns-pcie-ep" }, | ||
| 429 | |||
| 430 | { }, | ||
| 431 | }; | ||
| 432 | |||
| 433 | static int cdns_pcie_ep_probe(struct platform_device *pdev) | ||
| 434 | { | ||
| 435 | struct device *dev = &pdev->dev; | ||
| 436 | struct device_node *np = dev->of_node; | ||
| 437 | struct cdns_pcie_ep *ep; | ||
| 438 | struct cdns_pcie *pcie; | ||
| 439 | struct pci_epc *epc; | ||
| 440 | struct resource *res; | ||
| 441 | int ret; | ||
| 442 | |||
| 443 | ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); | ||
| 444 | if (!ep) | ||
| 445 | return -ENOMEM; | ||
| 446 | |||
| 447 | pcie = &ep->pcie; | ||
| 448 | pcie->is_rc = false; | ||
| 449 | |||
| 450 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); | ||
| 451 | pcie->reg_base = devm_ioremap_resource(dev, res); | ||
| 452 | if (IS_ERR(pcie->reg_base)) { | ||
| 453 | dev_err(dev, "missing \"reg\"\n"); | ||
| 454 | return PTR_ERR(pcie->reg_base); | ||
| 455 | } | ||
| 456 | |||
| 457 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); | ||
| 458 | if (!res) { | ||
| 459 | dev_err(dev, "missing \"mem\"\n"); | ||
| 460 | return -EINVAL; | ||
| 461 | } | ||
| 462 | pcie->mem_res = res; | ||
| 463 | |||
| 464 | ret = of_property_read_u32(np, "cdns,max-outbound-regions", | ||
| 465 | &ep->max_regions); | ||
| 466 | if (ret < 0) { | ||
| 467 | dev_err(dev, "missing \"cdns,max-outbound-regions\"\n"); | ||
| 468 | return ret; | ||
| 469 | } | ||
| 470 | ep->ob_addr = devm_kzalloc(dev, ep->max_regions * sizeof(*ep->ob_addr), | ||
| 471 | GFP_KERNEL); | ||
| 472 | if (!ep->ob_addr) | ||
| 473 | return -ENOMEM; | ||
| 474 | |||
| 475 | pm_runtime_enable(dev); | ||
| 476 | ret = pm_runtime_get_sync(dev); | ||
| 477 | if (ret < 0) { | ||
| 478 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
| 479 | goto err_get_sync; | ||
| 480 | } | ||
| 481 | |||
| 482 | /* Disable all but function 0 (BIT(0) is hardwired to 1 anyway). */ | ||
| 483 | cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, BIT(0)); | ||
| 484 | |||
| 485 | epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); | ||
| 486 | if (IS_ERR(epc)) { | ||
| 487 | dev_err(dev, "failed to create epc device\n"); | ||
| 488 | ret = PTR_ERR(epc); | ||
| 489 | goto err_init; | ||
| 490 | } | ||
| 491 | |||
| 492 | epc_set_drvdata(epc, ep); | ||
| 493 | |||
| 494 | if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) | ||
| 495 | epc->max_functions = 1; | ||
| 496 | |||
| 497 | ret = pci_epc_mem_init(epc, pcie->mem_res->start, | ||
| 498 | resource_size(pcie->mem_res)); | ||
| 499 | if (ret < 0) { | ||
| 500 | dev_err(dev, "failed to initialize the memory space\n"); | ||
| 501 | goto err_init; | ||
| 502 | } | ||
| 503 | |||
| 504 | ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, | ||
| 505 | SZ_128K); | ||
| 506 | if (!ep->irq_cpu_addr) { | ||
| 507 | dev_err(dev, "failed to reserve memory space for MSI\n"); | ||
| 508 | ret = -ENOMEM; | ||
| 509 | goto free_epc_mem; | ||
| 510 | } | ||
| 511 | ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; | ||
| 512 | |||
| 513 | return 0; | ||
| 514 | |||
| 515 | free_epc_mem: | ||
| 516 | pci_epc_mem_exit(epc); | ||
| 517 | |||
| 518 | err_init: | ||
| 519 | pm_runtime_put_sync(dev); | ||
| 520 | |||
| 521 | err_get_sync: | ||
| 522 | pm_runtime_disable(dev); | ||
| 523 | |||
| 524 | return ret; | ||
| 525 | } | ||
| 526 | |||
| 527 | static void cdns_pcie_ep_shutdown(struct platform_device *pdev) | ||
| 528 | { | ||
| 529 | struct device *dev = &pdev->dev; | ||
| 530 | int ret; | ||
| 531 | |||
| 532 | ret = pm_runtime_put_sync(dev); | ||
| 533 | if (ret < 0) | ||
| 534 | dev_dbg(dev, "pm_runtime_put_sync failed\n"); | ||
| 535 | |||
| 536 | pm_runtime_disable(dev); | ||
| 537 | |||
| 538 | /* The PCIe controller can't be disabled. */ | ||
| 539 | } | ||
| 540 | |||
| 541 | static struct platform_driver cdns_pcie_ep_driver = { | ||
| 542 | .driver = { | ||
| 543 | .name = "cdns-pcie-ep", | ||
| 544 | .of_match_table = cdns_pcie_ep_of_match, | ||
| 545 | }, | ||
| 546 | .probe = cdns_pcie_ep_probe, | ||
| 547 | .shutdown = cdns_pcie_ep_shutdown, | ||
| 548 | }; | ||
| 549 | builtin_platform_driver(cdns_pcie_ep_driver); | ||
diff --git a/drivers/pci/controller/pcie-cadence-host.c b/drivers/pci/controller/pcie-cadence-host.c new file mode 100644 index 000000000000..a4ebbd37b553 --- /dev/null +++ b/drivers/pci/controller/pcie-cadence-host.c | |||
| @@ -0,0 +1,336 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2017 Cadence | ||
| 3 | // Cadence PCIe host controller driver. | ||
| 4 | // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/of_address.h> | ||
| 8 | #include <linux/of_pci.h> | ||
| 9 | #include <linux/platform_device.h> | ||
| 10 | #include <linux/pm_runtime.h> | ||
| 11 | |||
| 12 | #include "pcie-cadence.h" | ||
| 13 | |||
| 14 | /** | ||
| 15 | * struct cdns_pcie_rc - private data for this PCIe Root Complex driver | ||
| 16 | * @pcie: Cadence PCIe controller | ||
| 17 | * @dev: pointer to PCIe device | ||
| 18 | * @cfg_res: start/end offsets in the physical system memory to map PCI | ||
| 19 | * configuration space accesses | ||
| 20 | * @bus_range: first/last buses behind the PCIe host controller | ||
| 21 | * @cfg_base: IO mapped window to access the PCI configuration space of a | ||
| 22 | * single function at a time | ||
| 23 | * @max_regions: maximum number of regions supported by the hardware | ||
| 24 | * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address | ||
| 25 | * translation (nbits is written into the "no BAR match" register) | ||
| 26 | * @vendor_id: PCI vendor ID | ||
| 27 | * @device_id: PCI device ID | ||
| 28 | */ | ||
| 29 | struct cdns_pcie_rc { | ||
| 30 | struct cdns_pcie pcie; | ||
| 31 | struct device *dev; | ||
| 32 | struct resource *cfg_res; | ||
| 33 | struct resource *bus_range; | ||
| 34 | void __iomem *cfg_base; | ||
| 35 | u32 max_regions; | ||
| 36 | u32 no_bar_nbits; | ||
| 37 | u16 vendor_id; | ||
| 38 | u16 device_id; | ||
| 39 | }; | ||
| 40 | |||
| 41 | static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
| 42 | int where) | ||
| 43 | { | ||
| 44 | struct pci_host_bridge *bridge = pci_find_host_bridge(bus); | ||
| 45 | struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); | ||
| 46 | struct cdns_pcie *pcie = &rc->pcie; | ||
| 47 | unsigned int busn = bus->number; | ||
| 48 | u32 addr0, desc0; | ||
| 49 | |||
| 50 | if (busn == rc->bus_range->start) { | ||
| 51 | /* | ||
| 52 | * Only the root port (devfn == 0) is connected to this bus. | ||
| 53 | * All other PCI devices are behind some bridge, hence on another | ||
| 54 | * bus. | ||
| 55 | */ | ||
| 56 | if (devfn) | ||
| 57 | return NULL; | ||
| 58 | |||
| 59 | return pcie->reg_base + (where & 0xfff); | ||
| 60 | } | ||
| 61 | |||
| 62 | /* Update Output registers for AXI region 0. */ | ||
| 63 | addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(12) | | ||
| 64 | CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) | | ||
| 65 | CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(busn); | ||
| 66 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(0), addr0); | ||
| 67 | |||
| 68 | /* Configuration Type 0 or Type 1 access. */ | ||
| 69 | desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | | ||
| 70 | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); | ||
| 71 | /* | ||
| 72 | * The bus number was already set once and for all in desc1 by | ||
| 73 | * cdns_pcie_host_init_address_translation(). | ||
| 74 | */ | ||
| 75 | if (busn == rc->bus_range->start + 1) | ||
| 76 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; | ||
| 77 | else | ||
| 78 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; | ||
| 79 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(0), desc0); | ||
| 80 | |||
| 81 | return rc->cfg_base + (where & 0xfff); | ||
| 82 | } | ||
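An illustrative userspace sketch (not the kernel code itself; names are hypothetical) of the access-type rule cdns_pci_map_bus() implements above: the root bus reaches the root port's own registers (devfn 0 only), the bus immediately behind it takes Type 0 configuration accesses, and any deeper bus takes Type 1 accesses forwarded through bridges.

```c
#include <stdio.h>

/* Hypothetical model of the Type 0/1 selection in cdns_pci_map_bus();
 * illustration only, not the kernel code. */
enum cfg_access { ROOT_PORT_REGS, CFG_TYPE0, CFG_TYPE1 };

static enum cfg_access cfg_access_type(unsigned int root_bus,
				       unsigned int busn)
{
	if (busn == root_bus)
		return ROOT_PORT_REGS;	/* root port config space, devfn 0 only */
	if (busn == root_bus + 1)
		return CFG_TYPE0;	/* device directly on the secondary bus */
	return CFG_TYPE1;		/* forwarded through one or more bridges */
}

int main(void)
{
	static const char *names[] = { "root port regs", "Type 0", "Type 1" };
	unsigned int bus;

	for (bus = 0; bus <= 3; bus++)
		printf("bus %u -> %s\n", bus, names[cfg_access_type(0, bus)]);
	return 0;
}
```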
| 83 | |||
| 84 | static struct pci_ops cdns_pcie_host_ops = { | ||
| 85 | .map_bus = cdns_pci_map_bus, | ||
| 86 | .read = pci_generic_config_read, | ||
| 87 | .write = pci_generic_config_write, | ||
| 88 | }; | ||
| 89 | |||
| 90 | static const struct of_device_id cdns_pcie_host_of_match[] = { | ||
| 91 | { .compatible = "cdns,cdns-pcie-host" }, | ||
| 92 | |||
| 93 | { }, | ||
| 94 | }; | ||
| 95 | |||
| 96 | static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) | ||
| 97 | { | ||
| 98 | struct cdns_pcie *pcie = &rc->pcie; | ||
| 99 | u32 value, ctrl; | ||
| 100 | |||
| 101 | /* | ||
| 102 | * Set the root complex BAR configuration register: | ||
| 103 | * - disable both BAR0 and BAR1. | ||
| 104 | * - enable Prefetchable Memory Base and Limit registers in type 1 | ||
| 105 | * config space (64 bits). | ||
| 106 | * - enable IO Base and Limit registers in type 1 config | ||
| 107 | * space (32 bits). | ||
| 108 | */ | ||
| 109 | ctrl = CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED; | ||
| 110 | value = CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(ctrl) | | ||
| 111 | CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(ctrl) | | ||
| 112 | CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE | | ||
| 113 | CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS | | ||
| 114 | CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE | | ||
| 115 | CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS; | ||
| 116 | cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); | ||
| 117 | |||
| 118 | /* Set root port configuration space */ | ||
| 119 | if (rc->vendor_id != 0xffff) | ||
| 120 | cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); | ||
| 121 | if (rc->device_id != 0xffff) | ||
| 122 | cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); | ||
| 123 | |||
| 124 | cdns_pcie_rp_writeb(pcie, PCI_CLASS_REVISION, 0); | ||
| 125 | cdns_pcie_rp_writeb(pcie, PCI_CLASS_PROG, 0); | ||
| 126 | cdns_pcie_rp_writew(pcie, PCI_CLASS_DEVICE, PCI_CLASS_BRIDGE_PCI); | ||
| 127 | |||
| 128 | return 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) | ||
| 132 | { | ||
| 133 | struct cdns_pcie *pcie = &rc->pcie; | ||
| 134 | struct resource *cfg_res = rc->cfg_res; | ||
| 135 | struct resource *mem_res = pcie->mem_res; | ||
| 136 | struct resource *bus_range = rc->bus_range; | ||
| 137 | struct device *dev = rc->dev; | ||
| 138 | struct device_node *np = dev->of_node; | ||
| 139 | struct of_pci_range_parser parser; | ||
| 140 | struct of_pci_range range; | ||
| 141 | u32 addr0, addr1, desc1; | ||
| 142 | u64 cpu_addr; | ||
| 143 | int r, err; | ||
| 144 | |||
| 145 | /* | ||
| 146 | * Reserve region 0 for PCI configuration space accesses: | ||
| 147 | * OB_REGION_PCI_ADDR0 and OB_REGION_DESC0 are updated dynamically by | ||
| 148 | * cdns_pci_map_bus(); the other region registers are set here once and for all. | ||
| 149 | */ | ||
| 150 | addr1 = 0; /* Should be programmed to zero. */ | ||
| 151 | desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); | ||
| 152 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); | ||
| 153 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); | ||
| 154 | |||
| 155 | cpu_addr = cfg_res->start - mem_res->start; | ||
| 156 | addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | | ||
| 157 | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); | ||
| 158 | addr1 = upper_32_bits(cpu_addr); | ||
| 159 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); | ||
| 160 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); | ||
| 161 | |||
| 162 | err = of_pci_range_parser_init(&parser, np); | ||
| 163 | if (err) | ||
| 164 | return err; | ||
| 165 | |||
| 166 | r = 1; | ||
| 167 | for_each_of_pci_range(&parser, &range) { | ||
| 168 | bool is_io; | ||
| 169 | |||
| 170 | if (r >= rc->max_regions) | ||
| 171 | break; | ||
| 172 | |||
| 173 | if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) | ||
| 174 | is_io = false; | ||
| 175 | else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) | ||
| 176 | is_io = true; | ||
| 177 | else | ||
| 178 | continue; | ||
| 179 | |||
| 180 | cdns_pcie_set_outbound_region(pcie, 0, r, is_io, | ||
| 181 | range.cpu_addr, | ||
| 182 | range.pci_addr, | ||
| 183 | range.size); | ||
| 184 | r++; | ||
| 185 | } | ||
| 186 | |||
| 187 | /* | ||
| 188 | * Set Root Port no BAR match Inbound Translation registers: | ||
| 189 | * needed for MSI and DMA. | ||
| 190 | * Root Port BAR0 and BAR1 are disabled, hence no need to set their | ||
| 191 | * inbound translation registers. | ||
| 192 | */ | ||
| 193 | addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); | ||
| 194 | addr1 = 0; | ||
| 195 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); | ||
| 196 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); | ||
| 197 | |||
| 198 | return 0; | ||
| 199 | } | ||
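A hedged userspace model of the ADDR0/ADDR1 split programmed above: the low 6 bits of ADDR0 encode (nbits - 1), bits [31:8] carry the low CPU address bits at 256-byte granularity, and ADDR1 carries the upper 32 bits. The helper below is hypothetical, for illustration only.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative encoder mirroring CDNS_PCIE_AT_OB_REGION_CPU_ADDR0/1:
 * ADDR0[5:0] = nbits - 1, ADDR0[31:8] = cpu_addr[31:8],
 * ADDR1 = cpu_addr[63:32]. Not a kernel API. */
static void encode_region(uint64_t cpu_addr, unsigned int nbits,
			  uint32_t *addr0, uint32_t *addr1)
{
	*addr0 = ((nbits - 1) & 0x3f) | ((uint32_t)cpu_addr & 0xffffff00u);
	*addr1 = (uint32_t)(cpu_addr >> 32);
}

int main(void)
{
	uint32_t a0, a1;

	/* a 4 KiB window (nbits = 12) at CPU offset 0x1_2000_0000 */
	encode_region(0x120000000ULL, 12, &a0, &a1);
	printf("ADDR0 = 0x%08x, ADDR1 = 0x%08x\n", a0, a1); /* 0x2000000b, 0x00000001 */
	return 0;
}
```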
| 200 | |||
| 201 | static int cdns_pcie_host_init(struct device *dev, | ||
| 202 | struct list_head *resources, | ||
| 203 | struct cdns_pcie_rc *rc) | ||
| 204 | { | ||
| 205 | struct resource *bus_range = NULL; | ||
| 206 | int err; | ||
| 207 | |||
| 208 | /* Parse our PCI ranges and request their resources */ | ||
| 209 | err = pci_parse_request_of_pci_ranges(dev, resources, &bus_range); | ||
| 210 | if (err) | ||
| 211 | return err; | ||
| 212 | |||
| 213 | rc->bus_range = bus_range; | ||
| 214 | rc->pcie.bus = bus_range->start; | ||
| 215 | |||
| 216 | err = cdns_pcie_host_init_root_port(rc); | ||
| 217 | if (err) | ||
| 218 | goto err_out; | ||
| 219 | |||
| 220 | err = cdns_pcie_host_init_address_translation(rc); | ||
| 221 | if (err) | ||
| 222 | goto err_out; | ||
| 223 | |||
| 224 | return 0; | ||
| 225 | |||
| 226 | err_out: | ||
| 227 | pci_free_resource_list(resources); | ||
| 228 | return err; | ||
| 229 | } | ||
| 230 | |||
| 231 | static int cdns_pcie_host_probe(struct platform_device *pdev) | ||
| 232 | { | ||
| 233 | const char *type; | ||
| 234 | struct device *dev = &pdev->dev; | ||
| 235 | struct device_node *np = dev->of_node; | ||
| 236 | struct pci_host_bridge *bridge; | ||
| 237 | struct list_head resources; | ||
| 238 | struct cdns_pcie_rc *rc; | ||
| 239 | struct cdns_pcie *pcie; | ||
| 240 | struct resource *res; | ||
| 241 | int ret; | ||
| 242 | |||
| 243 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); | ||
| 244 | if (!bridge) | ||
| 245 | return -ENOMEM; | ||
| 246 | |||
| 247 | rc = pci_host_bridge_priv(bridge); | ||
| 248 | rc->dev = dev; | ||
| 249 | |||
| 250 | pcie = &rc->pcie; | ||
| 251 | pcie->is_rc = true; | ||
| 252 | |||
| 253 | rc->max_regions = 32; | ||
| 254 | of_property_read_u32(np, "cdns,max-outbound-regions", &rc->max_regions); | ||
| 255 | |||
| 256 | rc->no_bar_nbits = 32; | ||
| 257 | of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); | ||
| 258 | |||
| 259 | rc->vendor_id = 0xffff; | ||
| 260 | of_property_read_u16(np, "vendor-id", &rc->vendor_id); | ||
| 261 | |||
| 262 | rc->device_id = 0xffff; | ||
| 263 | of_property_read_u16(np, "device-id", &rc->device_id); | ||
| 264 | |||
| 265 | type = of_get_property(np, "device_type", NULL); | ||
| 266 | if (!type || strcmp(type, "pci")) { | ||
| 267 | dev_err(dev, "invalid \"device_type\" %s\n", type); | ||
| 268 | return -EINVAL; | ||
| 269 | } | ||
| 270 | |||
| 271 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); | ||
| 272 | pcie->reg_base = devm_ioremap_resource(dev, res); | ||
| 273 | if (IS_ERR(pcie->reg_base)) { | ||
| 274 | dev_err(dev, "missing \"reg\"\n"); | ||
| 275 | return PTR_ERR(pcie->reg_base); | ||
| 276 | } | ||
| 277 | |||
| 278 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); | ||
| 279 | rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 280 | if (IS_ERR(rc->cfg_base)) { | ||
| 281 | dev_err(dev, "missing \"cfg\"\n"); | ||
| 282 | return PTR_ERR(rc->cfg_base); | ||
| 283 | } | ||
| 284 | rc->cfg_res = res; | ||
| 285 | |||
| 286 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); | ||
| 287 | if (!res) { | ||
| 288 | dev_err(dev, "missing \"mem\"\n"); | ||
| 289 | return -EINVAL; | ||
| 290 | } | ||
| 291 | pcie->mem_res = res; | ||
| 292 | |||
| 293 | pm_runtime_enable(dev); | ||
| 294 | ret = pm_runtime_get_sync(dev); | ||
| 295 | if (ret < 0) { | ||
| 296 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | ||
| 297 | goto err_get_sync; | ||
| 298 | } | ||
| 299 | |||
| 300 | ret = cdns_pcie_host_init(dev, &resources, rc); | ||
| 301 | if (ret) | ||
| 302 | goto err_init; | ||
| 303 | |||
| 304 | list_splice_init(&resources, &bridge->windows); | ||
| 305 | bridge->dev.parent = dev; | ||
| 306 | bridge->busnr = pcie->bus; | ||
| 307 | bridge->ops = &cdns_pcie_host_ops; | ||
| 308 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 309 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 310 | |||
| 311 | ret = pci_host_probe(bridge); | ||
| 312 | if (ret < 0) | ||
| 313 | goto err_host_probe; | ||
| 314 | |||
| 315 | return 0; | ||
| 316 | |||
| 317 | err_host_probe: | ||
| 318 | pci_free_resource_list(&resources); | ||
| 319 | |||
| 320 | err_init: | ||
| 321 | pm_runtime_put_sync(dev); | ||
| 322 | |||
| 323 | err_get_sync: | ||
| 324 | pm_runtime_disable(dev); | ||
| 325 | |||
| 326 | return ret; | ||
| 327 | } | ||
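The error path above unwinds in the reverse order of setup using the usual goto ladder. A minimal standalone sketch of the idiom, with hypothetical step names standing in for the pm_runtime and init calls:

```c
#include <stdio.h>

/* Stubs standing in for real setup/teardown steps (hypothetical). */
static int  step_a_enable(void)  { return 0; }
static void step_a_disable(void) { }
static int  step_b_get(void)     { return 0; }
static void step_b_put(void)     { }
static int  step_c_init(void)    { return -1; }	/* force a failure */

static int sketch_probe(void)
{
	int ret;

	ret = step_a_enable();		/* cf. pm_runtime_enable() */
	if (ret)
		return ret;

	ret = step_b_get();		/* cf. pm_runtime_get_sync() */
	if (ret)
		goto undo_a;

	ret = step_c_init();		/* cf. cdns_pcie_host_init() */
	if (ret)
		goto undo_b;

	return 0;

undo_b:
	step_b_put();			/* cf. pm_runtime_put_sync() */
undo_a:
	step_a_disable();		/* cf. pm_runtime_disable() */
	return ret;
}

int main(void)
{
	printf("sketch_probe() = %d\n", sketch_probe());
	return 0;
}
```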
| 328 | |||
| 329 | static struct platform_driver cdns_pcie_host_driver = { | ||
| 330 | .driver = { | ||
| 331 | .name = "cdns-pcie-host", | ||
| 332 | .of_match_table = cdns_pcie_host_of_match, | ||
| 333 | }, | ||
| 334 | .probe = cdns_pcie_host_probe, | ||
| 335 | }; | ||
| 336 | builtin_platform_driver(cdns_pcie_host_driver); | ||
diff --git a/drivers/pci/controller/pcie-cadence.c b/drivers/pci/controller/pcie-cadence.c new file mode 100644 index 000000000000..138d113eb45d --- /dev/null +++ b/drivers/pci/controller/pcie-cadence.c | |||
| @@ -0,0 +1,126 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2017 Cadence | ||
| 3 | // Cadence PCIe controller driver. | ||
| 4 | // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | |||
| 8 | #include "pcie-cadence.h" | ||
| 9 | |||
| 10 | void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, | ||
| 11 | u32 r, bool is_io, | ||
| 12 | u64 cpu_addr, u64 pci_addr, size_t size) | ||
| 13 | { | ||
| 14 | /* | ||
| 15 | * roundup_pow_of_two() returns an unsigned long, which is not suited | ||
| 16 | * for 64bit values. | ||
| 17 | */ | ||
| 18 | u64 sz = 1ULL << fls64(size - 1); | ||
| 19 | int nbits = ilog2(sz); | ||
| 20 | u32 addr0, addr1, desc0, desc1; | ||
| 21 | |||
| 22 | if (nbits < 8) | ||
| 23 | nbits = 8; | ||
| 24 | |||
| 25 | /* Set the PCI address */ | ||
| 26 | addr0 = CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) | | ||
| 27 | (lower_32_bits(pci_addr) & GENMASK(31, 8)); | ||
| 28 | addr1 = upper_32_bits(pci_addr); | ||
| 29 | |||
| 30 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), addr0); | ||
| 31 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), addr1); | ||
| 32 | |||
| 33 | /* Set the PCIe header descriptor */ | ||
| 34 | if (is_io) | ||
| 35 | desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO; | ||
| 36 | else | ||
| 37 | desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM; | ||
| 38 | desc1 = 0; | ||
| 39 | |||
| 40 | /* | ||
| 41 | * Whether or not Bit [23] is set in the DESC0 register of the outbound | ||
| 42 | * PCIe descriptor, the PCI function number must be set in | ||
| 43 | * Bits [26:24] of DESC0 anyway. | ||
| 44 | * | ||
| 45 | * In Root Complex mode, the function number is always 0 but in Endpoint | ||
| 46 | * mode, the PCIe controller may support more than one function. This | ||
| 47 | * function number needs to be set properly into the outbound PCIe | ||
| 48 | * descriptor. | ||
| 49 | * | ||
| 50 | * Besides, setting Bit [23] is mandatory in Root Complex mode: the | ||
| 51 | * driver must then provide the bus number in Bits [7:0] of DESC1 and | ||
| 52 | * the device number in Bits [31:27] of DESC0. Like the function | ||
| 53 | * number, the device number is always 0 in Root Complex mode. | ||
| 54 | * | ||
| 55 | * However, in Endpoint mode, we can clear Bit [23] of DESC0, so that | ||
| 56 | * the PCIe controller uses the captured values for the bus and | ||
| 57 | * device numbers. | ||
| 58 | */ | ||
| 59 | if (pcie->is_rc) { | ||
| 60 | /* The device and function numbers are always 0. */ | ||
| 61 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | | ||
| 62 | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); | ||
| 63 | desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); | ||
| 64 | } else { | ||
| 65 | /* | ||
| 66 | * Use captured values for bus and device numbers but still | ||
| 67 | * need to set the function number. | ||
| 68 | */ | ||
| 69 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); | ||
| 70 | } | ||
| 71 | |||
| 72 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); | ||
| 73 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); | ||
| 74 | |||
| 75 | /* Set the CPU address */ | ||
| 76 | cpu_addr -= pcie->mem_res->start; | ||
| 77 | addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | | ||
| 78 | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); | ||
| 79 | addr1 = upper_32_bits(cpu_addr); | ||
| 80 | |||
| 81 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); | ||
| 82 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); | ||
| 83 | } | ||
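The fls64()-based rounding in this function can be checked in userspace. The sketch below assumes GCC/Clang's __builtin_clzll() as a stand-in for the kernel's fls64()/ilog2() and shows the round-up to a power of two plus the 8-bit hardware minimum:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace approximation of the kernel's fls64()/ilog2() pair, assuming
 * GCC/Clang builtins; illustrates the nbits computation above. */
static int fls64_approx(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

static int region_nbits(uint64_t size)
{
	uint64_t sz = 1ULL << fls64_approx(size - 1);	/* round up to pow2 */
	int nbits = fls64_approx(sz) - 1;		/* ilog2(sz) */

	return nbits < 8 ? 8 : nbits;			/* hardware minimum */
}

int main(void)
{
	printf("nbits(0x100)      = %d\n", region_nbits(0x100));	/* 8  */
	printf("nbits(0x10001)    = %d\n", region_nbits(0x10001));	/* 17 */
	printf("nbits(1ULL << 40) = %d\n", region_nbits(1ULL << 40));	/* 40 */
	return 0;
}
```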
| 84 | |||
| 85 | void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, | ||
| 86 | u32 r, u64 cpu_addr) | ||
| 87 | { | ||
| 88 | u32 addr0, addr1, desc0, desc1; | ||
| 89 | |||
| 90 | desc0 = CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG; | ||
| 91 | desc1 = 0; | ||
| 92 | |||
| 93 | /* See cdns_pcie_set_outbound_region() comments above. */ | ||
| 94 | if (pcie->is_rc) { | ||
| 95 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | | ||
| 96 | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); | ||
| 97 | desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); | ||
| 98 | } else { | ||
| 99 | desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); | ||
| 100 | } | ||
| 101 | |||
| 102 | /* Set the CPU address */ | ||
| 103 | cpu_addr -= pcie->mem_res->start; | ||
| 104 | addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | | ||
| 105 | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); | ||
| 106 | addr1 = upper_32_bits(cpu_addr); | ||
| 107 | |||
| 108 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); | ||
| 109 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); | ||
| 110 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), desc0); | ||
| 111 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); | ||
| 112 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), addr0); | ||
| 113 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); | ||
| 114 | } | ||
| 115 | |||
| 116 | void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r) | ||
| 117 | { | ||
| 118 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r), 0); | ||
| 119 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r), 0); | ||
| 120 | |||
| 121 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC0(r), 0); | ||
| 122 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), 0); | ||
| 123 | |||
| 124 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r), 0); | ||
| 125 | cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), 0); | ||
| 126 | } | ||
diff --git a/drivers/pci/controller/pcie-cadence.h b/drivers/pci/controller/pcie-cadence.h new file mode 100644 index 000000000000..4bb27333b05c --- /dev/null +++ b/drivers/pci/controller/pcie-cadence.h | |||
| @@ -0,0 +1,311 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // Copyright (c) 2017 Cadence | ||
| 3 | // Cadence PCIe controller driver. | ||
| 4 | // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> | ||
| 5 | |||
| 6 | #ifndef _PCIE_CADENCE_H | ||
| 7 | #define _PCIE_CADENCE_H | ||
| 8 | |||
| 9 | #include <linux/kernel.h> | ||
| 10 | #include <linux/pci.h> | ||
| 11 | |||
| 12 | /* | ||
| 13 | * Local Management Registers | ||
| 14 | */ | ||
| 15 | #define CDNS_PCIE_LM_BASE 0x00100000 | ||
| 16 | |||
| 17 | /* Vendor ID Register */ | ||
| 18 | #define CDNS_PCIE_LM_ID (CDNS_PCIE_LM_BASE + 0x0044) | ||
| 19 | #define CDNS_PCIE_LM_ID_VENDOR_MASK GENMASK(15, 0) | ||
| 20 | #define CDNS_PCIE_LM_ID_VENDOR_SHIFT 0 | ||
| 21 | #define CDNS_PCIE_LM_ID_VENDOR(vid) \ | ||
| 22 | (((vid) << CDNS_PCIE_LM_ID_VENDOR_SHIFT) & CDNS_PCIE_LM_ID_VENDOR_MASK) | ||
| 23 | #define CDNS_PCIE_LM_ID_SUBSYS_MASK GENMASK(31, 16) | ||
| 24 | #define CDNS_PCIE_LM_ID_SUBSYS_SHIFT 16 | ||
| 25 | #define CDNS_PCIE_LM_ID_SUBSYS(sub) \ | ||
| 26 | (((sub) << CDNS_PCIE_LM_ID_SUBSYS_SHIFT) & CDNS_PCIE_LM_ID_SUBSYS_MASK) | ||
| 27 | |||
| 28 | /* Root Port Requestor ID Register */ | ||
| 29 | #define CDNS_PCIE_LM_RP_RID (CDNS_PCIE_LM_BASE + 0x0228) | ||
| 30 | #define CDNS_PCIE_LM_RP_RID_MASK GENMASK(15, 0) | ||
| 31 | #define CDNS_PCIE_LM_RP_RID_SHIFT 0 | ||
| 32 | #define CDNS_PCIE_LM_RP_RID_(rid) \ | ||
| 33 | (((rid) << CDNS_PCIE_LM_RP_RID_SHIFT) & CDNS_PCIE_LM_RP_RID_MASK) | ||
| 34 | |||
| 35 | /* Endpoint Bus and Device Number Register */ | ||
| 36 | #define CDNS_PCIE_LM_EP_ID (CDNS_PCIE_LM_BASE + 0x022c) | ||
| 37 | #define CDNS_PCIE_LM_EP_ID_DEV_MASK GENMASK(4, 0) | ||
| 38 | #define CDNS_PCIE_LM_EP_ID_DEV_SHIFT 0 | ||
| 39 | #define CDNS_PCIE_LM_EP_ID_BUS_MASK GENMASK(15, 8) | ||
| 40 | #define CDNS_PCIE_LM_EP_ID_BUS_SHIFT 8 | ||
| 41 | |||
| 42 | /* Endpoint Function f BAR b Configuration Registers */ | ||
| 43 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG0(fn) \ | ||
| 44 | (CDNS_PCIE_LM_BASE + 0x0240 + (fn) * 0x0008) | ||
| 45 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG1(fn) \ | ||
| 46 | (CDNS_PCIE_LM_BASE + 0x0244 + (fn) * 0x0008) | ||
| 47 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ | ||
| 48 | (GENMASK(4, 0) << ((b) * 8)) | ||
| 49 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ | ||
| 50 | (((a) << ((b) * 8)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) | ||
| 51 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ | ||
| 52 | (GENMASK(7, 5) << ((b) * 8)) | ||
| 53 | #define CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ | ||
| 54 | (((c) << ((b) * 8 + 5)) & CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) | ||
| 55 | |||
| 56 | /* Endpoint Function Configuration Register */ | ||
| 57 | #define CDNS_PCIE_LM_EP_FUNC_CFG (CDNS_PCIE_LM_BASE + 0x02c0) | ||
| 58 | |||
| 59 | /* Root Complex BAR Configuration Register */ | ||
| 60 | #define CDNS_PCIE_LM_RC_BAR_CFG (CDNS_PCIE_LM_BASE + 0x0300) | ||
| 61 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK GENMASK(5, 0) | ||
| 62 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE(a) \ | ||
| 63 | (((a) << 0) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_APERTURE_MASK) | ||
| 64 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK GENMASK(8, 6) | ||
| 65 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL(c) \ | ||
| 66 | (((c) << 6) & CDNS_PCIE_LM_RC_BAR_CFG_BAR0_CTRL_MASK) | ||
| 67 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK GENMASK(13, 9) | ||
| 68 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE(a) \ | ||
| 69 | (((a) << 9) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_APERTURE_MASK) | ||
| 70 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK GENMASK(16, 14) | ||
| 71 | #define CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL(c) \ | ||
| 72 | (((c) << 14) & CDNS_PCIE_LM_RC_BAR_CFG_BAR1_CTRL_MASK) | ||
| 73 | #define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_ENABLE BIT(17) | ||
| 74 | #define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_32BITS 0 | ||
| 75 | #define CDNS_PCIE_LM_RC_BAR_CFG_PREFETCH_MEM_64BITS BIT(18) | ||
| 76 | #define CDNS_PCIE_LM_RC_BAR_CFG_IO_ENABLE BIT(19) | ||
| 77 | #define CDNS_PCIE_LM_RC_BAR_CFG_IO_16BITS 0 | ||
| 78 | #define CDNS_PCIE_LM_RC_BAR_CFG_IO_32BITS BIT(20) | ||
| 79 | #define CDNS_PCIE_LM_RC_BAR_CFG_CHECK_ENABLE BIT(31) | ||
| 80 | |||
| 81 | /* BAR control values applicable to both Endpoint Function and Root Complex */ | ||
| 82 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED 0x0 | ||
| 83 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS 0x1 | ||
| 84 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS 0x4 | ||
| 85 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 | ||
| 86 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 | ||
| 87 | #define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 | ||
| 88 | |||
| 89 | |||
| 90 | /* | ||
| 91 | * Endpoint Function Registers (PCI configuration space for endpoint functions) | ||
| 92 | */ | ||
| 93 | #define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) | ||
| 94 | |||
| 95 | #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Root Port Registers (PCI configuration space for the root port function) | ||
| 99 | */ | ||
| 100 | #define CDNS_PCIE_RP_BASE 0x00200000 | ||
| 101 | |||
| 102 | |||
| 103 | /* | ||
| 104 | * Address Translation Registers | ||
| 105 | */ | ||
| 106 | #define CDNS_PCIE_AT_BASE 0x00400000 | ||
| 107 | |||
| 108 | /* Region r Outbound AXI to PCIe Address Translation Register 0 */ | ||
| 109 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ | ||
| 110 | (CDNS_PCIE_AT_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) | ||
| 111 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK GENMASK(5, 0) | ||
| 112 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS(nbits) \ | ||
| 113 | (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_NBITS_MASK) | ||
| 114 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) | ||
| 115 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ | ||
| 116 | (((devfn) << 12) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) | ||
| 117 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) | ||
| 118 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ | ||
| 119 | (((bus) << 20) & CDNS_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) | ||
| 120 | |||
| 121 | /* Region r Outbound AXI to PCIe Address Translation Register 1 */ | ||
| 122 | #define CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ | ||
| 123 | (CDNS_PCIE_AT_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) | ||
| 124 | |||
| 125 | /* Region r Outbound PCIe Descriptor Register 0 */ | ||
| 126 | #define CDNS_PCIE_AT_OB_REGION_DESC0(r) \ | ||
| 127 | (CDNS_PCIE_AT_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) | ||
| 128 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MASK GENMASK(3, 0) | ||
| 129 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_MEM 0x2 | ||
| 130 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_IO 0x6 | ||
| 131 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0 0xa | ||
| 132 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1 0xb | ||
| 133 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_NORMAL_MSG 0xc | ||
| 134 | #define CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_VENDOR_MSG 0xd | ||
| 135 | /* Bit 23 MUST be set in RC mode. */ | ||
| 136 | #define CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) | ||
| 137 | #define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) | ||
| 138 | #define CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ | ||
| 139 | (((devfn) << 24) & CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) | ||
| 140 | |||
| 141 | /* Region r Outbound PCIe Descriptor Register 1 */ | ||
| 142 | #define CDNS_PCIE_AT_OB_REGION_DESC1(r) \ | ||
| 143 | (CDNS_PCIE_AT_BASE + 0x000c + ((r) & 0x1f) * 0x0020) | ||
| 144 | #define CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK GENMASK(7, 0) | ||
| 145 | #define CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus) \ | ||
| 146 | ((bus) & CDNS_PCIE_AT_OB_REGION_DESC1_BUS_MASK) | ||
| 147 | |||
| 148 | /* Region r AXI Region Base Address Register 0 */ | ||
| 149 | #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ | ||
| 150 | (CDNS_PCIE_AT_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) | ||
| 151 | #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK GENMASK(5, 0) | ||
| 152 | #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) \ | ||
| 153 | (((nbits) - 1) & CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS_MASK) | ||
| 154 | |||
| 155 | /* Region r AXI Region Base Address Register 1 */ | ||
| 156 | #define CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ | ||
| 157 | (CDNS_PCIE_AT_BASE + 0x001c + ((r) & 0x1f) * 0x0020) | ||
| 158 | |||
| 159 | /* Root Port BAR Inbound PCIe to AXI Address Translation Register */ | ||
| 160 | #define CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar) \ | ||
| 161 | (CDNS_PCIE_AT_BASE + 0x0800 + (bar) * 0x0008) | ||
| 162 | #define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK GENMASK(5, 0) | ||
| 163 | #define CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(nbits) \ | ||
| 164 | (((nbits) - 1) & CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS_MASK) | ||
| 165 | #define CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar) \ | ||
| 166 | (CDNS_PCIE_AT_BASE + 0x0804 + (bar) * 0x0008) | ||
| 167 | |||
| 168 | enum cdns_pcie_rp_bar { | ||
| 169 | RP_BAR0, | ||
| 170 | RP_BAR1, | ||
| 171 | RP_NO_BAR | ||
| 172 | }; | ||
| 173 | |||
| 174 | /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ | ||
| 175 | #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ | ||
| 176 | (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) | ||
| 177 | #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ | ||
| 178 | (CDNS_PCIE_AT_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) | ||
| 179 | |||
| 180 | /* Normal/Vendor specific message access: offset inside some outbound region */ | ||
| 181 | #define CDNS_PCIE_NORMAL_MSG_ROUTING_MASK GENMASK(7, 5) | ||
| 182 | #define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ | ||
| 183 | (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) | ||
| 184 | #define CDNS_PCIE_NORMAL_MSG_CODE_MASK GENMASK(15, 8) | ||
| 185 | #define CDNS_PCIE_NORMAL_MSG_CODE(code) \ | ||
| 186 | (((code) << 8) & CDNS_PCIE_NORMAL_MSG_CODE_MASK) | ||
| 187 | #define CDNS_PCIE_MSG_NO_DATA BIT(16) | ||
| 188 | |||
| 189 | enum cdns_pcie_msg_code { | ||
| 190 | MSG_CODE_ASSERT_INTA = 0x20, | ||
| 191 | MSG_CODE_ASSERT_INTB = 0x21, | ||
| 192 | MSG_CODE_ASSERT_INTC = 0x22, | ||
| 193 | MSG_CODE_ASSERT_INTD = 0x23, | ||
| 194 | MSG_CODE_DEASSERT_INTA = 0x24, | ||
| 195 | MSG_CODE_DEASSERT_INTB = 0x25, | ||
| 196 | MSG_CODE_DEASSERT_INTC = 0x26, | ||
| 197 | MSG_CODE_DEASSERT_INTD = 0x27, | ||
| 198 | }; | ||
| 199 | |||
| 200 | enum cdns_pcie_msg_routing { | ||
| 201 | /* Route to Root Complex */ | ||
| 202 | MSG_ROUTING_TO_RC, | ||
| 203 | |||
| 204 | /* Use Address Routing */ | ||
| 205 | MSG_ROUTING_BY_ADDR, | ||
| 206 | |||
| 207 | /* Use ID Routing */ | ||
| 208 | MSG_ROUTING_BY_ID, | ||
| 209 | |||
| 210 | /* Route as Broadcast Message from Root Complex */ | ||
| 211 | MSG_ROUTING_BCAST, | ||
| 212 | |||
| 213 | /* Local message; terminate at receiver (INTx messages) */ | ||
| 214 | MSG_ROUTING_LOCAL, | ||
| 215 | |||
| 216 | /* Gather & route to Root Complex (PME_TO_Ack message) */ | ||
| 217 | MSG_ROUTING_GATHER, | ||
| 218 | }; | ||
| 219 | |||
| 220 | /** | ||
| 221 | * struct cdns_pcie - private data for Cadence PCIe controller drivers | ||
| 222 | * @reg_base: IO mapped register base | ||
| 223 | * @mem_res: start/end offsets in the physical system memory to map PCI accesses | ||
| 224 | * @is_rc: tells whether the PCIe controller mode is Root Complex or Endpoint. | ||
| 225 | * @bus: In Root Complex mode, the bus number | ||
| 226 | */ | ||
| 227 | struct cdns_pcie { | ||
| 228 | void __iomem *reg_base; | ||
| 229 | struct resource *mem_res; | ||
| 230 | bool is_rc; | ||
| 231 | u8 bus; | ||
| 232 | }; | ||
| 233 | |||
| 234 | /* Register access */ | ||
| 235 | static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) | ||
| 236 | { | ||
| 237 | writeb(value, pcie->reg_base + reg); | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) | ||
| 241 | { | ||
| 242 | writew(value, pcie->reg_base + reg); | ||
| 243 | } | ||
| 244 | |||
| 245 | static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) | ||
| 246 | { | ||
| 247 | writel(value, pcie->reg_base + reg); | ||
| 248 | } | ||
| 249 | |||
| 250 | static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) | ||
| 251 | { | ||
| 252 | return readl(pcie->reg_base + reg); | ||
| 253 | } | ||
| 254 | |||
| 255 | /* Root Port register access */ | ||
| 256 | static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, | ||
| 257 | u32 reg, u8 value) | ||
| 258 | { | ||
| 259 | writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); | ||
| 260 | } | ||
| 261 | |||
| 262 | static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, | ||
| 263 | u32 reg, u16 value) | ||
| 264 | { | ||
| 265 | writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); | ||
| 266 | } | ||
| 267 | |||
| 268 | /* Endpoint Function register access */ | ||
| 269 | static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, | ||
| 270 | u32 reg, u8 value) | ||
| 271 | { | ||
| 272 | writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 273 | } | ||
| 274 | |||
| 275 | static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, | ||
| 276 | u32 reg, u16 value) | ||
| 277 | { | ||
| 278 | writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 279 | } | ||
| 280 | |||
| 281 | static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, | ||
| 282 | u32 reg, u32 value) | ||
| 283 | { | ||
| 284 | writel(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 285 | } | ||
| 286 | |||
| 287 | static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) | ||
| 288 | { | ||
| 289 | return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 290 | } | ||
| 291 | |||
| 292 | static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) | ||
| 293 | { | ||
| 294 | return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 295 | } | ||
| 296 | |||
| 297 | static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) | ||
| 298 | { | ||
| 299 | return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); | ||
| 300 | } | ||
| 301 | |||
| 302 | void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, | ||
| 303 | u32 r, bool is_io, | ||
| 304 | u64 cpu_addr, u64 pci_addr, size_t size); | ||
| 305 | |||
| 306 | void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, | ||
| 307 | u32 r, u64 cpu_addr); | ||
| 308 | |||
| 309 | void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); | ||
| 310 | |||
| 311 | #endif /* _PCIE_CADENCE_H */ | ||
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c new file mode 100644 index 000000000000..aa55b064f64d --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-bcma.c | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2015 Broadcom Corporation | ||
| 4 | * Copyright (C) 2015 Hauke Mehrtens <hauke@hauke-m.de> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/pci.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/slab.h> | ||
| 11 | #include <linux/phy/phy.h> | ||
| 12 | #include <linux/bcma/bcma.h> | ||
| 13 | #include <linux/ioport.h> | ||
| 14 | |||
| 15 | #include "pcie-iproc.h" | ||
| 16 | |||
| 17 | |||
| 18 | /* NS: CLASS field is R/O, and set to wrong 0x200 value */ | ||
| 19 | static void bcma_pcie2_fixup_class(struct pci_dev *dev) | ||
| 20 | { | ||
| 21 | dev->class = PCI_CLASS_BRIDGE_PCI << 8; | ||
| 22 | } | ||
| 23 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class); | ||
| 24 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class); | ||
| 25 | |||
| 26 | static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
| 27 | { | ||
| 28 | struct iproc_pcie *pcie = dev->sysdata; | ||
| 29 | struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev); | ||
| 30 | |||
| 31 | return bcma_core_irq(bdev, 5); | ||
| 32 | } | ||
| 33 | |||
| 34 | static int iproc_pcie_bcma_probe(struct bcma_device *bdev) | ||
| 35 | { | ||
| 36 | struct device *dev = &bdev->dev; | ||
| 37 | struct iproc_pcie *pcie; | ||
| 38 | LIST_HEAD(resources); | ||
| 39 | struct pci_host_bridge *bridge; | ||
| 40 | int ret; | ||
| 41 | |||
| 42 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 43 | if (!bridge) | ||
| 44 | return -ENOMEM; | ||
| 45 | |||
| 46 | pcie = pci_host_bridge_priv(bridge); | ||
| 47 | |||
| 48 | pcie->dev = dev; | ||
| 49 | |||
| 50 | pcie->type = IPROC_PCIE_PAXB_BCMA; | ||
| 51 | pcie->base = bdev->io_addr; | ||
| 52 | if (!pcie->base) { | ||
| 53 | dev_err(dev, "no controller registers\n"); | ||
| 54 | return -ENOMEM; | ||
| 55 | } | ||
| 56 | |||
| 57 | pcie->base_addr = bdev->addr; | ||
| 58 | |||
| 59 | pcie->mem.start = bdev->addr_s[0]; | ||
| 60 | pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1; | ||
| 61 | pcie->mem.name = "PCIe MEM space"; | ||
| 62 | pcie->mem.flags = IORESOURCE_MEM; | ||
| 63 | pci_add_resource(&resources, &pcie->mem); | ||
| 64 | |||
| 65 | pcie->map_irq = iproc_pcie_bcma_map_irq; | ||
| 66 | |||
| 67 | ret = iproc_pcie_setup(pcie, &resources); | ||
| 68 | if (ret) { | ||
| 69 | dev_err(dev, "PCIe controller setup failed\n"); | ||
| 70 | pci_free_resource_list(&resources); | ||
| 71 | return ret; | ||
| 72 | } | ||
| 73 | |||
| 74 | bcma_set_drvdata(bdev, pcie); | ||
| 75 | return 0; | ||
| 76 | } | ||
| 77 | |||
| 78 | static void iproc_pcie_bcma_remove(struct bcma_device *bdev) | ||
| 79 | { | ||
| 80 | struct iproc_pcie *pcie = bcma_get_drvdata(bdev); | ||
| 81 | |||
| 82 | iproc_pcie_remove(pcie); | ||
| 83 | } | ||
| 84 | |||
| 85 | static const struct bcma_device_id iproc_pcie_bcma_table[] = { | ||
| 86 | BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS), | ||
| 87 | {}, | ||
| 88 | }; | ||
| 89 | MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table); | ||
| 90 | |||
| 91 | static struct bcma_driver iproc_pcie_bcma_driver = { | ||
| 92 | .name = KBUILD_MODNAME, | ||
| 93 | .id_table = iproc_pcie_bcma_table, | ||
| 94 | .probe = iproc_pcie_bcma_probe, | ||
| 95 | .remove = iproc_pcie_bcma_remove, | ||
| 96 | }; | ||
| 97 | |||
| 98 | static int __init iproc_pcie_bcma_init(void) | ||
| 99 | { | ||
| 100 | return bcma_driver_register(&iproc_pcie_bcma_driver); | ||
| 101 | } | ||
| 102 | module_init(iproc_pcie_bcma_init); | ||
| 103 | |||
| 104 | static void __exit iproc_pcie_bcma_exit(void) | ||
| 105 | { | ||
| 106 | bcma_driver_unregister(&iproc_pcie_bcma_driver); | ||
| 107 | } | ||
| 108 | module_exit(iproc_pcie_bcma_exit); | ||
| 109 | |||
| 110 | MODULE_AUTHOR("Hauke Mehrtens"); | ||
| 111 | MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver"); | ||
| 112 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c new file mode 100644 index 000000000000..9deb56989d72 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-msi.c | |||
| @@ -0,0 +1,671 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2015 Broadcom Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/interrupt.h> | ||
| 7 | #include <linux/irqchip/chained_irq.h> | ||
| 8 | #include <linux/irqdomain.h> | ||
| 9 | #include <linux/msi.h> | ||
| 10 | #include <linux/of_irq.h> | ||
| 11 | #include <linux/of_pci.h> | ||
| 12 | #include <linux/pci.h> | ||
| 13 | |||
| 14 | #include "pcie-iproc.h" | ||
| 15 | |||
| 16 | #define IPROC_MSI_INTR_EN_SHIFT 11 | ||
| 17 | #define IPROC_MSI_INTR_EN BIT(IPROC_MSI_INTR_EN_SHIFT) | ||
| 18 | #define IPROC_MSI_INT_N_EVENT_SHIFT 1 | ||
| 19 | #define IPROC_MSI_INT_N_EVENT BIT(IPROC_MSI_INT_N_EVENT_SHIFT) | ||
| 20 | #define IPROC_MSI_EQ_EN_SHIFT 0 | ||
| 21 | #define IPROC_MSI_EQ_EN BIT(IPROC_MSI_EQ_EN_SHIFT) | ||
| 22 | |||
| 23 | #define IPROC_MSI_EQ_MASK 0x3f | ||
| 24 | |||
| 25 | /* Max number of GIC interrupts */ | ||
| 26 | #define NR_HW_IRQS 6 | ||
| 27 | |||
| 28 | /* Number of entries in each event queue */ | ||
| 29 | #define EQ_LEN 64 | ||
| 30 | |||
| 31 | /* Size of each event queue memory region */ | ||
| 32 | #define EQ_MEM_REGION_SIZE SZ_4K | ||
| 33 | |||
| 34 | /* Size of each MSI address region */ | ||
| 35 | #define MSI_MEM_REGION_SIZE SZ_4K | ||
| 36 | |||
| 37 | enum iproc_msi_reg { | ||
| 38 | IPROC_MSI_EQ_PAGE = 0, | ||
| 39 | IPROC_MSI_EQ_PAGE_UPPER, | ||
| 40 | IPROC_MSI_PAGE, | ||
| 41 | IPROC_MSI_PAGE_UPPER, | ||
| 42 | IPROC_MSI_CTRL, | ||
| 43 | IPROC_MSI_EQ_HEAD, | ||
| 44 | IPROC_MSI_EQ_TAIL, | ||
| 45 | IPROC_MSI_INTS_EN, | ||
| 46 | IPROC_MSI_REG_SIZE, | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct iproc_msi; | ||
| 50 | |||
| 51 | /** | ||
| 52 | * iProc MSI group | ||
| 53 | * | ||
| 54 | * One MSI group is allocated per GIC interrupt, serviced by one iProc MSI | ||
| 55 | * event queue. | ||
| 56 | * | ||
| 57 | * @msi: pointer to iProc MSI data | ||
| 58 | * @gic_irq: GIC interrupt | ||
| 59 | * @eq: Event queue number | ||
| 60 | */ | ||
| 61 | struct iproc_msi_grp { | ||
| 62 | struct iproc_msi *msi; | ||
| 63 | int gic_irq; | ||
| 64 | unsigned int eq; | ||
| 65 | }; | ||
| 66 | |||
| 67 | /** | ||
| 68 | * iProc event queue based MSI | ||
| 69 | * | ||
| 70 | * Only meant to be used on platforms without MSI support integrated into the | ||
| 71 | * GIC. | ||
| 72 | * | ||
| 73 | * @pcie: pointer to iProc PCIe data | ||
| 74 | * @reg_offsets: MSI register offsets | ||
| 75 | * @grps: MSI groups | ||
| 76 | * @nr_irqs: number of total interrupts connected to GIC | ||
| 77 | * @nr_cpus: total number of CPUs | ||
| 78 | * @has_inten_reg: indicates the MSI interrupt enable register needs to be | ||
| 79 | * set explicitly (required for some legacy platforms) | ||
| 80 | * @bitmap: MSI vector bitmap | ||
| 81 | * @bitmap_lock: lock to protect access to the MSI bitmap | ||
| 82 | * @nr_msi_vecs: total number of MSI vectors | ||
| 83 | * @inner_domain: inner IRQ domain | ||
| 84 | * @msi_domain: MSI IRQ domain | ||
| 85 | * @nr_eq_region: required number of 4K aligned memory regions for MSI event | ||
| 86 | * queues | ||
| 87 | * @nr_msi_region: required number of 4K aligned address regions for MSI posted | ||
| 88 | * writes | ||
| 89 | * @eq_cpu: pointer to allocated memory region for MSI event queues | ||
| 90 | * @eq_dma: DMA address of MSI event queues | ||
| 91 | * @msi_addr: MSI address | ||
| 92 | */ | ||
| 93 | struct iproc_msi { | ||
| 94 | struct iproc_pcie *pcie; | ||
| 95 | const u16 (*reg_offsets)[IPROC_MSI_REG_SIZE]; | ||
| 96 | struct iproc_msi_grp *grps; | ||
| 97 | int nr_irqs; | ||
| 98 | int nr_cpus; | ||
| 99 | bool has_inten_reg; | ||
| 100 | unsigned long *bitmap; | ||
| 101 | struct mutex bitmap_lock; | ||
| 102 | unsigned int nr_msi_vecs; | ||
| 103 | struct irq_domain *inner_domain; | ||
| 104 | struct irq_domain *msi_domain; | ||
| 105 | unsigned int nr_eq_region; | ||
| 106 | unsigned int nr_msi_region; | ||
| 107 | void *eq_cpu; | ||
| 108 | dma_addr_t eq_dma; | ||
| 109 | phys_addr_t msi_addr; | ||
| 110 | }; | ||
| 111 | |||
| 112 | static const u16 iproc_msi_reg_paxb[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { | ||
| 113 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x210, 0x250, 0x254, 0x208 }, | ||
| 114 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x214, 0x258, 0x25c, 0x208 }, | ||
| 115 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x218, 0x260, 0x264, 0x208 }, | ||
| 116 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x21c, 0x268, 0x26c, 0x208 }, | ||
| 117 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x220, 0x270, 0x274, 0x208 }, | ||
| 118 | { 0x200, 0x2c0, 0x204, 0x2c4, 0x224, 0x278, 0x27c, 0x208 }, | ||
| 119 | }; | ||
| 120 | |||
| 121 | static const u16 iproc_msi_reg_paxc[NR_HW_IRQS][IPROC_MSI_REG_SIZE] = { | ||
| 122 | { 0xc00, 0xc04, 0xc08, 0xc0c, 0xc40, 0xc50, 0xc60 }, | ||
| 123 | { 0xc10, 0xc14, 0xc18, 0xc1c, 0xc44, 0xc54, 0xc64 }, | ||
| 124 | { 0xc20, 0xc24, 0xc28, 0xc2c, 0xc48, 0xc58, 0xc68 }, | ||
| 125 | { 0xc30, 0xc34, 0xc38, 0xc3c, 0xc4c, 0xc5c, 0xc6c }, | ||
| 126 | }; | ||
| 127 | |||
| 128 | static inline u32 iproc_msi_read_reg(struct iproc_msi *msi, | ||
| 129 | enum iproc_msi_reg reg, | ||
| 130 | unsigned int eq) | ||
| 131 | { | ||
| 132 | struct iproc_pcie *pcie = msi->pcie; | ||
| 133 | |||
| 134 | return readl_relaxed(pcie->base + msi->reg_offsets[eq][reg]); | ||
| 135 | } | ||
| 136 | |||
| 137 | static inline void iproc_msi_write_reg(struct iproc_msi *msi, | ||
| 138 | enum iproc_msi_reg reg, | ||
| 139 | int eq, u32 val) | ||
| 140 | { | ||
| 141 | struct iproc_pcie *pcie = msi->pcie; | ||
| 142 | |||
| 143 | writel_relaxed(val, pcie->base + msi->reg_offsets[eq][reg]); | ||
| 144 | } | ||
| 145 | |||
| 146 | static inline u32 hwirq_to_group(struct iproc_msi *msi, unsigned long hwirq) | ||
| 147 | { | ||
| 148 | return (hwirq % msi->nr_irqs); | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline unsigned int iproc_msi_addr_offset(struct iproc_msi *msi, | ||
| 152 | unsigned long hwirq) | ||
| 153 | { | ||
| 154 | if (msi->nr_msi_region > 1) | ||
| 155 | return hwirq_to_group(msi, hwirq) * MSI_MEM_REGION_SIZE; | ||
| 156 | else | ||
| 157 | return hwirq_to_group(msi, hwirq) * sizeof(u32); | ||
| 158 | } | ||
| 159 | |||
| 160 | static inline unsigned int iproc_msi_eq_offset(struct iproc_msi *msi, u32 eq) | ||
| 161 | { | ||
| 162 | if (msi->nr_eq_region > 1) | ||
| 163 | return eq * EQ_MEM_REGION_SIZE; | ||
| 164 | else | ||
| 165 | return eq * EQ_LEN * sizeof(u32); | ||
| 166 | } | ||
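The two offset helpers above pick between a packed layout (a single shared region: consecutive u32 slots and back-to-back 64-entry queues) and a sparse one (a dedicated 4K page per group, as on PAXC). A small illustrative check, using the EQ_LEN and region sizes defined earlier:

```c
#include <stdio.h>

#define EQ_LEN              64
#define EQ_MEM_REGION_SIZE  4096
#define MSI_MEM_REGION_SIZE 4096

/* Mirrors the offset selection in iproc_msi_addr_offset() /
 * iproc_msi_eq_offset(); illustration only. */
static unsigned int msi_addr_off(unsigned int nr_msi_region, unsigned int grp)
{
	return nr_msi_region > 1 ? grp * MSI_MEM_REGION_SIZE
				 : grp * (unsigned int)sizeof(unsigned int);
}

static unsigned int eq_off(unsigned int nr_eq_region, unsigned int eq)
{
	return nr_eq_region > 1 ? eq * EQ_MEM_REGION_SIZE
				: eq * EQ_LEN * (unsigned int)sizeof(unsigned int);
}

int main(void)
{
	/* PAXB-style: one shared region; PAXC-style: one region per group */
	printf("PAXB grp1: msi off = 0x%x, eq off = 0x%x\n",
	       msi_addr_off(1, 1), eq_off(1, 1));	/* 0x4, 0x100 */
	printf("PAXC grp1: msi off = 0x%x, eq off = 0x%x\n",
	       msi_addr_off(4, 1), eq_off(4, 1));	/* 0x1000, 0x1000 */
	return 0;
}
```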
| 167 | |||
| 168 | static struct irq_chip iproc_msi_irq_chip = { | ||
| 169 | .name = "iProc-MSI", | ||
| 170 | }; | ||
| 171 | |||
| 172 | static struct msi_domain_info iproc_msi_domain_info = { | ||
| 173 | .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 174 | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, | ||
| 175 | .chip = &iproc_msi_irq_chip, | ||
| 176 | }; | ||
| 177 | |||
| 178 | /* | ||
| 179 | * In iProc PCIe core, each MSI group is serviced by a GIC interrupt and a | ||
| 180 | * dedicated event queue. Each MSI group can support up to 64 MSI vectors. | ||
| 181 | * | ||
| 182 | * The number of MSI groups varies between different iProc SoCs. The total | ||
| 183 | * number of CPU cores also varies. To support MSI IRQ affinity, we | ||
| 184 | * distribute GIC interrupts across all available CPUs. An MSI vector is | ||
| 185 | * moved from one GIC interrupt to another to steer it to the target CPU. | ||
| 186 | * | ||
| 187 | * Assuming: | ||
| 188 | * - the number of MSI groups is M | ||
| 189 | * - the number of CPU cores is N | ||
| 190 | * - M is always a multiple of N | ||
| 191 | * | ||
| 192 | * Total number of raw MSI vectors = M * 64 | ||
| 193 | * Total number of supported MSI vectors = (M * 64) / N | ||
| 194 | */ | ||
| 195 | static inline int hwirq_to_cpu(struct iproc_msi *msi, unsigned long hwirq) | ||
| 196 | { | ||
| 197 | return (hwirq % msi->nr_cpus); | ||
| 198 | } | ||
| 199 | |||
| 200 | static inline unsigned long hwirq_to_canonical_hwirq(struct iproc_msi *msi, | ||
| 201 | unsigned long hwirq) | ||
| 202 | { | ||
| 203 | return (hwirq - hwirq_to_cpu(msi, hwirq)); | ||
| 204 | } | ||
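With M = 6 groups and N = 2 CPUs, for instance, there are 6 * 64 = 384 raw vectors but only 192 usable ones, since every allocation consumes one vector per CPU. A hedged userspace copy of the two mapping helpers above, assuming a 2-CPU system:

```c
#include <stdio.h>

/* Userspace copy of the hwirq <-> CPU mapping helpers above, for a
 * hypothetical system with 2 CPUs. */
#define NR_CPUS 2

static unsigned long hwirq_to_cpu(unsigned long hwirq)
{
	return hwirq % NR_CPUS;
}

static unsigned long hwirq_to_canonical(unsigned long hwirq)
{
	return hwirq - hwirq_to_cpu(hwirq);
}

int main(void)
{
	unsigned long hwirq;

	/* hwirqs 4 and 5 are the same logical vector on CPU0 and CPU1 */
	for (hwirq = 4; hwirq <= 7; hwirq++)
		printf("hwirq %lu -> cpu %lu, canonical %lu\n",
		       hwirq, hwirq_to_cpu(hwirq), hwirq_to_canonical(hwirq));
	return 0;
}
```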
| 205 | |||
| 206 | static int iproc_msi_irq_set_affinity(struct irq_data *data, | ||
| 207 | const struct cpumask *mask, bool force) | ||
| 208 | { | ||
| 209 | struct iproc_msi *msi = irq_data_get_irq_chip_data(data); | ||
| 210 | int target_cpu = cpumask_first(mask); | ||
| 211 | int curr_cpu; | ||
| 212 | |||
| 213 | curr_cpu = hwirq_to_cpu(msi, data->hwirq); | ||
| 214 | if (curr_cpu == target_cpu) | ||
| 215 | return IRQ_SET_MASK_OK_DONE; | ||
| 216 | |||
| 217 | /* steer MSI to the target CPU */ | ||
| 218 | data->hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq) + target_cpu; | ||
| 219 | |||
| 220 | return IRQ_SET_MASK_OK; | ||
| 221 | } | ||
| 222 | |||
| 223 | static void iproc_msi_irq_compose_msi_msg(struct irq_data *data, | ||
| 224 | struct msi_msg *msg) | ||
| 225 | { | ||
| 226 | struct iproc_msi *msi = irq_data_get_irq_chip_data(data); | ||
| 227 | dma_addr_t addr; | ||
| 228 | |||
| 229 | addr = msi->msi_addr + iproc_msi_addr_offset(msi, data->hwirq); | ||
| 230 | msg->address_lo = lower_32_bits(addr); | ||
| 231 | msg->address_hi = upper_32_bits(addr); | ||
| 232 | msg->data = data->hwirq << 5; | ||
| 233 | } | ||
| 234 | |||
| 235 | static struct irq_chip iproc_msi_bottom_irq_chip = { | ||
| 236 | .name = "MSI", | ||
| 237 | .irq_set_affinity = iproc_msi_irq_set_affinity, | ||
| 238 | .irq_compose_msi_msg = iproc_msi_irq_compose_msi_msg, | ||
| 239 | }; | ||
| 240 | |||
| 241 | static int iproc_msi_irq_domain_alloc(struct irq_domain *domain, | ||
| 242 | unsigned int virq, unsigned int nr_irqs, | ||
| 243 | void *args) | ||
| 244 | { | ||
| 245 | struct iproc_msi *msi = domain->host_data; | ||
| 246 | int hwirq, i; | ||
| 247 | |||
| 248 | mutex_lock(&msi->bitmap_lock); | ||
| 249 | |||
| 250 | /* Allocate 'nr_cpus' MSI vectors at a time */ | ||
| 251 | hwirq = bitmap_find_next_zero_area(msi->bitmap, msi->nr_msi_vecs, 0, | ||
| 252 | msi->nr_cpus, 0); | ||
| 253 | if (hwirq < msi->nr_msi_vecs) { | ||
| 254 | bitmap_set(msi->bitmap, hwirq, msi->nr_cpus); | ||
| 255 | } else { | ||
| 256 | mutex_unlock(&msi->bitmap_lock); | ||
| 257 | return -ENOSPC; | ||
| 258 | } | ||
| 259 | |||
| 260 | mutex_unlock(&msi->bitmap_lock); | ||
| 261 | |||
| 262 | for (i = 0; i < nr_irqs; i++) { | ||
| 263 | irq_domain_set_info(domain, virq + i, hwirq + i, | ||
| 264 | &iproc_msi_bottom_irq_chip, | ||
| 265 | domain->host_data, handle_simple_irq, | ||
| 266 | NULL, NULL); | ||
| 267 | } | ||
| 268 | |||
| 269 | return hwirq; | ||
| 270 | } | ||
| 271 | |||
| 272 | static void iproc_msi_irq_domain_free(struct irq_domain *domain, | ||
| 273 | unsigned int virq, unsigned int nr_irqs) | ||
| 274 | { | ||
| 275 | struct irq_data *data = irq_domain_get_irq_data(domain, virq); | ||
| 276 | struct iproc_msi *msi = irq_data_get_irq_chip_data(data); | ||
| 277 | unsigned int hwirq; | ||
| 278 | |||
| 279 | mutex_lock(&msi->bitmap_lock); | ||
| 280 | |||
| 281 | hwirq = hwirq_to_canonical_hwirq(msi, data->hwirq); | ||
| 282 | bitmap_clear(msi->bitmap, hwirq, msi->nr_cpus); | ||
| 283 | |||
| 284 | mutex_unlock(&msi->bitmap_lock); | ||
| 285 | |||
| 286 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | ||
| 287 | } | ||
| 288 | |||
| 289 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 290 | .alloc = iproc_msi_irq_domain_alloc, | ||
| 291 | .free = iproc_msi_irq_domain_free, | ||
| 292 | }; | ||
| 293 | |||
| 294 | static inline u32 decode_msi_hwirq(struct iproc_msi *msi, u32 eq, u32 head) | ||
| 295 | { | ||
| 296 | u32 *msg, hwirq; | ||
| 297 | unsigned int offs; | ||
| 298 | |||
| 299 | offs = iproc_msi_eq_offset(msi, eq) + head * sizeof(u32); | ||
| 300 | msg = (u32 *)(msi->eq_cpu + offs); | ||
| 301 | hwirq = readl(msg); | ||
| 302 | hwirq = (hwirq >> 5) + (hwirq & 0x1f); | ||
| 303 | |||
| 304 | /* | ||
| 305 | * Since we have multiple hwirqs mapped to a single MSI vector, | ||
| 306 | * we now need to derive the hwirq at CPU0. It can then be | ||
| 307 | * mapped back to a virq. | ||
| 308 | */ | ||
| 309 | return hwirq_to_canonical_hwirq(msi, hwirq); | ||
| 310 | } | ||
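This decode is the inverse of what iproc_msi_irq_compose_msi_msg() writes earlier in this file: the composer programs (hwirq << 5), a multi-MSI endpoint may OR its vector offset into the low 5 bits, and the decoder recombines the two. A round-trip sketch (userspace, illustrative only):

```c
#include <stdint.h>
#include <stdio.h>

/* Round-trip sketch of the MSI data encoding: the composer programs
 * (hwirq << 5) as the MSI data; a multi-MSI endpoint may OR its vector
 * offset into the low 5 bits, and the decoder recombines them. */
static uint32_t compose_data(uint32_t hwirq)
{
	return hwirq << 5;
}

static uint32_t decode_hwirq(uint32_t data)
{
	return (data >> 5) + (data & 0x1f);
}

int main(void)
{
	uint32_t base = 8;			/* allocated base hwirq */
	uint32_t data = compose_data(base) | 3;	/* endpoint uses vector 3 */

	printf("data 0x%x decodes to hwirq %u\n", data, decode_hwirq(data));
	return 0;				/* prints hwirq 11 = 8 + 3 */
}
```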
| 311 | |||
| 312 | static void iproc_msi_handler(struct irq_desc *desc) | ||
| 313 | { | ||
| 314 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 315 | struct iproc_msi_grp *grp; | ||
| 316 | struct iproc_msi *msi; | ||
| 317 | u32 eq, head, tail, nr_events; | ||
| 318 | unsigned long hwirq; | ||
| 319 | int virq; | ||
| 320 | |||
| 321 | chained_irq_enter(chip, desc); | ||
| 322 | |||
| 323 | grp = irq_desc_get_handler_data(desc); | ||
| 324 | msi = grp->msi; | ||
| 325 | eq = grp->eq; | ||
| 326 | |||
| 327 | /* | ||
| 328 | * The iProc MSI event queue is tracked by head and tail pointers. The | ||
| 329 | * head pointer indicates the next queue entry (MSI data) to be consumed | ||
| 330 | * by SW and needs to be updated by SW. The iProc MSI core uses the | ||
| 331 | * tail pointer as the next data insertion point. | ||
| 332 | * | ||
| 333 | * Entries between head and tail pointers contain valid MSI data. MSI | ||
| 334 | * data is guaranteed to be in the event queue memory before the tail | ||
| 335 | * pointer is updated by the iProc MSI core. | ||
| 336 | */ | ||
| 337 | head = iproc_msi_read_reg(msi, IPROC_MSI_EQ_HEAD, | ||
| 338 | eq) & IPROC_MSI_EQ_MASK; | ||
| 339 | do { | ||
| 340 | tail = iproc_msi_read_reg(msi, IPROC_MSI_EQ_TAIL, | ||
| 341 | eq) & IPROC_MSI_EQ_MASK; | ||
| 342 | |||
| 343 | /* | ||
| 344 | * Figure out total number of events (MSI data) to be | ||
| 345 | * processed. | ||
| 346 | */ | ||
| 347 | nr_events = (tail < head) ? | ||
| 348 | (EQ_LEN - (head - tail)) : (tail - head); | ||
| 349 | if (!nr_events) | ||
| 350 | break; | ||
| 351 | |||
| 352 | /* process all outstanding events */ | ||
| 353 | while (nr_events--) { | ||
| 354 | hwirq = decode_msi_hwirq(msi, eq, head); | ||
| 355 | virq = irq_find_mapping(msi->inner_domain, hwirq); | ||
| 356 | generic_handle_irq(virq); | ||
| 357 | |||
| 358 | head++; | ||
| 359 | head %= EQ_LEN; | ||
| 360 | } | ||
| 361 | |||
| 362 | /* | ||
| 363 | * Now all outstanding events have been processed. Update the | ||
| 364 | * head pointer. | ||
| 365 | */ | ||
| 366 | iproc_msi_write_reg(msi, IPROC_MSI_EQ_HEAD, eq, head); | ||
| 367 | |||
| 368 | /* | ||
| 369 | * Now go read the tail pointer again to see if there are new | ||
| 370 | * outstanding events that came in during the above window. | ||
| 371 | */ | ||
| 372 | } while (true); | ||
| 373 | |||
| 374 | chained_irq_exit(chip, desc); | ||
| 375 | } | ||
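The nr_events expression above is the standard circular-buffer occupancy formula with wrap-around. A quick standalone check, assuming EQ_LEN = 64 as defined earlier:

```c
#include <stdio.h>

#define EQ_LEN 64

/* Occupancy of the circular event queue, as computed in the handler. */
static unsigned int nr_events(unsigned int head, unsigned int tail)
{
	return (tail < head) ? (EQ_LEN - (head - tail)) : (tail - head);
}

int main(void)
{
	printf("head=10 tail=14 -> %u events\n", nr_events(10, 14));	/* 4 */
	printf("head=60 tail=2  -> %u events\n", nr_events(60, 2));	/* 6 */
	printf("head=5  tail=5  -> %u events\n", nr_events(5, 5));	/* 0 */
	return 0;
}
```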
| 376 | |||
| 377 | static void iproc_msi_enable(struct iproc_msi *msi) | ||
| 378 | { | ||
| 379 | int i, eq; | ||
| 380 | u32 val; | ||
| 381 | |||
| 382 | /* Program memory region for each event queue */ | ||
| 383 | for (i = 0; i < msi->nr_eq_region; i++) { | ||
| 384 | dma_addr_t addr = msi->eq_dma + (i * EQ_MEM_REGION_SIZE); | ||
| 385 | |||
| 386 | iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE, i, | ||
| 387 | lower_32_bits(addr)); | ||
| 388 | iproc_msi_write_reg(msi, IPROC_MSI_EQ_PAGE_UPPER, i, | ||
| 389 | upper_32_bits(addr)); | ||
| 390 | } | ||
| 391 | |||
| 392 | /* Program address region for MSI posted writes */ | ||
| 393 | for (i = 0; i < msi->nr_msi_region; i++) { | ||
| 394 | phys_addr_t addr = msi->msi_addr + (i * MSI_MEM_REGION_SIZE); | ||
| 395 | |||
| 396 | iproc_msi_write_reg(msi, IPROC_MSI_PAGE, i, | ||
| 397 | lower_32_bits(addr)); | ||
| 398 | iproc_msi_write_reg(msi, IPROC_MSI_PAGE_UPPER, i, | ||
| 399 | upper_32_bits(addr)); | ||
| 400 | } | ||
| 401 | |||
| 402 | for (eq = 0; eq < msi->nr_irqs; eq++) { | ||
| 403 | /* Enable MSI event queue */ | ||
| 404 | val = IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | | ||
| 405 | IPROC_MSI_EQ_EN; | ||
| 406 | iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); | ||
| 407 | |||
| 408 | /* | ||
| 409 | * Some legacy platforms require the MSI interrupt enable | ||
| 410 | * register to be set explicitly. | ||
| 411 | */ | ||
| 412 | if (msi->has_inten_reg) { | ||
| 413 | val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); | ||
| 414 | val |= BIT(eq); | ||
| 415 | iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); | ||
| 416 | } | ||
| 417 | } | ||
| 418 | } | ||
| 419 | |||
| 420 | static void iproc_msi_disable(struct iproc_msi *msi) | ||
| 421 | { | ||
| 422 | u32 eq, val; | ||
| 423 | |||
| 424 | for (eq = 0; eq < msi->nr_irqs; eq++) { | ||
| 425 | if (msi->has_inten_reg) { | ||
| 426 | val = iproc_msi_read_reg(msi, IPROC_MSI_INTS_EN, eq); | ||
| 427 | val &= ~BIT(eq); | ||
| 428 | iproc_msi_write_reg(msi, IPROC_MSI_INTS_EN, eq, val); | ||
| 429 | } | ||
| 430 | |||
| 431 | val = iproc_msi_read_reg(msi, IPROC_MSI_CTRL, eq); | ||
| 432 | val &= ~(IPROC_MSI_INTR_EN | IPROC_MSI_INT_N_EVENT | | ||
| 433 | IPROC_MSI_EQ_EN); | ||
| 434 | iproc_msi_write_reg(msi, IPROC_MSI_CTRL, eq, val); | ||
| 435 | } | ||
| 436 | } | ||
| 437 | |||
| 438 | static int iproc_msi_alloc_domains(struct device_node *node, | ||
| 439 | struct iproc_msi *msi) | ||
| 440 | { | ||
| 441 | msi->inner_domain = irq_domain_add_linear(NULL, msi->nr_msi_vecs, | ||
| 442 | &msi_domain_ops, msi); | ||
| 443 | if (!msi->inner_domain) | ||
| 444 | return -ENOMEM; | ||
| 445 | |||
| 446 | msi->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), | ||
| 447 | &iproc_msi_domain_info, | ||
| 448 | msi->inner_domain); | ||
| 449 | if (!msi->msi_domain) { | ||
| 450 | irq_domain_remove(msi->inner_domain); | ||
| 451 | return -ENOMEM; | ||
| 452 | } | ||
| 453 | |||
| 454 | return 0; | ||
| 455 | } | ||
| 456 | |||
| 457 | static void iproc_msi_free_domains(struct iproc_msi *msi) | ||
| 458 | { | ||
| 459 | if (msi->msi_domain) | ||
| 460 | irq_domain_remove(msi->msi_domain); | ||
| 461 | |||
| 462 | if (msi->inner_domain) | ||
| 463 | irq_domain_remove(msi->inner_domain); | ||
| 464 | } | ||
| 465 | |||
| 466 | static void iproc_msi_irq_free(struct iproc_msi *msi, unsigned int cpu) | ||
| 467 | { | ||
| 468 | int i; | ||
| 469 | |||
| 470 | for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { | ||
| 471 | irq_set_chained_handler_and_data(msi->grps[i].gic_irq, | ||
| 472 | NULL, NULL); | ||
| 473 | } | ||
| 474 | } | ||
| 475 | |||
| 476 | static int iproc_msi_irq_setup(struct iproc_msi *msi, unsigned int cpu) | ||
| 477 | { | ||
| 478 | int i, ret; | ||
| 479 | cpumask_var_t mask; | ||
| 480 | struct iproc_pcie *pcie = msi->pcie; | ||
| 481 | |||
| 482 | for (i = cpu; i < msi->nr_irqs; i += msi->nr_cpus) { | ||
| 483 | irq_set_chained_handler_and_data(msi->grps[i].gic_irq, | ||
| 484 | iproc_msi_handler, | ||
| 485 | &msi->grps[i]); | ||
| 486 | /* Dedicate GIC interrupt to each CPU core */ | ||
| 487 | if (alloc_cpumask_var(&mask, GFP_KERNEL)) { | ||
| 488 | cpumask_clear(mask); | ||
| 489 | cpumask_set_cpu(cpu, mask); | ||
| 490 | ret = irq_set_affinity(msi->grps[i].gic_irq, mask); | ||
| 491 | if (ret) | ||
| 492 | dev_err(pcie->dev, | ||
| 493 | "failed to set affinity for IRQ%d\n", | ||
| 494 | msi->grps[i].gic_irq); | ||
| 495 | free_cpumask_var(mask); | ||
| 496 | } else { | ||
| 497 | dev_err(pcie->dev, "failed to alloc CPU mask\n"); | ||
| 498 | ret = -EINVAL; | ||
| 499 | } | ||
| 500 | |||
| 501 | if (ret) { | ||
| 502 | /* Free all configured/unconfigured IRQs */ | ||
| 503 | iproc_msi_irq_free(msi, cpu); | ||
| 504 | return ret; | ||
| 505 | } | ||
| 506 | } | ||
| 507 | |||
| 508 | return 0; | ||
| 509 | } | ||
| 510 | |||
| 511 | int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node) | ||
| 512 | { | ||
| 513 | struct iproc_msi *msi; | ||
| 514 | int i, ret; | ||
| 515 | unsigned int cpu; | ||
| 516 | |||
| 517 | if (!of_device_is_compatible(node, "brcm,iproc-msi")) | ||
| 518 | return -ENODEV; | ||
| 519 | |||
| 520 | if (!of_find_property(node, "msi-controller", NULL)) | ||
| 521 | return -ENODEV; | ||
| 522 | |||
| 523 | if (pcie->msi) | ||
| 524 | return -EBUSY; | ||
| 525 | |||
| 526 | msi = devm_kzalloc(pcie->dev, sizeof(*msi), GFP_KERNEL); | ||
| 527 | if (!msi) | ||
| 528 | return -ENOMEM; | ||
| 529 | |||
| 530 | msi->pcie = pcie; | ||
| 531 | pcie->msi = msi; | ||
| 532 | msi->msi_addr = pcie->base_addr; | ||
| 533 | mutex_init(&msi->bitmap_lock); | ||
| 534 | msi->nr_cpus = num_possible_cpus(); | ||
| 535 | |||
| 536 | msi->nr_irqs = of_irq_count(node); | ||
| 537 | if (!msi->nr_irqs) { | ||
| 538 | dev_err(pcie->dev, "found no MSI GIC interrupt\n"); | ||
| 539 | return -ENODEV; | ||
| 540 | } | ||
| 541 | |||
| 542 | if (msi->nr_irqs > NR_HW_IRQS) { | ||
| 543 | dev_warn(pcie->dev, "too many MSI GIC interrupts defined: %d\n", | ||
| 544 | msi->nr_irqs); | ||
| 545 | msi->nr_irqs = NR_HW_IRQS; | ||
| 546 | } | ||
| 547 | |||
| 548 | if (msi->nr_irqs < msi->nr_cpus) { | ||
| 549 | dev_err(pcie->dev, | ||
| 550 | "not enough GIC interrupts for MSI affinity\n"); | ||
| 551 | return -EINVAL; | ||
| 552 | } | ||
| 553 | |||
| 554 | if (msi->nr_irqs % msi->nr_cpus != 0) { | ||
| 555 | msi->nr_irqs -= msi->nr_irqs % msi->nr_cpus; | ||
| 556 | dev_warn(pcie->dev, "Reducing number of interrupts to %d\n", | ||
| 557 | msi->nr_irqs); | ||
| 558 | } | ||
| 559 | |||
| 560 | switch (pcie->type) { | ||
| 561 | case IPROC_PCIE_PAXB_BCMA: | ||
| 562 | case IPROC_PCIE_PAXB: | ||
| 563 | msi->reg_offsets = iproc_msi_reg_paxb; | ||
| 564 | msi->nr_eq_region = 1; | ||
| 565 | msi->nr_msi_region = 1; | ||
| 566 | break; | ||
| 567 | case IPROC_PCIE_PAXC: | ||
| 568 | msi->reg_offsets = iproc_msi_reg_paxc; | ||
| 569 | msi->nr_eq_region = msi->nr_irqs; | ||
| 570 | msi->nr_msi_region = msi->nr_irqs; | ||
| 571 | break; | ||
| 572 | default: | ||
| 573 | dev_err(pcie->dev, "incompatible iProc PCIe interface\n"); | ||
| 574 | return -EINVAL; | ||
| 575 | } | ||
| 576 | |||
| 577 | if (of_find_property(node, "brcm,pcie-msi-inten", NULL)) | ||
| 578 | msi->has_inten_reg = true; | ||
| 579 | |||
| 580 | msi->nr_msi_vecs = msi->nr_irqs * EQ_LEN; | ||
| 581 | msi->bitmap = devm_kcalloc(pcie->dev, BITS_TO_LONGS(msi->nr_msi_vecs), | ||
| 582 | sizeof(*msi->bitmap), GFP_KERNEL); | ||
| 583 | if (!msi->bitmap) | ||
| 584 | return -ENOMEM; | ||
| 585 | |||
| 586 | msi->grps = devm_kcalloc(pcie->dev, msi->nr_irqs, sizeof(*msi->grps), | ||
| 587 | GFP_KERNEL); | ||
| 588 | if (!msi->grps) | ||
| 589 | return -ENOMEM; | ||
| 590 | |||
| 591 | for (i = 0; i < msi->nr_irqs; i++) { | ||
| 592 | unsigned int irq = irq_of_parse_and_map(node, i); | ||
| 593 | |||
| 594 | if (!irq) { | ||
| 595 | dev_err(pcie->dev, "unable to parse/map interrupt\n"); | ||
| 596 | ret = -ENODEV; | ||
| 597 | goto free_irqs; | ||
| 598 | } | ||
| 599 | msi->grps[i].gic_irq = irq; | ||
| 600 | msi->grps[i].msi = msi; | ||
| 601 | msi->grps[i].eq = i; | ||
| 602 | } | ||
| 603 | |||
| 604 | /* Reserve memory for the event queues and make sure the memory is zeroed */ | ||
| 605 | msi->eq_cpu = dma_zalloc_coherent(pcie->dev, | ||
| 606 | msi->nr_eq_region * EQ_MEM_REGION_SIZE, | ||
| 607 | &msi->eq_dma, GFP_KERNEL); | ||
| 608 | if (!msi->eq_cpu) { | ||
| 609 | ret = -ENOMEM; | ||
| 610 | goto free_irqs; | ||
| 611 | } | ||
| 612 | |||
| 613 | ret = iproc_msi_alloc_domains(node, msi); | ||
| 614 | if (ret) { | ||
| 615 | dev_err(pcie->dev, "failed to create MSI domains\n"); | ||
| 616 | goto free_eq_dma; | ||
| 617 | } | ||
| 618 | |||
| 619 | for_each_online_cpu(cpu) { | ||
| 620 | ret = iproc_msi_irq_setup(msi, cpu); | ||
| 621 | if (ret) | ||
| 622 | goto free_msi_irq; | ||
| 623 | } | ||
| 624 | |||
| 625 | iproc_msi_enable(msi); | ||
| 626 | |||
| 627 | return 0; | ||
| 628 | |||
| 629 | free_msi_irq: | ||
| 630 | for_each_online_cpu(cpu) | ||
| 631 | iproc_msi_irq_free(msi, cpu); | ||
| 632 | iproc_msi_free_domains(msi); | ||
| 633 | |||
| 634 | free_eq_dma: | ||
| 635 | dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, | ||
| 636 | msi->eq_cpu, msi->eq_dma); | ||
| 637 | |||
| 638 | free_irqs: | ||
| 639 | for (i = 0; i < msi->nr_irqs; i++) { | ||
| 640 | if (msi->grps[i].gic_irq) | ||
| 641 | irq_dispose_mapping(msi->grps[i].gic_irq); | ||
| 642 | } | ||
| 643 | pcie->msi = NULL; | ||
| 644 | return ret; | ||
| 645 | } | ||
| 646 | EXPORT_SYMBOL(iproc_msi_init); | ||
| 647 | |||
| 648 | void iproc_msi_exit(struct iproc_pcie *pcie) | ||
| 649 | { | ||
| 650 | struct iproc_msi *msi = pcie->msi; | ||
| 651 | unsigned int i, cpu; | ||
| 652 | |||
| 653 | if (!msi) | ||
| 654 | return; | ||
| 655 | |||
| 656 | iproc_msi_disable(msi); | ||
| 657 | |||
| 658 | for_each_online_cpu(cpu) | ||
| 659 | iproc_msi_irq_free(msi, cpu); | ||
| 660 | |||
| 661 | iproc_msi_free_domains(msi); | ||
| 662 | |||
| 663 | dma_free_coherent(pcie->dev, msi->nr_eq_region * EQ_MEM_REGION_SIZE, | ||
| 664 | msi->eq_cpu, msi->eq_dma); | ||
| 665 | |||
| 666 | for (i = 0; i < msi->nr_irqs; i++) { | ||
| 667 | if (msi->grps[i].gic_irq) | ||
| 668 | irq_dispose_mapping(msi->grps[i].gic_irq); | ||
| 669 | } | ||
| 670 | } | ||
| 671 | EXPORT_SYMBOL(iproc_msi_exit); | ||
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c new file mode 100644 index 000000000000..f30f5f3fb5c1 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc-platform.c | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2015 Broadcom Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | #include <linux/kernel.h> | ||
| 7 | #include <linux/pci.h> | ||
| 8 | #include <linux/clk.h> | ||
| 9 | #include <linux/module.h> | ||
| 10 | #include <linux/slab.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/platform_device.h> | ||
| 13 | #include <linux/of_address.h> | ||
| 14 | #include <linux/of_pci.h> | ||
| 15 | #include <linux/of_irq.h> | ||
| 16 | #include <linux/of_platform.h> | ||
| 17 | #include <linux/phy/phy.h> | ||
| 18 | |||
| 19 | #include "../pci.h" | ||
| 20 | #include "pcie-iproc.h" | ||
| 21 | |||
| 22 | static const struct of_device_id iproc_pcie_of_match_table[] = { | ||
| 23 | { | ||
| 24 | .compatible = "brcm,iproc-pcie", | ||
| 25 | .data = (int *)IPROC_PCIE_PAXB, | ||
| 26 | }, { | ||
| 27 | .compatible = "brcm,iproc-pcie-paxb-v2", | ||
| 28 | .data = (int *)IPROC_PCIE_PAXB_V2, | ||
| 29 | }, { | ||
| 30 | .compatible = "brcm,iproc-pcie-paxc", | ||
| 31 | .data = (int *)IPROC_PCIE_PAXC, | ||
| 32 | }, { | ||
| 33 | .compatible = "brcm,iproc-pcie-paxc-v2", | ||
| 34 | .data = (int *)IPROC_PCIE_PAXC_V2, | ||
| 35 | }, | ||
| 36 | { /* sentinel */ } | ||
| 37 | }; | ||
| 38 | MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); | ||
| 39 | |||
| 40 | static int iproc_pcie_pltfm_probe(struct platform_device *pdev) | ||
| 41 | { | ||
| 42 | struct device *dev = &pdev->dev; | ||
| 43 | struct iproc_pcie *pcie; | ||
| 44 | struct device_node *np = dev->of_node; | ||
| 45 | struct resource reg; | ||
| 46 | resource_size_t iobase = 0; | ||
| 47 | LIST_HEAD(resources); | ||
| 48 | struct pci_host_bridge *bridge; | ||
| 49 | int ret; | ||
| 50 | |||
| 51 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 52 | if (!bridge) | ||
| 53 | return -ENOMEM; | ||
| 54 | |||
| 55 | pcie = pci_host_bridge_priv(bridge); | ||
| 56 | |||
| 57 | pcie->dev = dev; | ||
| 58 | pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); | ||
| 59 | |||
| 60 | ret = of_address_to_resource(np, 0, ®); | ||
| 61 | if (ret < 0) { | ||
| 62 | dev_err(dev, "unable to obtain controller resources\n"); | ||
| 63 | return ret; | ||
| 64 | } | ||
| 65 | |||
| 66 | pcie->base = devm_pci_remap_cfgspace(dev, reg.start, | ||
| 67 | resource_size(®)); | ||
| 68 | if (!pcie->base) { | ||
| 69 | dev_err(dev, "unable to map controller registers\n"); | ||
| 70 | return -ENOMEM; | ||
| 71 | } | ||
| 72 | pcie->base_addr = reg.start; | ||
| 73 | |||
| 74 | if (of_property_read_bool(np, "brcm,pcie-ob")) { | ||
| 75 | u32 val; | ||
| 76 | |||
| 77 | ret = of_property_read_u32(np, "brcm,pcie-ob-axi-offset", | ||
| 78 | &val); | ||
| 79 | if (ret) { | ||
| 80 | dev_err(dev, | ||
| 81 | "missing brcm,pcie-ob-axi-offset property\n"); | ||
| 82 | return ret; | ||
| 83 | } | ||
| 84 | pcie->ob.axi_offset = val; | ||
| 85 | pcie->need_ob_cfg = true; | ||
| 86 | } | ||
| 87 | |||
| 88 | /* | ||
| 89 | * DT nodes are not used by all platforms that use the iProc PCIe | ||
| 90 | * core driver. For platforms that require explicit inbound mapping | ||
| 91 | * configuration, the "dma-ranges" property will be present in DT. | ||
| 92 | */ | ||
| 93 | pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges"); | ||
| 94 | |||
| 95 | /* PHY use is optional */ | ||
| 96 | pcie->phy = devm_phy_get(dev, "pcie-phy"); | ||
| 97 | if (IS_ERR(pcie->phy)) { | ||
| 98 | if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) | ||
| 99 | return -EPROBE_DEFER; | ||
| 100 | pcie->phy = NULL; | ||
| 101 | } | ||
| 102 | |||
| 103 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &resources, | ||
| 104 | &iobase); | ||
| 105 | if (ret) { | ||
| 106 | dev_err(dev, "unable to get PCI host bridge resources\n"); | ||
| 107 | return ret; | ||
| 108 | } | ||
| 109 | |||
| 110 | /* PAXC doesn't support legacy IRQs, skip mapping */ | ||
| 111 | switch (pcie->type) { | ||
| 112 | case IPROC_PCIE_PAXC: | ||
| 113 | case IPROC_PCIE_PAXC_V2: | ||
| 114 | break; | ||
| 115 | default: | ||
| 116 | pcie->map_irq = of_irq_parse_and_map_pci; | ||
| 117 | } | ||
| 118 | |||
| 119 | ret = iproc_pcie_setup(pcie, &resources); | ||
| 120 | if (ret) { | ||
| 121 | dev_err(dev, "PCIe controller setup failed\n"); | ||
| 122 | pci_free_resource_list(&resources); | ||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | platform_set_drvdata(pdev, pcie); | ||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | static int iproc_pcie_pltfm_remove(struct platform_device *pdev) | ||
| 131 | { | ||
| 132 | struct iproc_pcie *pcie = platform_get_drvdata(pdev); | ||
| 133 | |||
| 134 | return iproc_pcie_remove(pcie); | ||
| 135 | } | ||
| 136 | |||
| 137 | static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev) | ||
| 138 | { | ||
| 139 | struct iproc_pcie *pcie = platform_get_drvdata(pdev); | ||
| 140 | |||
| 141 | iproc_pcie_shutdown(pcie); | ||
| 142 | } | ||
| 143 | |||
| 144 | static struct platform_driver iproc_pcie_pltfm_driver = { | ||
| 145 | .driver = { | ||
| 146 | .name = "iproc-pcie", | ||
| 147 | .of_match_table = of_match_ptr(iproc_pcie_of_match_table), | ||
| 148 | }, | ||
| 149 | .probe = iproc_pcie_pltfm_probe, | ||
| 150 | .remove = iproc_pcie_pltfm_remove, | ||
| 151 | .shutdown = iproc_pcie_pltfm_shutdown, | ||
| 152 | }; | ||
| 153 | module_platform_driver(iproc_pcie_pltfm_driver); | ||
| 154 | |||
| 155 | MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); | ||
| 156 | MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver"); | ||
| 157 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c new file mode 100644 index 000000000000..3c76c5fa4f32 --- /dev/null +++ b/drivers/pci/controller/pcie-iproc.c | |||
| @@ -0,0 +1,1432 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2014 Hauke Mehrtens <hauke@hauke-m.de> | ||
| 4 | * Copyright (C) 2015 Broadcom Corporation | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/kernel.h> | ||
| 8 | #include <linux/pci.h> | ||
| 9 | #include <linux/msi.h> | ||
| 10 | #include <linux/clk.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/mbus.h> | ||
| 13 | #include <linux/slab.h> | ||
| 14 | #include <linux/delay.h> | ||
| 15 | #include <linux/interrupt.h> | ||
| 16 | #include <linux/irqchip/arm-gic-v3.h> | ||
| 17 | #include <linux/platform_device.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_pci.h> | ||
| 20 | #include <linux/of_irq.h> | ||
| 21 | #include <linux/of_platform.h> | ||
| 22 | #include <linux/phy/phy.h> | ||
| 23 | |||
| 24 | #include "pcie-iproc.h" | ||
| 25 | |||
| 26 | #define EP_PERST_SOURCE_SELECT_SHIFT 2 | ||
| 27 | #define EP_PERST_SOURCE_SELECT BIT(EP_PERST_SOURCE_SELECT_SHIFT) | ||
| 28 | #define EP_MODE_SURVIVE_PERST_SHIFT 1 | ||
| 29 | #define EP_MODE_SURVIVE_PERST BIT(EP_MODE_SURVIVE_PERST_SHIFT) | ||
| 30 | #define RC_PCIE_RST_OUTPUT_SHIFT 0 | ||
| 31 | #define RC_PCIE_RST_OUTPUT BIT(RC_PCIE_RST_OUTPUT_SHIFT) | ||
| 32 | #define PAXC_RESET_MASK 0x7f | ||
| 33 | |||
| 34 | #define GIC_V3_CFG_SHIFT 0 | ||
| 35 | #define GIC_V3_CFG BIT(GIC_V3_CFG_SHIFT) | ||
| 36 | |||
| 37 | #define MSI_ENABLE_CFG_SHIFT 0 | ||
| 38 | #define MSI_ENABLE_CFG BIT(MSI_ENABLE_CFG_SHIFT) | ||
| 39 | |||
| 40 | #define CFG_IND_ADDR_MASK 0x00001ffc | ||
| 41 | |||
| 42 | #define CFG_ADDR_BUS_NUM_SHIFT 20 | ||
| 43 | #define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 | ||
| 44 | #define CFG_ADDR_DEV_NUM_SHIFT 15 | ||
| 45 | #define CFG_ADDR_DEV_NUM_MASK 0x000f8000 | ||
| 46 | #define CFG_ADDR_FUNC_NUM_SHIFT 12 | ||
| 47 | #define CFG_ADDR_FUNC_NUM_MASK 0x00007000 | ||
| 48 | #define CFG_ADDR_REG_NUM_SHIFT 2 | ||
| 49 | #define CFG_ADDR_REG_NUM_MASK 0x00000ffc | ||
| 50 | #define CFG_ADDR_CFG_TYPE_SHIFT 0 | ||
| 51 | #define CFG_ADDR_CFG_TYPE_MASK 0x00000003 | ||
| 52 | |||
| 53 | #define SYS_RC_INTX_MASK 0xf | ||
| 54 | |||
| 55 | #define PCIE_PHYLINKUP_SHIFT 3 | ||
| 56 | #define PCIE_PHYLINKUP BIT(PCIE_PHYLINKUP_SHIFT) | ||
| 57 | #define PCIE_DL_ACTIVE_SHIFT 2 | ||
| 58 | #define PCIE_DL_ACTIVE BIT(PCIE_DL_ACTIVE_SHIFT) | ||
| 59 | |||
| 60 | #define APB_ERR_EN_SHIFT 0 | ||
| 61 | #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) | ||
| 62 | |||
| 63 | #define CFG_RETRY_STATUS 0xffff0001 | ||
| 64 | #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ | ||
| 65 | |||
| 66 | /* derive the enum index of the outbound/inbound mapping registers */ | ||
| 67 | #define MAP_REG(base_reg, index) ((base_reg) + (index) * 2) | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Maximum number of outbound mapping window sizes that can be supported by any | ||
| 71 | * OARR/OMAP mapping pair | ||
| 72 | */ | ||
| 73 | #define MAX_NUM_OB_WINDOW_SIZES 4 | ||
| 74 | |||
| 75 | #define OARR_VALID_SHIFT 0 | ||
| 76 | #define OARR_VALID BIT(OARR_VALID_SHIFT) | ||
| 77 | #define OARR_SIZE_CFG_SHIFT 1 | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Maximum number of inbound mapping region sizes that can be supported by an | ||
| 81 | * IARR | ||
| 82 | */ | ||
| 83 | #define MAX_NUM_IB_REGION_SIZES 9 | ||
| 84 | |||
| 85 | #define IMAP_VALID_SHIFT 0 | ||
| 86 | #define IMAP_VALID BIT(IMAP_VALID_SHIFT) | ||
| 87 | |||
| 88 | #define IPROC_PCI_EXP_CAP 0xac | ||
| 89 | |||
| 90 | #define IPROC_PCIE_REG_INVALID 0xffff | ||
| 91 | |||
| 92 | /** | ||
| 93 | * iProc PCIe outbound mapping controller-specific parameters | ||
| 94 | * | ||
| 95 | * @window_sizes: list of supported outbound mapping window sizes in MB | ||
| 96 | * @nr_sizes: number of supported outbound mapping window sizes | ||
| 97 | */ | ||
| 98 | struct iproc_pcie_ob_map { | ||
| 99 | resource_size_t window_sizes[MAX_NUM_OB_WINDOW_SIZES]; | ||
| 100 | unsigned int nr_sizes; | ||
| 101 | }; | ||
| 102 | |||
| 103 | static const struct iproc_pcie_ob_map paxb_ob_map[] = { | ||
| 104 | { | ||
| 105 | /* OARR0/OMAP0 */ | ||
| 106 | .window_sizes = { 128, 256 }, | ||
| 107 | .nr_sizes = 2, | ||
| 108 | }, | ||
| 109 | { | ||
| 110 | /* OARR1/OMAP1 */ | ||
| 111 | .window_sizes = { 128, 256 }, | ||
| 112 | .nr_sizes = 2, | ||
| 113 | }, | ||
| 114 | }; | ||
| 115 | |||
| 116 | static const struct iproc_pcie_ob_map paxb_v2_ob_map[] = { | ||
| 117 | { | ||
| 118 | /* OARR0/OMAP0 */ | ||
| 119 | .window_sizes = { 128, 256 }, | ||
| 120 | .nr_sizes = 2, | ||
| 121 | }, | ||
| 122 | { | ||
| 123 | /* OARR1/OMAP1 */ | ||
| 124 | .window_sizes = { 128, 256 }, | ||
| 125 | .nr_sizes = 2, | ||
| 126 | }, | ||
| 127 | { | ||
| 128 | /* OARR2/OMAP2 */ | ||
| 129 | .window_sizes = { 128, 256, 512, 1024 }, | ||
| 130 | .nr_sizes = 4, | ||
| 131 | }, | ||
| 132 | { | ||
| 133 | /* OARR3/OMAP3 */ | ||
| 134 | .window_sizes = { 128, 256, 512, 1024 }, | ||
| 135 | .nr_sizes = 4, | ||
| 136 | }, | ||
| 137 | }; | ||
| 138 | |||
| 139 | /** | ||
| 140 | * iProc PCIe inbound mapping type | ||
| 141 | */ | ||
| 142 | enum iproc_pcie_ib_map_type { | ||
| 143 | /* for DDR memory */ | ||
| 144 | IPROC_PCIE_IB_MAP_MEM = 0, | ||
| 145 | |||
| 146 | /* for device I/O memory */ | ||
| 147 | IPROC_PCIE_IB_MAP_IO, | ||
| 148 | |||
| 149 | /* invalid or unused */ | ||
| 150 | IPROC_PCIE_IB_MAP_INVALID | ||
| 151 | }; | ||
| 152 | |||
| 153 | /** | ||
| 154 | * iProc PCIe inbound mapping controller-specific parameters | ||
| 155 | * | ||
| 156 | * @type: inbound mapping region type | ||
| 157 | * @size_unit: inbound mapping region size unit, could be SZ_1K, SZ_1M, or | ||
| 158 | * SZ_1G | ||
| 159 | * @region_sizes: list of supported inbound mapping region sizes in KB, MB, or | ||
| 160 | * GB, depending on the size unit | ||
| 161 | * @nr_sizes: number of supported inbound mapping region sizes | ||
| 162 | * @nr_windows: number of supported inbound mapping windows for the region | ||
| 163 | * @imap_addr_offset: register offset between the upper and lower 32-bit | ||
| 164 | * IMAP address registers | ||
| 165 | * @imap_window_offset: register offset between each IMAP window | ||
| 166 | */ | ||
| 167 | struct iproc_pcie_ib_map { | ||
| 168 | enum iproc_pcie_ib_map_type type; | ||
| 169 | unsigned int size_unit; | ||
| 170 | resource_size_t region_sizes[MAX_NUM_IB_REGION_SIZES]; | ||
| 171 | unsigned int nr_sizes; | ||
| 172 | unsigned int nr_windows; | ||
| 173 | u16 imap_addr_offset; | ||
| 174 | u16 imap_window_offset; | ||
| 175 | }; | ||
| 176 | |||
| 177 | static const struct iproc_pcie_ib_map paxb_v2_ib_map[] = { | ||
| 178 | { | ||
| 179 | /* IARR0/IMAP0 */ | ||
| 180 | .type = IPROC_PCIE_IB_MAP_IO, | ||
| 181 | .size_unit = SZ_1K, | ||
| 182 | .region_sizes = { 32 }, | ||
| 183 | .nr_sizes = 1, | ||
| 184 | .nr_windows = 8, | ||
| 185 | .imap_addr_offset = 0x40, | ||
| 186 | .imap_window_offset = 0x4, | ||
| 187 | }, | ||
| 188 | { | ||
| 189 | /* IARR1/IMAP1 (currently unused) */ | ||
| 190 | .type = IPROC_PCIE_IB_MAP_INVALID, | ||
| 191 | }, | ||
| 192 | { | ||
| 193 | /* IARR2/IMAP2 */ | ||
| 194 | .type = IPROC_PCIE_IB_MAP_MEM, | ||
| 195 | .size_unit = SZ_1M, | ||
| 196 | .region_sizes = { 64, 128, 256, 512, 1024, 2048, 4096, 8192, | ||
| 197 | 16384 }, | ||
| 198 | .nr_sizes = 9, | ||
| 199 | .nr_windows = 1, | ||
| 200 | .imap_addr_offset = 0x4, | ||
| 201 | .imap_window_offset = 0x8, | ||
| 202 | }, | ||
| 203 | { | ||
| 204 | /* IARR3/IMAP3 */ | ||
| 205 | .type = IPROC_PCIE_IB_MAP_MEM, | ||
| 206 | .size_unit = SZ_1G, | ||
| 207 | .region_sizes = { 1, 2, 4, 8, 16, 32 }, | ||
| 208 | .nr_sizes = 6, | ||
| 209 | .nr_windows = 8, | ||
| 210 | .imap_addr_offset = 0x4, | ||
| 211 | .imap_window_offset = 0x8, | ||
| 212 | }, | ||
| 213 | { | ||
| 214 | /* IARR4/IMAP4 */ | ||
| 215 | .type = IPROC_PCIE_IB_MAP_MEM, | ||
| 216 | .size_unit = SZ_1G, | ||
| 217 | .region_sizes = { 32, 64, 128, 256, 512 }, | ||
| 218 | .nr_sizes = 5, | ||
| 219 | .nr_windows = 8, | ||
| 220 | .imap_addr_offset = 0x4, | ||
| 221 | .imap_window_offset = 0x8, | ||
| 222 | }, | ||
| 223 | }; | ||
| 224 | |||
| 225 | /* | ||
| 226 | * iProc PCIe host registers | ||
| 227 | */ | ||
| 228 | enum iproc_pcie_reg { | ||
| 229 | /* clock/reset signal control */ | ||
| 230 | IPROC_PCIE_CLK_CTRL = 0, | ||
| 231 | |||
| 232 | /* | ||
| 233 | * To allow MSI to be steered to an external MSI controller (e.g., ARM | ||
| 234 | * GICv3 ITS) | ||
| 235 | */ | ||
| 236 | IPROC_PCIE_MSI_GIC_MODE, | ||
| 237 | |||
| 238 | /* | ||
| 239 | * IPROC_PCIE_MSI_BASE_ADDR and IPROC_PCIE_MSI_WINDOW_SIZE define the | ||
| 240 | * window where the MSI posted writes are written, for the writes to be | ||
| 241 | * interpreted as MSI writes. | ||
| 242 | */ | ||
| 243 | IPROC_PCIE_MSI_BASE_ADDR, | ||
| 244 | IPROC_PCIE_MSI_WINDOW_SIZE, | ||
| 245 | |||
| 246 | /* | ||
| 247 | * To hold the address of the register where the MSI writes are | ||
| 248 | * programmed. When ARM GICv3 ITS is used, this should be programmed | ||
| 249 | * with the address of the GITS_TRANSLATER register. | ||
| 250 | */ | ||
| 251 | IPROC_PCIE_MSI_ADDR_LO, | ||
| 252 | IPROC_PCIE_MSI_ADDR_HI, | ||
| 253 | |||
| 254 | /* enable MSI */ | ||
| 255 | IPROC_PCIE_MSI_EN_CFG, | ||
| 256 | |||
| 257 | /* allow access to root complex configuration space */ | ||
| 258 | IPROC_PCIE_CFG_IND_ADDR, | ||
| 259 | IPROC_PCIE_CFG_IND_DATA, | ||
| 260 | |||
| 261 | /* allow access to device configuration space */ | ||
| 262 | IPROC_PCIE_CFG_ADDR, | ||
| 263 | IPROC_PCIE_CFG_DATA, | ||
| 264 | |||
| 265 | /* enable INTx */ | ||
| 266 | IPROC_PCIE_INTX_EN, | ||
| 267 | |||
| 268 | /* outbound address mapping */ | ||
| 269 | IPROC_PCIE_OARR0, | ||
| 270 | IPROC_PCIE_OMAP0, | ||
| 271 | IPROC_PCIE_OARR1, | ||
| 272 | IPROC_PCIE_OMAP1, | ||
| 273 | IPROC_PCIE_OARR2, | ||
| 274 | IPROC_PCIE_OMAP2, | ||
| 275 | IPROC_PCIE_OARR3, | ||
| 276 | IPROC_PCIE_OMAP3, | ||
| 277 | |||
| 278 | /* inbound address mapping */ | ||
| 279 | IPROC_PCIE_IARR0, | ||
| 280 | IPROC_PCIE_IMAP0, | ||
| 281 | IPROC_PCIE_IARR1, | ||
| 282 | IPROC_PCIE_IMAP1, | ||
| 283 | IPROC_PCIE_IARR2, | ||
| 284 | IPROC_PCIE_IMAP2, | ||
| 285 | IPROC_PCIE_IARR3, | ||
| 286 | IPROC_PCIE_IMAP3, | ||
| 287 | IPROC_PCIE_IARR4, | ||
| 288 | IPROC_PCIE_IMAP4, | ||
| 289 | |||
| 290 | /* link status */ | ||
| 291 | IPROC_PCIE_LINK_STATUS, | ||
| 292 | |||
| 293 | /* enable APB error for unsupported requests */ | ||
| 294 | IPROC_PCIE_APB_ERR_EN, | ||
| 295 | |||
| 296 | /* total number of core registers */ | ||
| 297 | IPROC_PCIE_MAX_NUM_REG, | ||
| 298 | }; | ||
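Because each OARR/IARR entry in the enum above is immediately followed by its OMAP/IMAP partner, MAP_REG() can step between mapping pairs with a stride of 2. A quick sanity check of the arithmetic, using the enum names as defined above:

    /*
     * MAP_REG(base, index) expands to (base) + (index) * 2, so:
     *
     *   MAP_REG(IPROC_PCIE_OARR0, 0) == IPROC_PCIE_OARR0
     *   MAP_REG(IPROC_PCIE_OARR0, 1) == IPROC_PCIE_OARR0 + 2 == IPROC_PCIE_OARR1
     *   MAP_REG(IPROC_PCIE_IARR0, 2) == IPROC_PCIE_IARR0 + 4 == IPROC_PCIE_IARR2
     */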
| 299 | |||
| 300 | /* iProc PCIe PAXB BCMA registers */ | ||
| 301 | static const u16 iproc_pcie_reg_paxb_bcma[] = { | ||
| 302 | [IPROC_PCIE_CLK_CTRL] = 0x000, | ||
| 303 | [IPROC_PCIE_CFG_IND_ADDR] = 0x120, | ||
| 304 | [IPROC_PCIE_CFG_IND_DATA] = 0x124, | ||
| 305 | [IPROC_PCIE_CFG_ADDR] = 0x1f8, | ||
| 306 | [IPROC_PCIE_CFG_DATA] = 0x1fc, | ||
| 307 | [IPROC_PCIE_INTX_EN] = 0x330, | ||
| 308 | [IPROC_PCIE_LINK_STATUS] = 0xf0c, | ||
| 309 | }; | ||
| 310 | |||
| 311 | /* iProc PCIe PAXB registers */ | ||
| 312 | static const u16 iproc_pcie_reg_paxb[] = { | ||
| 313 | [IPROC_PCIE_CLK_CTRL] = 0x000, | ||
| 314 | [IPROC_PCIE_CFG_IND_ADDR] = 0x120, | ||
| 315 | [IPROC_PCIE_CFG_IND_DATA] = 0x124, | ||
| 316 | [IPROC_PCIE_CFG_ADDR] = 0x1f8, | ||
| 317 | [IPROC_PCIE_CFG_DATA] = 0x1fc, | ||
| 318 | [IPROC_PCIE_INTX_EN] = 0x330, | ||
| 319 | [IPROC_PCIE_OARR0] = 0xd20, | ||
| 320 | [IPROC_PCIE_OMAP0] = 0xd40, | ||
| 321 | [IPROC_PCIE_OARR1] = 0xd28, | ||
| 322 | [IPROC_PCIE_OMAP1] = 0xd48, | ||
| 323 | [IPROC_PCIE_LINK_STATUS] = 0xf0c, | ||
| 324 | [IPROC_PCIE_APB_ERR_EN] = 0xf40, | ||
| 325 | }; | ||
| 326 | |||
| 327 | /* iProc PCIe PAXB v2 registers */ | ||
| 328 | static const u16 iproc_pcie_reg_paxb_v2[] = { | ||
| 329 | [IPROC_PCIE_CLK_CTRL] = 0x000, | ||
| 330 | [IPROC_PCIE_CFG_IND_ADDR] = 0x120, | ||
| 331 | [IPROC_PCIE_CFG_IND_DATA] = 0x124, | ||
| 332 | [IPROC_PCIE_CFG_ADDR] = 0x1f8, | ||
| 333 | [IPROC_PCIE_CFG_DATA] = 0x1fc, | ||
| 334 | [IPROC_PCIE_INTX_EN] = 0x330, | ||
| 335 | [IPROC_PCIE_OARR0] = 0xd20, | ||
| 336 | [IPROC_PCIE_OMAP0] = 0xd40, | ||
| 337 | [IPROC_PCIE_OARR1] = 0xd28, | ||
| 338 | [IPROC_PCIE_OMAP1] = 0xd48, | ||
| 339 | [IPROC_PCIE_OARR2] = 0xd60, | ||
| 340 | [IPROC_PCIE_OMAP2] = 0xd68, | ||
| 341 | [IPROC_PCIE_OARR3] = 0xdf0, | ||
| 342 | [IPROC_PCIE_OMAP3] = 0xdf8, | ||
| 343 | [IPROC_PCIE_IARR0] = 0xd00, | ||
| 344 | [IPROC_PCIE_IMAP0] = 0xc00, | ||
| 345 | [IPROC_PCIE_IARR2] = 0xd10, | ||
| 346 | [IPROC_PCIE_IMAP2] = 0xcc0, | ||
| 347 | [IPROC_PCIE_IARR3] = 0xe00, | ||
| 348 | [IPROC_PCIE_IMAP3] = 0xe08, | ||
| 349 | [IPROC_PCIE_IARR4] = 0xe68, | ||
| 350 | [IPROC_PCIE_IMAP4] = 0xe70, | ||
| 351 | [IPROC_PCIE_LINK_STATUS] = 0xf0c, | ||
| 352 | [IPROC_PCIE_APB_ERR_EN] = 0xf40, | ||
| 353 | }; | ||
| 354 | |||
| 355 | /* iProc PCIe PAXC v1 registers */ | ||
| 356 | static const u16 iproc_pcie_reg_paxc[] = { | ||
| 357 | [IPROC_PCIE_CLK_CTRL] = 0x000, | ||
| 358 | [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, | ||
| 359 | [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, | ||
| 360 | [IPROC_PCIE_CFG_ADDR] = 0x1f8, | ||
| 361 | [IPROC_PCIE_CFG_DATA] = 0x1fc, | ||
| 362 | }; | ||
| 363 | |||
| 364 | /* iProc PCIe PAXC v2 registers */ | ||
| 365 | static const u16 iproc_pcie_reg_paxc_v2[] = { | ||
| 366 | [IPROC_PCIE_MSI_GIC_MODE] = 0x050, | ||
| 367 | [IPROC_PCIE_MSI_BASE_ADDR] = 0x074, | ||
| 368 | [IPROC_PCIE_MSI_WINDOW_SIZE] = 0x078, | ||
| 369 | [IPROC_PCIE_MSI_ADDR_LO] = 0x07c, | ||
| 370 | [IPROC_PCIE_MSI_ADDR_HI] = 0x080, | ||
| 371 | [IPROC_PCIE_MSI_EN_CFG] = 0x09c, | ||
| 372 | [IPROC_PCIE_CFG_IND_ADDR] = 0x1f0, | ||
| 373 | [IPROC_PCIE_CFG_IND_DATA] = 0x1f4, | ||
| 374 | [IPROC_PCIE_CFG_ADDR] = 0x1f8, | ||
| 375 | [IPROC_PCIE_CFG_DATA] = 0x1fc, | ||
| 376 | }; | ||
| 377 | |||
| 378 | static inline struct iproc_pcie *iproc_data(struct pci_bus *bus) | ||
| 379 | { | ||
| 380 | struct iproc_pcie *pcie = bus->sysdata; | ||
| 381 | return pcie; | ||
| 382 | } | ||
| 383 | |||
| 384 | static inline bool iproc_pcie_reg_is_invalid(u16 reg_offset) | ||
| 385 | { | ||
| 386 | return !!(reg_offset == IPROC_PCIE_REG_INVALID); | ||
| 387 | } | ||
| 388 | |||
| 389 | static inline u16 iproc_pcie_reg_offset(struct iproc_pcie *pcie, | ||
| 390 | enum iproc_pcie_reg reg) | ||
| 391 | { | ||
| 392 | return pcie->reg_offsets[reg]; | ||
| 393 | } | ||
| 394 | |||
| 395 | static inline u32 iproc_pcie_read_reg(struct iproc_pcie *pcie, | ||
| 396 | enum iproc_pcie_reg reg) | ||
| 397 | { | ||
| 398 | u16 offset = iproc_pcie_reg_offset(pcie, reg); | ||
| 399 | |||
| 400 | if (iproc_pcie_reg_is_invalid(offset)) | ||
| 401 | return 0; | ||
| 402 | |||
| 403 | return readl(pcie->base + offset); | ||
| 404 | } | ||
| 405 | |||
| 406 | static inline void iproc_pcie_write_reg(struct iproc_pcie *pcie, | ||
| 407 | enum iproc_pcie_reg reg, u32 val) | ||
| 408 | { | ||
| 409 | u16 offset = iproc_pcie_reg_offset(pcie, reg); | ||
| 410 | |||
| 411 | if (iproc_pcie_reg_is_invalid(offset)) | ||
| 412 | return; | ||
| 413 | |||
| 414 | writel(val, pcie->base + offset); | ||
| 415 | } | ||
| 416 | |||
| 417 | /** | ||
| 418 | * APB error forwarding can be disabled while accessing the configuration | ||
| 419 | * registers of the endpoint device, to prevent unsupported requests | ||
| 420 | * (typically seen during enumeration with multi-function devices) from | ||
| 421 | * triggering a system exception. | ||
| 422 | */ | ||
| 423 | static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, | ||
| 424 | bool disable) | ||
| 425 | { | ||
| 426 | struct iproc_pcie *pcie = iproc_data(bus); | ||
| 427 | u32 val; | ||
| 428 | |||
| 429 | if (bus->number && pcie->has_apb_err_disable) { | ||
| 430 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_APB_ERR_EN); | ||
| 431 | if (disable) | ||
| 432 | val &= ~APB_ERR_EN; | ||
| 433 | else | ||
| 434 | val |= APB_ERR_EN; | ||
| 435 | iproc_pcie_write_reg(pcie, IPROC_PCIE_APB_ERR_EN, val); | ||
| 436 | } | ||
| 437 | } | ||
| 438 | |||
| 439 | static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, | ||
| 440 | unsigned int busno, | ||
| 441 | unsigned int slot, | ||
| 442 | unsigned int fn, | ||
| 443 | int where) | ||
| 444 | { | ||
| 445 | u16 offset; | ||
| 446 | u32 val; | ||
| 447 | |||
| 448 | /* EP device access */ | ||
| 449 | val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | | ||
| 450 | (slot << CFG_ADDR_DEV_NUM_SHIFT) | | ||
| 451 | (fn << CFG_ADDR_FUNC_NUM_SHIFT) | | ||
| 452 | (where & CFG_ADDR_REG_NUM_MASK) | | ||
| 453 | (1 & CFG_ADDR_CFG_TYPE_MASK); | ||
| 454 | |||
| 455 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_ADDR, val); | ||
| 456 | offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_DATA); | ||
| 457 | |||
| 458 | if (iproc_pcie_reg_is_invalid(offset)) | ||
| 459 | return NULL; | ||
| 460 | |||
| 461 | return (pcie->base + offset); | ||
| 462 | } | ||
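For reference, the CFG_ADDR value built above packs bus, device, function and register into a single word. A worked example with hypothetical values (bus 1, device 2, function 0, register offset 0x10):

    /*
     *   val = (1 << 20)          bus number        0x00100000
     *       | (2 << 15)          device number     0x00010000
     *       | (0 << 12)          function number   0x00000000
     *       | (0x10 & 0xffc)     register number   0x00000010
     *       | (1 & 0x3)          cfg type field    0x00000001
     *       = 0x00110011
     */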
| 463 | |||
| 464 | static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) | ||
| 465 | { | ||
| 466 | int timeout = CFG_RETRY_STATUS_TIMEOUT_US; | ||
| 467 | unsigned int data; | ||
| 468 | |||
| 469 | /* | ||
| 470 | * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only | ||
| 471 | * affects config reads of the Vendor ID. For config writes or any | ||
| 472 | * other config reads, the Root may automatically reissue the | ||
| 473 | * configuration request again as a new request. | ||
| 474 | * | ||
| 475 | * For config reads, this hardware returns CFG_RETRY_STATUS data | ||
| 476 | * when it receives a CRS completion, regardless of the address of | ||
| 477 | * the read or the CRS Software Visibility Enable bit. As a | ||
| 478 | * partial workaround for this, we retry in software any read that | ||
| 479 | * returns CFG_RETRY_STATUS. | ||
| 480 | * | ||
| 481 | * Note that a non-Vendor ID config register may have a value of | ||
| 482 | * CFG_RETRY_STATUS. If we read that, we can't distinguish it from | ||
| 483 | * a CRS completion, so we will incorrectly retry the read and | ||
| 484 | * eventually return the wrong data (0xffffffff). | ||
| 485 | */ | ||
| 486 | data = readl(cfg_data_p); | ||
| 487 | while (data == CFG_RETRY_STATUS && timeout--) { | ||
| 488 | udelay(1); | ||
| 489 | data = readl(cfg_data_p); | ||
| 490 | } | ||
| 491 | |||
| 492 | if (data == CFG_RETRY_STATUS) | ||
| 493 | data = 0xffffffff; | ||
| 494 | |||
| 495 | return data; | ||
| 496 | } | ||
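The retry budget above is easy to misread; CFG_RETRY_STATUS_TIMEOUT_US counts loop iterations, each separated by udelay(1):

    /*
     * Worst case: 500000 iterations x udelay(1), i.e. roughly 500 ms of
     * polling (plus register read latency) before the read gives up and
     * returns 0xffffffff. A legitimate register value of 0xffff0001 burns
     * the full budget, since the loop cannot tell it apart from a CRS
     * completion.
     */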
| 497 | |||
| 498 | static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 499 | int where, int size, u32 *val) | ||
| 500 | { | ||
| 501 | struct iproc_pcie *pcie = iproc_data(bus); | ||
| 502 | unsigned int slot = PCI_SLOT(devfn); | ||
| 503 | unsigned int fn = PCI_FUNC(devfn); | ||
| 504 | unsigned int busno = bus->number; | ||
| 505 | void __iomem *cfg_data_p; | ||
| 506 | unsigned int data; | ||
| 507 | int ret; | ||
| 508 | |||
| 509 | /* root complex access */ | ||
| 510 | if (busno == 0) { | ||
| 511 | ret = pci_generic_config_read32(bus, devfn, where, size, val); | ||
| 512 | if (ret != PCIBIOS_SUCCESSFUL) | ||
| 513 | return ret; | ||
| 514 | |||
| 515 | /* Don't advertise CRS SV support */ | ||
| 516 | if ((where & ~0x3) == IPROC_PCI_EXP_CAP + PCI_EXP_RTCTL) | ||
| 517 | *val &= ~(PCI_EXP_RTCAP_CRSVIS << 16); | ||
| 518 | return PCIBIOS_SUCCESSFUL; | ||
| 519 | } | ||
| 520 | |||
| 521 | cfg_data_p = iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); | ||
| 522 | |||
| 523 | if (!cfg_data_p) | ||
| 524 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 525 | |||
| 526 | data = iproc_pcie_cfg_retry(cfg_data_p); | ||
| 527 | |||
| 528 | *val = data; | ||
| 529 | if (size <= 2) | ||
| 530 | *val = (data >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | ||
| 531 | |||
| 532 | return PCIBIOS_SUCCESSFUL; | ||
| 533 | } | ||
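The driver always reads the full aligned dword (the register number programmed into CFG_ADDR is masked with 0xffc) and then extracts the requested bytes. A worked example of the extraction, with hypothetical data:

    /*
     * size = 2, where = 0x0e, data = 0x12345678 (aligned dword at 0x0c):
     *
     *   where & 3          = 2
     *   data >> (8 * 2)    = 0x00001234
     *   & ((1 << 16) - 1)  = 0x1234     <- returned in *val
     */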
| 534 | |||
| 535 | /** | ||
| 536 | * Note that access to the configuration registers is protected at a higher | ||
| 537 | * layer by 'pci_lock' in drivers/pci/access.c | ||
| 538 | */ | ||
| 539 | static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, | ||
| 540 | int busno, unsigned int devfn, | ||
| 541 | int where) | ||
| 542 | { | ||
| 543 | unsigned slot = PCI_SLOT(devfn); | ||
| 544 | unsigned fn = PCI_FUNC(devfn); | ||
| 545 | u16 offset; | ||
| 546 | |||
| 547 | /* root complex access */ | ||
| 548 | if (busno == 0) { | ||
| 549 | if (slot > 0 || fn > 0) | ||
| 550 | return NULL; | ||
| 551 | |||
| 552 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, | ||
| 553 | where & CFG_IND_ADDR_MASK); | ||
| 554 | offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); | ||
| 555 | if (iproc_pcie_reg_is_invalid(offset)) | ||
| 556 | return NULL; | ||
| 557 | else | ||
| 558 | return (pcie->base + offset); | ||
| 559 | } | ||
| 560 | |||
| 561 | /* | ||
| 562 | * PAXC is connected to an internally emulated EP within the SoC. It | ||
| 563 | * allows only one device. | ||
| 564 | */ | ||
| 565 | if (pcie->ep_is_internal) | ||
| 566 | if (slot > 0) | ||
| 567 | return NULL; | ||
| 568 | |||
| 569 | return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where); | ||
| 570 | } | ||
| 571 | |||
| 572 | static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, | ||
| 573 | unsigned int devfn, | ||
| 574 | int where) | ||
| 575 | { | ||
| 576 | return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, | ||
| 577 | where); | ||
| 578 | } | ||
| 579 | |||
| 580 | static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, | ||
| 581 | unsigned int devfn, int where, | ||
| 582 | int size, u32 *val) | ||
| 583 | { | ||
| 584 | void __iomem *addr; | ||
| 585 | |||
| 586 | addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); | ||
| 587 | if (!addr) { | ||
| 588 | *val = ~0; | ||
| 589 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 590 | } | ||
| 591 | |||
| 592 | *val = readl(addr); | ||
| 593 | |||
| 594 | if (size <= 2) | ||
| 595 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | ||
| 596 | |||
| 597 | return PCIBIOS_SUCCESSFUL; | ||
| 598 | } | ||
| 599 | |||
| 600 | static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, | ||
| 601 | unsigned int devfn, int where, | ||
| 602 | int size, u32 val) | ||
| 603 | { | ||
| 604 | void __iomem *addr; | ||
| 605 | u32 mask, tmp; | ||
| 606 | |||
| 607 | addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); | ||
| 608 | if (!addr) | ||
| 609 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 610 | |||
| 611 | if (size == 4) { | ||
| 612 | writel(val, addr); | ||
| 613 | return PCIBIOS_SUCCESSFUL; | ||
| 614 | } | ||
| 615 | |||
| 616 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); | ||
| 617 | tmp = readl(addr) & mask; | ||
| 618 | tmp |= val << ((where & 0x3) * 8); | ||
| 619 | writel(tmp, addr); | ||
| 620 | |||
| 621 | return PCIBIOS_SUCCESSFUL; | ||
| 622 | } | ||
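Sub-word raw writes are handled as a read-modify-write of the aligned dword. A worked example of the masking, again with hypothetical values (a 1-byte write of 0xab at byte offset 2):

    /*
     *   mask = ~(0xff << 16)       = 0xff00ffff
     *   tmp  = 0x11223344 & mask   = 0x11003344   (old dword, byte cleared)
     *   tmp |= 0xab << 16          = 0x11ab3344   (written back)
     */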
| 623 | |||
| 624 | static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, | ||
| 625 | int where, int size, u32 *val) | ||
| 626 | { | ||
| 627 | int ret; | ||
| 628 | struct iproc_pcie *pcie = iproc_data(bus); | ||
| 629 | |||
| 630 | iproc_pcie_apb_err_disable(bus, true); | ||
| 631 | if (pcie->type == IPROC_PCIE_PAXB_V2) | ||
| 632 | ret = iproc_pcie_config_read(bus, devfn, where, size, val); | ||
| 633 | else | ||
| 634 | ret = pci_generic_config_read32(bus, devfn, where, size, val); | ||
| 635 | iproc_pcie_apb_err_disable(bus, false); | ||
| 636 | |||
| 637 | return ret; | ||
| 638 | } | ||
| 639 | |||
| 640 | static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, | ||
| 641 | int where, int size, u32 val) | ||
| 642 | { | ||
| 643 | int ret; | ||
| 644 | |||
| 645 | iproc_pcie_apb_err_disable(bus, true); | ||
| 646 | ret = pci_generic_config_write32(bus, devfn, where, size, val); | ||
| 647 | iproc_pcie_apb_err_disable(bus, false); | ||
| 648 | |||
| 649 | return ret; | ||
| 650 | } | ||
| 651 | |||
| 652 | static struct pci_ops iproc_pcie_ops = { | ||
| 653 | .map_bus = iproc_pcie_bus_map_cfg_bus, | ||
| 654 | .read = iproc_pcie_config_read32, | ||
| 655 | .write = iproc_pcie_config_write32, | ||
| 656 | }; | ||
| 657 | |||
| 658 | static void iproc_pcie_perst_ctrl(struct iproc_pcie *pcie, bool assert) | ||
| 659 | { | ||
| 660 | u32 val; | ||
| 661 | |||
| 662 | /* | ||
| 663 | * PAXC and the internal emulated endpoint device downstream should not | ||
| 664 | * be reset. If firmware has been loaded on the endpoint device at an | ||
| 665 | * earlier boot stage, reset here causes issues. | ||
| 666 | */ | ||
| 667 | if (pcie->ep_is_internal) | ||
| 668 | return; | ||
| 669 | |||
| 670 | if (assert) { | ||
| 671 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); | ||
| 672 | val &= ~EP_PERST_SOURCE_SELECT & ~EP_MODE_SURVIVE_PERST & | ||
| 673 | ~RC_PCIE_RST_OUTPUT; | ||
| 674 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); | ||
| 675 | udelay(250); | ||
| 676 | } else { | ||
| 677 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_CLK_CTRL); | ||
| 678 | val |= RC_PCIE_RST_OUTPUT; | ||
| 679 | iproc_pcie_write_reg(pcie, IPROC_PCIE_CLK_CTRL, val); | ||
| 680 | msleep(100); | ||
| 681 | } | ||
| 682 | } | ||
| 683 | |||
| 684 | int iproc_pcie_shutdown(struct iproc_pcie *pcie) | ||
| 685 | { | ||
| 686 | iproc_pcie_perst_ctrl(pcie, true); | ||
| 687 | msleep(500); | ||
| 688 | |||
| 689 | return 0; | ||
| 690 | } | ||
| 691 | EXPORT_SYMBOL_GPL(iproc_pcie_shutdown); | ||
| 692 | |||
| 693 | static int iproc_pcie_check_link(struct iproc_pcie *pcie) | ||
| 694 | { | ||
| 695 | struct device *dev = pcie->dev; | ||
| 696 | u32 hdr_type, link_ctrl, link_status, class, val; | ||
| 697 | bool link_is_active = false; | ||
| 698 | |||
| 699 | /* | ||
| 700 | * PAXC connects to emulated endpoint devices directly and does not | ||
| 701 | * have a Serdes. Therefore skip the link detection logic here. | ||
| 702 | */ | ||
| 703 | if (pcie->ep_is_internal) | ||
| 704 | return 0; | ||
| 705 | |||
| 706 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_LINK_STATUS); | ||
| 707 | if (!(val & PCIE_PHYLINKUP) || !(val & PCIE_DL_ACTIVE)) { | ||
| 708 | dev_err(dev, "PHY or data link is INACTIVE!\n"); | ||
| 709 | return -ENODEV; | ||
| 710 | } | ||
| 711 | |||
| 712 | /* make sure we are not in EP mode */ | ||
| 713 | iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); | ||
| 714 | if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { | ||
| 715 | dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); | ||
| 716 | return -EFAULT; | ||
| 717 | } | ||
| 718 | |||
| 719 | /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ | ||
| 720 | #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c | ||
| 721 | #define PCI_CLASS_BRIDGE_MASK 0xffff00 | ||
| 722 | #define PCI_CLASS_BRIDGE_SHIFT 8 | ||
| 723 | iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, | ||
| 724 | 4, &class); | ||
| 725 | class &= ~PCI_CLASS_BRIDGE_MASK; | ||
| 726 | class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); | ||
| 727 | iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, | ||
| 728 | 4, class); | ||
| 729 | |||
| 730 | /* check link status to see if link is active */ | ||
| 731 | iproc_pci_raw_config_read32(pcie, 0, IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, | ||
| 732 | 2, &link_status); | ||
| 733 | if (link_status & PCI_EXP_LNKSTA_NLW) | ||
| 734 | link_is_active = true; | ||
| 735 | |||
| 736 | if (!link_is_active) { | ||
| 737 | /* try GEN 1 link speed */ | ||
| 738 | #define PCI_TARGET_LINK_SPEED_MASK 0xf | ||
| 739 | #define PCI_TARGET_LINK_SPEED_GEN2 0x2 | ||
| 740 | #define PCI_TARGET_LINK_SPEED_GEN1 0x1 | ||
| 741 | iproc_pci_raw_config_read32(pcie, 0, | ||
| 742 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, | ||
| 743 | 4, &link_ctrl); | ||
| 744 | if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == | ||
| 745 | PCI_TARGET_LINK_SPEED_GEN2) { | ||
| 746 | link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; | ||
| 747 | link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; | ||
| 748 | iproc_pci_raw_config_write32(pcie, 0, | ||
| 749 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKCTL2, | ||
| 750 | 4, link_ctrl); | ||
| 751 | msleep(100); | ||
| 752 | |||
| 753 | iproc_pci_raw_config_read32(pcie, 0, | ||
| 754 | IPROC_PCI_EXP_CAP + PCI_EXP_LNKSTA, | ||
| 755 | 2, &link_status); | ||
| 756 | if (link_status & PCI_EXP_LNKSTA_NLW) | ||
| 757 | link_is_active = true; | ||
| 758 | } | ||
| 759 | } | ||
| 760 | |||
| 761 | dev_info(dev, "link: %s\n", link_is_active ? "UP" : "DOWN"); | ||
| 762 | |||
| 763 | return link_is_active ? 0 : -ENODEV; | ||
| 764 | } | ||
| 765 | |||
| 766 | static void iproc_pcie_enable(struct iproc_pcie *pcie) | ||
| 767 | { | ||
| 768 | iproc_pcie_write_reg(pcie, IPROC_PCIE_INTX_EN, SYS_RC_INTX_MASK); | ||
| 769 | } | ||
| 770 | |||
| 771 | static inline bool iproc_pcie_ob_is_valid(struct iproc_pcie *pcie, | ||
| 772 | int window_idx) | ||
| 773 | { | ||
| 774 | u32 val; | ||
| 775 | |||
| 776 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_OARR0, window_idx)); | ||
| 777 | |||
| 778 | return !!(val & OARR_VALID); | ||
| 779 | } | ||
| 780 | |||
| 781 | static inline int iproc_pcie_ob_write(struct iproc_pcie *pcie, int window_idx, | ||
| 782 | int size_idx, u64 axi_addr, u64 pci_addr) | ||
| 783 | { | ||
| 784 | struct device *dev = pcie->dev; | ||
| 785 | u16 oarr_offset, omap_offset; | ||
| 786 | |||
| 787 | /* | ||
| 788 | * Derive the OARR/OMAP offset from the first pair (OARR0/OMAP0) based | ||
| 789 | * on window index. | ||
| 790 | */ | ||
| 791 | oarr_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OARR0, | ||
| 792 | window_idx)); | ||
| 793 | omap_offset = iproc_pcie_reg_offset(pcie, MAP_REG(IPROC_PCIE_OMAP0, | ||
| 794 | window_idx)); | ||
| 795 | if (iproc_pcie_reg_is_invalid(oarr_offset) || | ||
| 796 | iproc_pcie_reg_is_invalid(omap_offset)) | ||
| 797 | return -EINVAL; | ||
| 798 | |||
| 799 | /* | ||
| 800 | * Program the OARR registers. The upper 32-bit OARR register is | ||
| 801 | * always right after the lower 32-bit OARR register. | ||
| 802 | */ | ||
| 803 | writel(lower_32_bits(axi_addr) | (size_idx << OARR_SIZE_CFG_SHIFT) | | ||
| 804 | OARR_VALID, pcie->base + oarr_offset); | ||
| 805 | writel(upper_32_bits(axi_addr), pcie->base + oarr_offset + 4); | ||
| 806 | |||
| 807 | /* now program the OMAP registers */ | ||
| 808 | writel(lower_32_bits(pci_addr), pcie->base + omap_offset); | ||
| 809 | writel(upper_32_bits(pci_addr), pcie->base + omap_offset + 4); | ||
| 810 | |||
| 811 | dev_info(dev, "ob window [%d]: offset 0x%x axi %pap pci %pap\n", | ||
| 812 | window_idx, oarr_offset, &axi_addr, &pci_addr); | ||
| 813 | dev_info(dev, "oarr lo 0x%x oarr hi 0x%x\n", | ||
| 814 | readl(pcie->base + oarr_offset), | ||
| 815 | readl(pcie->base + oarr_offset + 4)); | ||
| 816 | dev_info(dev, "omap lo 0x%x omap hi 0x%x\n", | ||
| 817 | readl(pcie->base + omap_offset), | ||
| 818 | readl(pcie->base + omap_offset + 4)); | ||
| 819 | |||
| 820 | return 0; | ||
| 821 | } | ||
| 822 | |||
| 823 | /** | ||
| 824 | * Some iProc SoCs require software to configure the outbound address mapping | ||
| 825 | * | ||
| 826 | * Outbound address translation: | ||
| 827 | * | ||
| 828 | * iproc_pcie_address = axi_address - axi_offset | ||
| 829 | * OARR = iproc_pcie_address | ||
| 830 | * OMAP = pci_addr | ||
| 831 | * | ||
| 832 | * axi_addr -> iproc_pcie_address -> OARR -> OMAP -> pci_address | ||
| 833 | */ | ||
| 834 | static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, | ||
| 835 | u64 pci_addr, resource_size_t size) | ||
| 836 | { | ||
| 837 | struct iproc_pcie_ob *ob = &pcie->ob; | ||
| 838 | struct device *dev = pcie->dev; | ||
| 839 | int ret = -EINVAL, window_idx, size_idx; | ||
| 840 | |||
| 841 | if (axi_addr < ob->axi_offset) { | ||
| 842 | dev_err(dev, "axi address %pap less than offset %pap\n", | ||
| 843 | &axi_addr, &ob->axi_offset); | ||
| 844 | return -EINVAL; | ||
| 845 | } | ||
| 846 | |||
| 847 | /* | ||
| 848 | * Translate the AXI address to the internal address used by the iProc | ||
| 849 | * PCIe core before programming the OARR | ||
| 850 | */ | ||
| 851 | axi_addr -= ob->axi_offset; | ||
| 852 | |||
| 853 | /* iterate through all OARR/OMAP mapping windows */ | ||
| 854 | for (window_idx = ob->nr_windows - 1; window_idx >= 0; window_idx--) { | ||
| 855 | const struct iproc_pcie_ob_map *ob_map = | ||
| 856 | &pcie->ob_map[window_idx]; | ||
| 857 | |||
| 858 | /* | ||
| 859 | * If current outbound window is already in use, move on to the | ||
| 860 | * next one. | ||
| 861 | */ | ||
| 862 | if (iproc_pcie_ob_is_valid(pcie, window_idx)) | ||
| 863 | continue; | ||
| 864 | |||
| 865 | /* | ||
| 866 | * Iterate through all supported window sizes within the | ||
| 867 | * OARR/OMAP pair to find a match. Go through the window sizes | ||
| 868 | * in a descending order. | ||
| 869 | */ | ||
| 870 | for (size_idx = ob_map->nr_sizes - 1; size_idx >= 0; | ||
| 871 | size_idx--) { | ||
| 872 | resource_size_t window_size = | ||
| 873 | ob_map->window_sizes[size_idx] * SZ_1M; | ||
| 874 | |||
| 875 | if (size < window_size) | ||
| 876 | continue; | ||
| 877 | |||
| 878 | if (!IS_ALIGNED(axi_addr, window_size) || | ||
| 879 | !IS_ALIGNED(pci_addr, window_size)) { | ||
| 880 | dev_err(dev, | ||
| 881 | "axi %pap or pci %pap not aligned\n", | ||
| 882 | &axi_addr, &pci_addr); | ||
| 883 | return -EINVAL; | ||
| 884 | } | ||
| 885 | |||
| 886 | /* | ||
| 887 | * Match found! Program both OARR and OMAP and mark | ||
| 888 | * them as a valid entry. | ||
| 889 | */ | ||
| 890 | ret = iproc_pcie_ob_write(pcie, window_idx, size_idx, | ||
| 891 | axi_addr, pci_addr); | ||
| 892 | if (ret) | ||
| 893 | goto err_ob; | ||
| 894 | |||
| 895 | size -= window_size; | ||
| 896 | if (size == 0) | ||
| 897 | return 0; | ||
| 898 | |||
| 899 | /* | ||
| 900 | * If we are here, we are done with the current window, | ||
| 901 | * but not yet finished all mappings. Need to move on | ||
| 902 | * to the next window. | ||
| 903 | */ | ||
| 904 | axi_addr += window_size; | ||
| 905 | pci_addr += window_size; | ||
| 906 | break; | ||
| 907 | } | ||
| 908 | } | ||
| 909 | |||
| 910 | err_ob: | ||
| 911 | dev_err(dev, "unable to configure outbound mapping\n"); | ||
| 912 | dev_err(dev, | ||
| 913 | "axi %pap, axi offset %pap, pci %pap, res size %pap\n", | ||
| 914 | &axi_addr, &ob->axi_offset, &pci_addr, &size); | ||
| 915 | |||
| 916 | return ret; | ||
| 917 | } | ||
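To make the translation comment above concrete, here is one hypothetical pass through the loop, assuming the PAXB ob_map above (the addresses, the axi_offset and the 128 MB window size are illustrative, not from any DT):

    /*
     * axi_addr = 0x60000000, pci_addr = 0x60000000, size = SZ_128M,
     * ob->axi_offset = 0x20000000:
     *
     *   axi_addr -= axi_offset  ->  0x40000000
     *   both addresses are 128 MB (0x08000000) aligned, so:
     *   OARR0 lo = 0x40000000 | (size_idx 0 << 1) | OARR_VALID = 0x40000001
     *   OMAP0 lo = 0x60000000
     *   size -= SZ_128M  ->  0, done after a single window
     */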
| 918 | |||
| 919 | static int iproc_pcie_map_ranges(struct iproc_pcie *pcie, | ||
| 920 | struct list_head *resources) | ||
| 921 | { | ||
| 922 | struct device *dev = pcie->dev; | ||
| 923 | struct resource_entry *window; | ||
| 924 | int ret; | ||
| 925 | |||
| 926 | resource_list_for_each_entry(window, resources) { | ||
| 927 | struct resource *res = window->res; | ||
| 928 | u64 res_type = resource_type(res); | ||
| 929 | |||
| 930 | switch (res_type) { | ||
| 931 | case IORESOURCE_IO: | ||
| 932 | case IORESOURCE_BUS: | ||
| 933 | break; | ||
| 934 | case IORESOURCE_MEM: | ||
| 935 | ret = iproc_pcie_setup_ob(pcie, res->start, | ||
| 936 | res->start - window->offset, | ||
| 937 | resource_size(res)); | ||
| 938 | if (ret) | ||
| 939 | return ret; | ||
| 940 | break; | ||
| 941 | default: | ||
| 942 | dev_err(dev, "invalid resource %pR\n", res); | ||
| 943 | return -EINVAL; | ||
| 944 | } | ||
| 945 | } | ||
| 946 | |||
| 947 | return 0; | ||
| 948 | } | ||
| 949 | |||
| 950 | static inline bool iproc_pcie_ib_is_in_use(struct iproc_pcie *pcie, | ||
| 951 | int region_idx) | ||
| 952 | { | ||
| 953 | const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; | ||
| 954 | u32 val; | ||
| 955 | |||
| 956 | val = iproc_pcie_read_reg(pcie, MAP_REG(IPROC_PCIE_IARR0, region_idx)); | ||
| 957 | |||
| 958 | return !!(val & (BIT(ib_map->nr_sizes) - 1)); | ||
| 959 | } | ||
| 960 | |||
| 961 | static inline bool iproc_pcie_ib_check_type(const struct iproc_pcie_ib_map *ib_map, | ||
| 962 | enum iproc_pcie_ib_map_type type) | ||
| 963 | { | ||
| 964 | return !!(ib_map->type == type); | ||
| 965 | } | ||
| 966 | |||
| 967 | static int iproc_pcie_ib_write(struct iproc_pcie *pcie, int region_idx, | ||
| 968 | int size_idx, int nr_windows, u64 axi_addr, | ||
| 969 | u64 pci_addr, resource_size_t size) | ||
| 970 | { | ||
| 971 | struct device *dev = pcie->dev; | ||
| 972 | const struct iproc_pcie_ib_map *ib_map = &pcie->ib_map[region_idx]; | ||
| 973 | u16 iarr_offset, imap_offset; | ||
| 974 | u32 val; | ||
| 975 | int window_idx; | ||
| 976 | |||
| 977 | iarr_offset = iproc_pcie_reg_offset(pcie, | ||
| 978 | MAP_REG(IPROC_PCIE_IARR0, region_idx)); | ||
| 979 | imap_offset = iproc_pcie_reg_offset(pcie, | ||
| 980 | MAP_REG(IPROC_PCIE_IMAP0, region_idx)); | ||
| 981 | if (iproc_pcie_reg_is_invalid(iarr_offset) || | ||
| 982 | iproc_pcie_reg_is_invalid(imap_offset)) | ||
| 983 | return -EINVAL; | ||
| 984 | |||
| 985 | dev_info(dev, "ib region [%d]: offset 0x%x axi %pap pci %pap\n", | ||
| 986 | region_idx, iarr_offset, &axi_addr, &pci_addr); | ||
| 987 | |||
| 988 | /* | ||
| 989 | * Program the IARR registers. The upper 32-bit IARR register is | ||
| 990 | * always right after the lower 32-bit IARR register. | ||
| 991 | */ | ||
| 992 | writel(lower_32_bits(pci_addr) | BIT(size_idx), | ||
| 993 | pcie->base + iarr_offset); | ||
| 994 | writel(upper_32_bits(pci_addr), pcie->base + iarr_offset + 4); | ||
| 995 | |||
| 996 | dev_info(dev, "iarr lo 0x%x iarr hi 0x%x\n", | ||
| 997 | readl(pcie->base + iarr_offset), | ||
| 998 | readl(pcie->base + iarr_offset + 4)); | ||
| 999 | |||
| 1000 | /* | ||
| 1001 | * Now program the IMAP registers. Each IARR region may have one or | ||
| 1002 | * more IMAP windows. | ||
| 1003 | */ | ||
| 1004 | size >>= ilog2(nr_windows); | ||
| 1005 | for (window_idx = 0; window_idx < nr_windows; window_idx++) { | ||
| 1006 | val = readl(pcie->base + imap_offset); | ||
| 1007 | val |= lower_32_bits(axi_addr) | IMAP_VALID; | ||
| 1008 | writel(val, pcie->base + imap_offset); | ||
| 1009 | writel(upper_32_bits(axi_addr), | ||
| 1010 | pcie->base + imap_offset + ib_map->imap_addr_offset); | ||
| 1011 | |||
| 1012 | dev_info(dev, "imap window [%d] lo 0x%x hi 0x%x\n", | ||
| 1013 | window_idx, readl(pcie->base + imap_offset), | ||
| 1014 | readl(pcie->base + imap_offset + | ||
| 1015 | ib_map->imap_addr_offset)); | ||
| 1016 | |||
| 1017 | imap_offset += ib_map->imap_window_offset; | ||
| 1018 | axi_addr += size; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | return 0; | ||
| 1022 | } | ||
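When a region is backed by several IMAP windows, the region size is split evenly across them. For example, under the paxb_v2_ib_map table above, a hypothetical 32 GB IARR3 mapping works out as:

    /*
     * nr_windows = 8, size = 32 GB:
     *
     *   size >>= ilog2(8)  ->  4 GB per IMAP window
     *   window 0 maps axi_addr, window 1 maps axi_addr + 4 GB, ...,
     *   window 7 maps axi_addr + 28 GB
     */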
| 1023 | |||
| 1024 | static int iproc_pcie_setup_ib(struct iproc_pcie *pcie, | ||
| 1025 | struct of_pci_range *range, | ||
| 1026 | enum iproc_pcie_ib_map_type type) | ||
| 1027 | { | ||
| 1028 | struct device *dev = pcie->dev; | ||
| 1029 | struct iproc_pcie_ib *ib = &pcie->ib; | ||
| 1030 | int ret; | ||
| 1031 | unsigned int region_idx, size_idx; | ||
| 1032 | u64 axi_addr = range->cpu_addr, pci_addr = range->pci_addr; | ||
| 1033 | resource_size_t size = range->size; | ||
| 1034 | |||
| 1035 | /* iterate through all IARR mapping regions */ | ||
| 1036 | for (region_idx = 0; region_idx < ib->nr_regions; region_idx++) { | ||
| 1037 | const struct iproc_pcie_ib_map *ib_map = | ||
| 1038 | &pcie->ib_map[region_idx]; | ||
| 1039 | |||
| 1040 | /* | ||
| 1041 | * If current inbound region is already in use or not a | ||
| 1042 | * compatible type, move on to the next. | ||
| 1043 | */ | ||
| 1044 | if (iproc_pcie_ib_is_in_use(pcie, region_idx) || | ||
| 1045 | !iproc_pcie_ib_check_type(ib_map, type)) | ||
| 1046 | continue; | ||
| 1047 | |||
| 1048 | /* iterate through all supported region sizes to find a match */ | ||
| 1049 | for (size_idx = 0; size_idx < ib_map->nr_sizes; size_idx++) { | ||
| 1050 | resource_size_t region_size = | ||
| 1051 | ib_map->region_sizes[size_idx] * ib_map->size_unit; | ||
| 1052 | |||
| 1053 | if (size != region_size) | ||
| 1054 | continue; | ||
| 1055 | |||
| 1056 | if (!IS_ALIGNED(axi_addr, region_size) || | ||
| 1057 | !IS_ALIGNED(pci_addr, region_size)) { | ||
| 1058 | dev_err(dev, | ||
| 1059 | "axi %pap or pci %pap not aligned\n", | ||
| 1060 | &axi_addr, &pci_addr); | ||
| 1061 | return -EINVAL; | ||
| 1062 | } | ||
| 1063 | |||
| 1064 | /* Match found! Program IARR and all IMAP windows. */ | ||
| 1065 | ret = iproc_pcie_ib_write(pcie, region_idx, size_idx, | ||
| 1066 | ib_map->nr_windows, axi_addr, | ||
| 1067 | pci_addr, size); | ||
| 1068 | if (ret) | ||
| 1069 | goto err_ib; | ||
| 1070 | else | ||
| 1071 | return 0; | ||
| 1072 | |||
| 1073 | } | ||
| 1074 | } | ||
| 1075 | ret = -EINVAL; | ||
| 1076 | |||
| 1077 | err_ib: | ||
| 1078 | dev_err(dev, "unable to configure inbound mapping\n"); | ||
| 1079 | dev_err(dev, "axi %pap, pci %pap, res size %pap\n", | ||
| 1080 | &axi_addr, &pci_addr, &size); | ||
| 1081 | |||
| 1082 | return ret; | ||
| 1083 | } | ||
| 1084 | |||
| 1085 | static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) | ||
| 1086 | { | ||
| 1087 | struct of_pci_range range; | ||
| 1088 | struct of_pci_range_parser parser; | ||
| 1089 | int ret; | ||
| 1090 | |||
| 1091 | /* Get the dma-ranges from DT */ | ||
| 1092 | ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); | ||
| 1093 | if (ret) | ||
| 1094 | return ret; | ||
| 1095 | |||
| 1096 | for_each_of_pci_range(&parser, &range) { | ||
| 1097 | /* Each range entry corresponds to an inbound mapping region */ | ||
| 1098 | ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); | ||
| 1099 | if (ret) | ||
| 1100 | return ret; | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | return 0; | ||
| 1104 | } | ||
| 1105 | |||
| 1106 | static int iproce_pcie_get_msi(struct iproc_pcie *pcie, | ||
| 1107 | struct device_node *msi_node, | ||
| 1108 | u64 *msi_addr) | ||
| 1109 | { | ||
| 1110 | struct device *dev = pcie->dev; | ||
| 1111 | int ret; | ||
| 1112 | struct resource res; | ||
| 1113 | |||
| 1114 | /* | ||
| 1115 | * Check if 'msi-map' points to ARM GICv3 ITS, which is the only | ||
| 1116 | * supported external MSI controller that requires steering. | ||
| 1117 | */ | ||
| 1118 | if (!of_device_is_compatible(msi_node, "arm,gic-v3-its")) { | ||
| 1119 | dev_err(dev, "unable to find compatible MSI controller\n"); | ||
| 1120 | return -ENODEV; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | /* derive GITS_TRANSLATER address from GICv3 */ | ||
| 1124 | ret = of_address_to_resource(msi_node, 0, &res); | ||
| 1125 | if (ret < 0) { | ||
| 1126 | dev_err(dev, "unable to obtain MSI controller resources\n"); | ||
| 1127 | return ret; | ||
| 1128 | } | ||
| 1129 | |||
| 1130 | *msi_addr = res.start + GITS_TRANSLATER; | ||
| 1131 | return 0; | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | static int iproc_pcie_paxb_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) | ||
| 1135 | { | ||
| 1136 | int ret; | ||
| 1137 | struct of_pci_range range; | ||
| 1138 | |||
| 1139 | memset(&range, 0, sizeof(range)); | ||
| 1140 | range.size = SZ_32K; | ||
| 1141 | range.pci_addr = range.cpu_addr = msi_addr & ~(range.size - 1); | ||
| 1142 | |||
| 1143 | ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_IO); | ||
| 1144 | return ret; | ||
| 1145 | } | ||
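The masking above simply rounds the GITS_TRANSLATER address down to a 32 KB boundary so that the identity-mapped inbound IO window is guaranteed to cover it. With a hypothetical ITS translater at 0x63c10040:

    /*
     *   msi_addr & ~(SZ_32K - 1) = 0x63c10040 & 0xffffffffffff8000
     *                            = 0x63c10000
     *
     * The 32 KB window [0x63c10000, 0x63c17fff] contains 0x63c10040,
     * and pci_addr == cpu_addr, i.e. a 1:1 mapping.
     */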
| 1146 | |||
| 1147 | static void iproc_pcie_paxc_v2_msi_steer(struct iproc_pcie *pcie, u64 msi_addr) | ||
| 1148 | { | ||
| 1149 | u32 val; | ||
| 1150 | |||
| 1151 | /* | ||
| 1152 | * Program bits [43:13] of the address of the GITS_TRANSLATER register into | ||
| 1153 | * bits [30:0] of the MSI base address register. In fact, in all iProc | ||
| 1154 | * based SoCs, all I/O register bases are well below the 32-bit | ||
| 1155 | * boundary, so we can safely assume bits [43:32] are always zeros. | ||
| 1156 | */ | ||
| 1157 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_BASE_ADDR, | ||
| 1158 | (u32)(msi_addr >> 13)); | ||
| 1159 | |||
| 1160 | /* use a default 8K window size */ | ||
| 1161 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_WINDOW_SIZE, 0); | ||
| 1162 | |||
| 1163 | /* steering MSI to GICv3 ITS */ | ||
| 1164 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_GIC_MODE); | ||
| 1165 | val |= GIC_V3_CFG; | ||
| 1166 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_GIC_MODE, val); | ||
| 1167 | |||
| 1168 | /* | ||
| 1169 | * Program bits [43:2] of the address of the GITS_TRANSLATER register into the | ||
| 1170 | * iProc MSI address registers. | ||
| 1171 | */ | ||
| 1172 | msi_addr >>= 2; | ||
| 1173 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_HI, | ||
| 1174 | upper_32_bits(msi_addr)); | ||
| 1175 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_ADDR_LO, | ||
| 1176 | lower_32_bits(msi_addr)); | ||
| 1177 | |||
| 1178 | /* enable MSI */ | ||
| 1179 | val = iproc_pcie_read_reg(pcie, IPROC_PCIE_MSI_EN_CFG); | ||
| 1180 | val |= MSI_ENABLE_CFG; | ||
| 1181 | iproc_pcie_write_reg(pcie, IPROC_PCIE_MSI_EN_CFG, val); | ||
| 1182 | } | ||
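A worked example of the two address encodings above, assuming a hypothetical GITS_TRANSLATER at physical address 0x63c10000:

    /*
     *   MSI_BASE_ADDR = 0x63c10000 >> 13                 = 0x00031e08
     *   MSI_ADDR_HI   = upper_32_bits(0x63c10000 >> 2)   = 0x0
     *   MSI_ADDR_LO   = lower_32_bits(0x63c10000 >> 2)   = 0x18f04000
     */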
| 1183 | |||
| 1184 | static int iproc_pcie_msi_steer(struct iproc_pcie *pcie, | ||
| 1185 | struct device_node *msi_node) | ||
| 1186 | { | ||
| 1187 | struct device *dev = pcie->dev; | ||
| 1188 | int ret; | ||
| 1189 | u64 msi_addr; | ||
| 1190 | |||
| 1191 | ret = iproce_pcie_get_msi(pcie, msi_node, &msi_addr); | ||
| 1192 | if (ret < 0) { | ||
| 1193 | dev_err(dev, "msi steering failed\n"); | ||
| 1194 | return ret; | ||
| 1195 | } | ||
| 1196 | |||
| 1197 | switch (pcie->type) { | ||
| 1198 | case IPROC_PCIE_PAXB_V2: | ||
| 1199 | ret = iproc_pcie_paxb_v2_msi_steer(pcie, msi_addr); | ||
| 1200 | if (ret) | ||
| 1201 | return ret; | ||
| 1202 | break; | ||
| 1203 | case IPROC_PCIE_PAXC_V2: | ||
| 1204 | iproc_pcie_paxc_v2_msi_steer(pcie, msi_addr); | ||
| 1205 | break; | ||
| 1206 | default: | ||
| 1207 | return -EINVAL; | ||
| 1208 | } | ||
| 1209 | |||
| 1210 | return 0; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) | ||
| 1214 | { | ||
| 1215 | struct device_node *msi_node; | ||
| 1216 | int ret; | ||
| 1217 | |||
| 1218 | /* | ||
| 1219 | * Either the "msi-parent" or the "msi-map" phandle needs to exist | ||
| 1220 | * for us to obtain the MSI node. | ||
| 1221 | */ | ||
| 1222 | |||
| 1223 | msi_node = of_parse_phandle(pcie->dev->of_node, "msi-parent", 0); | ||
| 1224 | if (!msi_node) { | ||
| 1225 | const __be32 *msi_map = NULL; | ||
| 1226 | int len; | ||
| 1227 | u32 phandle; | ||
| 1228 | |||
| 1229 | msi_map = of_get_property(pcie->dev->of_node, "msi-map", &len); | ||
| 1230 | if (!msi_map) | ||
| 1231 | return -ENODEV; | ||
| 1232 | |||
| 1233 | phandle = be32_to_cpup(msi_map + 1); | ||
| 1234 | msi_node = of_find_node_by_phandle(phandle); | ||
| 1235 | if (!msi_node) | ||
| 1236 | return -ENODEV; | ||
| 1237 | } | ||
| 1238 | |||
| 1239 | /* | ||
| 1240 | * Certain revisions of the iProc PCIe controller require additional | ||
| 1241 | * configurations to steer the MSI writes towards an external MSI | ||
| 1242 | * controller. | ||
| 1243 | */ | ||
| 1244 | if (pcie->need_msi_steer) { | ||
| 1245 | ret = iproc_pcie_msi_steer(pcie, msi_node); | ||
| 1246 | if (ret) | ||
| 1247 | return ret; | ||
| 1248 | } | ||
| 1249 | |||
| 1250 | /* | ||
| 1251 | * If another MSI controller is being used, the call below should fail, | ||
| 1252 | * but that is okay. | ||
| 1253 | */ | ||
| 1254 | return iproc_msi_init(pcie, msi_node); | ||
| 1255 | } | ||
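The manual "msi-map" parse above leans on the property's fixed layout: each entry is four big-endian cells, <rid-base msi-controller-phandle msi-base length>, which is why the phandle is read from msi_map + 1 with be32_to_cpup(). A sketch of one entry (the field values mentioned are illustrative, not taken from a real device tree):

#include <stdint.h>

/* One "msi-map" entry as laid out in the flattened device tree */
struct msi_map_entry {
	uint32_t rid_base;	/* first requester ID covered by this entry */
	uint32_t phandle;	/* MSI controller phandle, e.g. the GIC ITS */
	uint32_t msi_base;	/* first MSI specifier the RIDs map onto */
	uint32_t length;	/* number of RIDs covered, e.g. 0x10000 */
};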
| 1256 | |||
| 1257 | static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) | ||
| 1258 | { | ||
| 1259 | iproc_msi_exit(pcie); | ||
| 1260 | } | ||
| 1261 | |||
| 1262 | static int iproc_pcie_rev_init(struct iproc_pcie *pcie) | ||
| 1263 | { | ||
| 1264 | struct device *dev = pcie->dev; | ||
| 1265 | unsigned int reg_idx; | ||
| 1266 | const u16 *regs; | ||
| 1267 | |||
| 1268 | switch (pcie->type) { | ||
| 1269 | case IPROC_PCIE_PAXB_BCMA: | ||
| 1270 | regs = iproc_pcie_reg_paxb_bcma; | ||
| 1271 | break; | ||
| 1272 | case IPROC_PCIE_PAXB: | ||
| 1273 | regs = iproc_pcie_reg_paxb; | ||
| 1274 | pcie->has_apb_err_disable = true; | ||
| 1275 | if (pcie->need_ob_cfg) { | ||
| 1276 | pcie->ob_map = paxb_ob_map; | ||
| 1277 | pcie->ob.nr_windows = ARRAY_SIZE(paxb_ob_map); | ||
| 1278 | } | ||
| 1279 | break; | ||
| 1280 | case IPROC_PCIE_PAXB_V2: | ||
| 1281 | regs = iproc_pcie_reg_paxb_v2; | ||
| 1282 | pcie->has_apb_err_disable = true; | ||
| 1283 | if (pcie->need_ob_cfg) { | ||
| 1284 | pcie->ob_map = paxb_v2_ob_map; | ||
| 1285 | pcie->ob.nr_windows = ARRAY_SIZE(paxb_v2_ob_map); | ||
| 1286 | } | ||
| 1287 | pcie->ib.nr_regions = ARRAY_SIZE(paxb_v2_ib_map); | ||
| 1288 | pcie->ib_map = paxb_v2_ib_map; | ||
| 1289 | pcie->need_msi_steer = true; | ||
| 1290 | dev_warn(dev, "reads of config registers that contain %#x return incorrect data\n", | ||
| 1291 | CFG_RETRY_STATUS); | ||
| 1292 | break; | ||
| 1293 | case IPROC_PCIE_PAXC: | ||
| 1294 | regs = iproc_pcie_reg_paxc; | ||
| 1295 | pcie->ep_is_internal = true; | ||
| 1296 | break; | ||
| 1297 | case IPROC_PCIE_PAXC_V2: | ||
| 1298 | regs = iproc_pcie_reg_paxc_v2; | ||
| 1299 | pcie->ep_is_internal = true; | ||
| 1300 | pcie->need_msi_steer = true; | ||
| 1301 | break; | ||
| 1302 | default: | ||
| 1303 | dev_err(dev, "incompatible iProc PCIe interface\n"); | ||
| 1304 | return -EINVAL; | ||
| 1305 | } | ||
| 1306 | |||
| 1307 | pcie->reg_offsets = devm_kcalloc(dev, IPROC_PCIE_MAX_NUM_REG, | ||
| 1308 | sizeof(*pcie->reg_offsets), | ||
| 1309 | GFP_KERNEL); | ||
| 1310 | if (!pcie->reg_offsets) | ||
| 1311 | return -ENOMEM; | ||
| 1312 | |||
| 1313 | /* Populate all valid registers. Index 0 is special-cased: a zero table | ||
| 1313 | * entry means "invalid" at every other index, but offset 0 is legitimate here. */ | ||
| 1314 | pcie->reg_offsets[0] = (pcie->type == IPROC_PCIE_PAXC_V2) ? | ||
| 1315 | IPROC_PCIE_REG_INVALID : regs[0]; | ||
| 1316 | for (reg_idx = 1; reg_idx < IPROC_PCIE_MAX_NUM_REG; reg_idx++) | ||
| 1317 | pcie->reg_offsets[reg_idx] = regs[reg_idx] ? | ||
| 1318 | regs[reg_idx] : IPROC_PCIE_REG_INVALID; | ||
| 1319 | |||
| 1320 | return 0; | ||
| 1321 | } | ||
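The reg_offsets table built here feeds the iproc_pcie_read_reg()/iproc_pcie_write_reg() accessors used throughout this file. A plausible shape for the write path, as a sketch rather than the verbatim implementation:

/* Sketch: a reg_offsets-based accessor guarding against registers that
 * do not exist on the current controller revision. */
static void example_write_reg(struct iproc_pcie *pcie,
			      unsigned int reg, u32 val)
{
	u16 offset = pcie->reg_offsets[reg];

	if (offset == IPROC_PCIE_REG_INVALID)
		return;		/* register absent on this revision */

	writel(val, pcie->base + offset);
}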
| 1322 | |||
| 1323 | int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) | ||
| 1324 | { | ||
| 1325 | struct device *dev; | ||
| 1326 | int ret; | ||
| 1327 | struct pci_bus *child; | ||
| 1328 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 1329 | |||
| 1330 | dev = pcie->dev; | ||
| 1331 | |||
| 1332 | ret = iproc_pcie_rev_init(pcie); | ||
| 1333 | if (ret) { | ||
| 1334 | dev_err(dev, "unable to initialize controller parameters\n"); | ||
| 1335 | return ret; | ||
| 1336 | } | ||
| 1337 | |||
| 1338 | ret = devm_request_pci_bus_resources(dev, res); | ||
| 1339 | if (ret) | ||
| 1340 | return ret; | ||
| 1341 | |||
| 1342 | ret = phy_init(pcie->phy); | ||
| 1343 | if (ret) { | ||
| 1344 | dev_err(dev, "unable to initialize PCIe PHY\n"); | ||
| 1345 | return ret; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | ret = phy_power_on(pcie->phy); | ||
| 1349 | if (ret) { | ||
| 1350 | dev_err(dev, "unable to power on PCIe PHY\n"); | ||
| 1351 | goto err_exit_phy; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | iproc_pcie_perst_ctrl(pcie, true); | ||
| 1355 | iproc_pcie_perst_ctrl(pcie, false); | ||
| 1356 | |||
| 1357 | if (pcie->need_ob_cfg) { | ||
| 1358 | ret = iproc_pcie_map_ranges(pcie, res); | ||
| 1359 | if (ret) { | ||
| 1360 | dev_err(dev, "map failed\n"); | ||
| 1361 | goto err_power_off_phy; | ||
| 1362 | } | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | if (pcie->need_ib_cfg) { | ||
| 1366 | ret = iproc_pcie_map_dma_ranges(pcie); | ||
| 1367 | if (ret && ret != -ENOENT) | ||
| 1368 | goto err_power_off_phy; | ||
| 1369 | } | ||
| 1370 | |||
| 1371 | ret = iproc_pcie_check_link(pcie); | ||
| 1372 | if (ret) { | ||
| 1373 | dev_err(dev, "no PCIe EP device detected\n"); | ||
| 1374 | goto err_power_off_phy; | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | iproc_pcie_enable(pcie); | ||
| 1378 | |||
| 1379 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 1380 | if (iproc_pcie_msi_enable(pcie)) | ||
| 1381 | dev_info(dev, "not using iProc MSI\n"); | ||
| 1382 | |||
| 1383 | list_splice_init(res, &host->windows); | ||
| 1384 | host->busnr = 0; | ||
| 1385 | host->dev.parent = dev; | ||
| 1386 | host->ops = &iproc_pcie_ops; | ||
| 1387 | host->sysdata = pcie; | ||
| 1388 | host->map_irq = pcie->map_irq; | ||
| 1389 | host->swizzle_irq = pci_common_swizzle; | ||
| 1390 | |||
| 1391 | ret = pci_scan_root_bus_bridge(host); | ||
| 1392 | if (ret < 0) { | ||
| 1393 | dev_err(dev, "failed to scan host: %d\n", ret); | ||
| 1394 | goto err_power_off_phy; | ||
| 1395 | } | ||
| 1396 | |||
| 1397 | pci_assign_unassigned_bus_resources(host->bus); | ||
| 1398 | |||
| 1399 | pcie->root_bus = host->bus; | ||
| 1400 | |||
| 1401 | list_for_each_entry(child, &host->bus->children, node) | ||
| 1402 | pcie_bus_configure_settings(child); | ||
| 1403 | |||
| 1404 | pci_bus_add_devices(host->bus); | ||
| 1405 | |||
| 1406 | return 0; | ||
| 1407 | |||
| 1408 | err_power_off_phy: | ||
| 1409 | phy_power_off(pcie->phy); | ||
| 1410 | err_exit_phy: | ||
| 1411 | phy_exit(pcie->phy); | ||
| 1412 | return ret; | ||
| 1413 | } | ||
| 1414 | EXPORT_SYMBOL(iproc_pcie_setup); | ||
| 1415 | |||
| 1416 | int iproc_pcie_remove(struct iproc_pcie *pcie) | ||
| 1417 | { | ||
| 1418 | pci_stop_root_bus(pcie->root_bus); | ||
| 1419 | pci_remove_root_bus(pcie->root_bus); | ||
| 1420 | |||
| 1421 | iproc_pcie_msi_disable(pcie); | ||
| 1422 | |||
| 1423 | phy_power_off(pcie->phy); | ||
| 1424 | phy_exit(pcie->phy); | ||
| 1425 | |||
| 1426 | return 0; | ||
| 1427 | } | ||
| 1428 | EXPORT_SYMBOL(iproc_pcie_remove); | ||
| 1429 | |||
| 1430 | MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>"); | ||
| 1431 | MODULE_DESCRIPTION("Broadcom iPROC PCIe common driver"); | ||
| 1432 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h new file mode 100644 index 000000000000..814b600b383a --- /dev/null +++ b/drivers/pci/controller/pcie-iproc.h | |||
| @@ -0,0 +1,119 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2014-2015 Broadcom Corporation | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _PCIE_IPROC_H | ||
| 7 | #define _PCIE_IPROC_H | ||
| 8 | |||
| 9 | /** | ||
| 10 | * iProc PCIe interface type | ||
| 11 | * | ||
| 12 | * PAXB is the wrapper used in root complexes that can be connected to | ||
| 13 | * external endpoint devices. | ||
| 14 | * | ||
| 15 | * PAXC is the wrapper used in root complexes dedicated to internal, | ||
| 16 | * emulated endpoint devices. | ||
| 17 | */ | ||
| 18 | enum iproc_pcie_type { | ||
| 19 | IPROC_PCIE_PAXB_BCMA = 0, | ||
| 20 | IPROC_PCIE_PAXB, | ||
| 21 | IPROC_PCIE_PAXB_V2, | ||
| 22 | IPROC_PCIE_PAXC, | ||
| 23 | IPROC_PCIE_PAXC_V2, | ||
| 24 | }; | ||
| 25 | |||
| 26 | /** | ||
| 27 | * iProc PCIe outbound mapping | ||
| 28 | * @axi_offset: offset from the AXI address to the internal address used by | ||
| 29 | * the iProc PCIe core | ||
| 30 | * @nr_windows: total number of supported outbound mapping windows | ||
| 31 | */ | ||
| 32 | struct iproc_pcie_ob { | ||
| 33 | resource_size_t axi_offset; | ||
| 34 | unsigned int nr_windows; | ||
| 35 | }; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * iProc PCIe inbound mapping | ||
| 39 | * @nr_regions: total number of supported inbound mapping regions | ||
| 40 | */ | ||
| 41 | struct iproc_pcie_ib { | ||
| 42 | unsigned int nr_regions; | ||
| 43 | }; | ||
| 44 | |||
| 45 | struct iproc_pcie_ob_map; | ||
| 46 | struct iproc_pcie_ib_map; | ||
| 47 | struct iproc_msi; | ||
| 48 | |||
| 49 | /** | ||
| 50 | * iProc PCIe device | ||
| 51 | * | ||
| 52 | * @dev: pointer to device data structure | ||
| 53 | * @type: iProc PCIe interface type | ||
| 54 | * @reg_offsets: register offsets | ||
| 55 | * @base: PCIe host controller I/O register base | ||
| 56 | * @base_addr: PCIe host controller register base physical address | ||
| 56 | * @mem: host bridge memory window resource | ||
| 57 | * @root_bus: pointer to root bus | ||
| 58 | * @phy: optional PHY device that controls the Serdes | ||
| 59 | * @map_irq: function callback to map interrupts | ||
| 60 | * @ep_is_internal: indicates an internal emulated endpoint device is connected | ||
| 61 | * @has_apb_err_disable: indicates the controller can be configured to prevent | ||
| 62 | * unsupported requests from being forwarded as an APB bus error | ||
| 63 | * | ||
| 64 | * @need_ob_cfg: indicates SW needs to configure the outbound mapping window | ||
| 65 | * @ob: outbound mapping related parameters | ||
| 66 | * @ob_map: outbound mapping related parameters specific to the controller | ||
| 67 | * | ||
| 68 | * @need_ib_cfg: indicates SW needs to configure the inbound mapping window | ||
| 69 | * @ib: inbound mapping related parameters | ||
| 70 | * @ib_map: inbound mapping region related parameters | ||
| 71 | * | ||
| 72 | * @need_msi_steer: indicates additional configuration of the iProc PCIe | ||
| 73 | * controller is required to steer MSI writes to an external interrupt controller | ||
| 74 | * @msi: MSI data | ||
| 75 | */ | ||
| 76 | struct iproc_pcie { | ||
| 77 | struct device *dev; | ||
| 78 | enum iproc_pcie_type type; | ||
| 79 | u16 *reg_offsets; | ||
| 80 | void __iomem *base; | ||
| 81 | phys_addr_t base_addr; | ||
| 82 | struct resource mem; | ||
| 83 | struct pci_bus *root_bus; | ||
| 84 | struct phy *phy; | ||
| 85 | int (*map_irq)(const struct pci_dev *, u8, u8); | ||
| 86 | bool ep_is_internal; | ||
| 87 | bool has_apb_err_disable; | ||
| 88 | |||
| 89 | bool need_ob_cfg; | ||
| 90 | struct iproc_pcie_ob ob; | ||
| 91 | const struct iproc_pcie_ob_map *ob_map; | ||
| 92 | |||
| 93 | bool need_ib_cfg; | ||
| 94 | struct iproc_pcie_ib ib; | ||
| 95 | const struct iproc_pcie_ib_map *ib_map; | ||
| 96 | |||
| 97 | bool need_msi_steer; | ||
| 98 | struct iproc_msi *msi; | ||
| 99 | }; | ||
| 100 | |||
| 101 | int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res); | ||
| 102 | int iproc_pcie_remove(struct iproc_pcie *pcie); | ||
| 103 | int iproc_pcie_shutdown(struct iproc_pcie *pcie); | ||
| 104 | |||
| 105 | #ifdef CONFIG_PCIE_IPROC_MSI | ||
| 106 | int iproc_msi_init(struct iproc_pcie *pcie, struct device_node *node); | ||
| 107 | void iproc_msi_exit(struct iproc_pcie *pcie); | ||
| 108 | #else | ||
| 109 | static inline int iproc_msi_init(struct iproc_pcie *pcie, | ||
| 110 | struct device_node *node) | ||
| 111 | { | ||
| 112 | return -ENODEV; | ||
| 113 | } | ||
| 114 | static inline void iproc_msi_exit(struct iproc_pcie *pcie) | ||
| 115 | { | ||
| 116 | } | ||
| 117 | #endif | ||
| 118 | |||
| 119 | #endif /* _PCIE_IPROC_H */ | ||
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c new file mode 100644 index 000000000000..0baabe30858f --- /dev/null +++ b/drivers/pci/controller/pcie-mediatek.c | |||
| @@ -0,0 +1,1218 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * MediaTek PCIe host controller driver. | ||
| 4 | * | ||
| 5 | * Copyright (c) 2017 MediaTek Inc. | ||
| 6 | * Author: Ryder Lee <ryder.lee@mediatek.com> | ||
| 7 | * Honghui Zhang <honghui.zhang@mediatek.com> | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/clk.h> | ||
| 11 | #include <linux/delay.h> | ||
| 12 | #include <linux/iopoll.h> | ||
| 13 | #include <linux/irq.h> | ||
| 14 | #include <linux/irqchip/chained_irq.h> | ||
| 15 | #include <linux/irqdomain.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/msi.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_pci.h> | ||
| 20 | #include <linux/of_platform.h> | ||
| 21 | #include <linux/pci.h> | ||
| 22 | #include <linux/phy/phy.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/pm_runtime.h> | ||
| 25 | #include <linux/reset.h> | ||
| 26 | |||
| 27 | #include "../pci.h" | ||
| 28 | |||
| 29 | /* PCIe shared registers */ | ||
| 30 | #define PCIE_SYS_CFG 0x00 | ||
| 31 | #define PCIE_INT_ENABLE 0x0c | ||
| 32 | #define PCIE_CFG_ADDR 0x20 | ||
| 33 | #define PCIE_CFG_DATA 0x24 | ||
| 34 | |||
| 35 | /* PCIe per port registers */ | ||
| 36 | #define PCIE_BAR0_SETUP 0x10 | ||
| 37 | #define PCIE_CLASS 0x34 | ||
| 38 | #define PCIE_LINK_STATUS 0x50 | ||
| 39 | |||
| 40 | #define PCIE_PORT_INT_EN(x) BIT(20 + (x)) | ||
| 41 | #define PCIE_PORT_PERST(x) BIT(1 + (x)) | ||
| 42 | #define PCIE_PORT_LINKUP BIT(0) | ||
| 43 | #define PCIE_BAR_MAP_MAX GENMASK(31, 16) | ||
| 44 | |||
| 45 | #define PCIE_BAR_ENABLE BIT(0) | ||
| 46 | #define PCIE_REVISION_ID BIT(0) | ||
| 47 | #define PCIE_CLASS_CODE (0x60400 << 8) | ||
| 48 | #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ | ||
| 49 | ((((regn) >> 8) & GENMASK(3, 0)) << 24)) | ||
| 50 | #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) | ||
| 51 | #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) | ||
| 52 | #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) | ||
| 53 | #define PCIE_CONF_ADDR(regn, fun, dev, bus) \ | ||
| 54 | (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ | ||
| 55 | PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) | ||
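A worked example of the composition above (the bus/device/function/register values are arbitrary): reading BAR0 (register 0x10) of bus 2, device 1, function 0 yields

/*
 *   PCIE_CONF_REG(0x10) = 0x10 & GENMASK(7, 2)        = 0x00010
 *   PCIE_CONF_FUN(0)    = 0
 *   PCIE_CONF_DEV(1)    = (1 << 11) & GENMASK(15, 11) = 0x00800
 *   PCIE_CONF_BUS(2)    = (2 << 16) & GENMASK(23, 16) = 0x20000
 *   PCIE_CONF_ADDR(0x10, 0, 1, 2)                     = 0x20810
 * Extended config registers (regn >= 0x100) land in bits [27:24].
 */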
| 56 | |||
| 57 | /* MediaTek specific configuration registers */ | ||
| 58 | #define PCIE_FTS_NUM 0x70c | ||
| 59 | #define PCIE_FTS_NUM_MASK GENMASK(15, 8) | ||
| 60 | #define PCIE_FTS_NUM_L0(x) (((x) & 0xff) << 8) | ||
| 61 | |||
| 62 | #define PCIE_FC_CREDIT 0x73c | ||
| 63 | #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) | ||
| 64 | #define PCIE_FC_CREDIT_VAL(x) ((x) << 16) | ||
| 65 | |||
| 66 | /* PCIe V2 share registers */ | ||
| 67 | #define PCIE_SYS_CFG_V2 0x0 | ||
| 68 | #define PCIE_CSR_LTSSM_EN(x) BIT(0 + (x) * 8) | ||
| 69 | #define PCIE_CSR_ASPM_L1_EN(x) BIT(1 + (x) * 8) | ||
| 70 | |||
| 71 | /* PCIe V2 per-port registers */ | ||
| 72 | #define PCIE_MSI_VECTOR 0x0c0 | ||
| 73 | |||
| 74 | #define PCIE_CONF_VEND_ID 0x100 | ||
| 75 | #define PCIE_CONF_CLASS_ID 0x106 | ||
| 76 | |||
| 77 | #define PCIE_INT_MASK 0x420 | ||
| 78 | #define INTX_MASK GENMASK(19, 16) | ||
| 79 | #define INTX_SHIFT 16 | ||
| 80 | #define PCIE_INT_STATUS 0x424 | ||
| 81 | #define MSI_STATUS BIT(23) | ||
| 82 | #define PCIE_IMSI_STATUS 0x42c | ||
| 83 | #define PCIE_IMSI_ADDR 0x430 | ||
| 84 | #define MSI_MASK BIT(23) | ||
| 85 | #define MTK_MSI_IRQS_NUM 32 | ||
| 86 | |||
| 87 | #define PCIE_AHB_TRANS_BASE0_L 0x438 | ||
| 88 | #define PCIE_AHB_TRANS_BASE0_H 0x43c | ||
| 89 | #define AHB2PCIE_SIZE(x) ((x) & GENMASK(4, 0)) | ||
| 90 | #define PCIE_AXI_WINDOW0 0x448 | ||
| 91 | #define WIN_ENABLE BIT(7) | ||
| 92 | |||
| 93 | /* PCIe V2 configuration transaction header */ | ||
| 94 | #define PCIE_CFG_HEADER0 0x460 | ||
| 95 | #define PCIE_CFG_HEADER1 0x464 | ||
| 96 | #define PCIE_CFG_HEADER2 0x468 | ||
| 97 | #define PCIE_CFG_WDATA 0x470 | ||
| 98 | #define PCIE_APP_TLP_REQ 0x488 | ||
| 99 | #define PCIE_CFG_RDATA 0x48c | ||
| 100 | #define APP_CFG_REQ BIT(0) | ||
| 101 | #define APP_CPL_STATUS GENMASK(7, 5) | ||
| 102 | |||
| 103 | #define CFG_WRRD_TYPE_0 4 | ||
| 104 | #define CFG_WR_FMT 2 | ||
| 105 | #define CFG_RD_FMT 0 | ||
| 106 | |||
| 107 | #define CFG_DW0_LENGTH(length) ((length) & GENMASK(9, 0)) | ||
| 108 | #define CFG_DW0_TYPE(type) (((type) << 24) & GENMASK(28, 24)) | ||
| 109 | #define CFG_DW0_FMT(fmt) (((fmt) << 29) & GENMASK(31, 29)) | ||
| 110 | #define CFG_DW2_REGN(regn) ((regn) & GENMASK(11, 2)) | ||
| 111 | #define CFG_DW2_FUN(fun) (((fun) << 16) & GENMASK(18, 16)) | ||
| 112 | #define CFG_DW2_DEV(dev) (((dev) << 19) & GENMASK(23, 19)) | ||
| 113 | #define CFG_DW2_BUS(bus) (((bus) << 24) & GENMASK(31, 24)) | ||
| 114 | #define CFG_HEADER_DW0(type, fmt) \ | ||
| 115 | (CFG_DW0_LENGTH(1) | CFG_DW0_TYPE(type) | CFG_DW0_FMT(fmt)) | ||
| 116 | #define CFG_HEADER_DW1(where, size) \ | ||
| 117 | (GENMASK(((size) - 1), 0) << ((where) & 0x3)) | ||
| 118 | #define CFG_HEADER_DW2(regn, fun, dev, bus) \ | ||
| 119 | (CFG_DW2_REGN(regn) | CFG_DW2_FUN(fun) | \ | ||
| 120 | CFG_DW2_DEV(dev) | CFG_DW2_BUS(bus)) | ||
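CFG_HEADER_DW1 above encodes the TLP's first-DW byte enables from the access offset and size; a worked example with arbitrary values:

/*
 * A 2-byte access at where = 0x06:
 *   CFG_HEADER_DW1(0x06, 2) = GENMASK(1, 0) << (0x06 & 0x3)
 *                           = 0x3 << 2 = 0xc
 * i.e. byte lanes 2 and 3 of the first data DW are enabled.
 */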
| 121 | |||
| 122 | #define PCIE_RST_CTRL 0x510 | ||
| 123 | #define PCIE_PHY_RSTB BIT(0) | ||
| 124 | #define PCIE_PIPE_SRSTB BIT(1) | ||
| 125 | #define PCIE_MAC_SRSTB BIT(2) | ||
| 126 | #define PCIE_CRSTB BIT(3) | ||
| 127 | #define PCIE_PERSTB BIT(8) | ||
| 128 | #define PCIE_LINKDOWN_RST_EN GENMASK(15, 13) | ||
| 129 | #define PCIE_LINK_STATUS_V2 0x804 | ||
| 130 | #define PCIE_PORT_LINKUP_V2 BIT(10) | ||
| 131 | |||
| 132 | struct mtk_pcie_port; | ||
| 133 | |||
| 134 | /** | ||
| 135 | * struct mtk_pcie_soc - differentiate between host generations | ||
| 136 | * @need_fix_class_id: whether this host's class ID needs to be fixed | ||
| 137 | * @ops: pointer to configuration access functions | ||
| 138 | * @startup: pointer to the controller startup function | ||
| 139 | * @setup_irq: pointer to the IRQ setup function | ||
| 140 | */ | ||
| 141 | struct mtk_pcie_soc { | ||
| 142 | bool need_fix_class_id; | ||
| 143 | struct pci_ops *ops; | ||
| 144 | int (*startup)(struct mtk_pcie_port *port); | ||
| 145 | int (*setup_irq)(struct mtk_pcie_port *port, struct device_node *node); | ||
| 146 | }; | ||
| 147 | |||
| 148 | /** | ||
| 149 | * struct mtk_pcie_port - PCIe port information | ||
| 150 | * @base: IO mapped register base | ||
| 151 | * @list: port list | ||
| 152 | * @pcie: pointer to PCIe host info | ||
| 153 | * @reset: pointer to port reset control | ||
| 154 | * @sys_ck: pointer to transaction/data link layer clock | ||
| 155 | * @ahb_ck: pointer to AHB slave interface operating clock for CSR access | ||
| 156 | * and RC initiated MMIO access | ||
| 157 | * @axi_ck: pointer to application layer MMIO channel operating clock | ||
| 158 | * @aux_ck: pointer to pe2_mac_bridge and pe2_mac_core operating clock | ||
| 159 | * when pcie_mac_ck/pcie_pipe_ck is turned off | ||
| 160 | * @obff_ck: pointer to OBFF functional block operating clock | ||
| 161 | * @pipe_ck: pointer to LTSSM and PHY/MAC layer operating clock | ||
| 162 | * @phy: pointer to PHY control block | ||
| 163 | * @lane: lane count | ||
| 164 | * @slot: port slot | ||
| 165 | * @irq_domain: legacy INTx IRQ domain | ||
| 166 | * @inner_domain: inner IRQ domain | ||
| 167 | * @msi_domain: MSI IRQ domain | ||
| 168 | * @lock: protect the msi_irq_in_use bitmap | ||
| 169 | * @msi_irq_in_use: bit map for assigned MSI IRQ | ||
| 170 | */ | ||
| 171 | struct mtk_pcie_port { | ||
| 172 | void __iomem *base; | ||
| 173 | struct list_head list; | ||
| 174 | struct mtk_pcie *pcie; | ||
| 175 | struct reset_control *reset; | ||
| 176 | struct clk *sys_ck; | ||
| 177 | struct clk *ahb_ck; | ||
| 178 | struct clk *axi_ck; | ||
| 179 | struct clk *aux_ck; | ||
| 180 | struct clk *obff_ck; | ||
| 181 | struct clk *pipe_ck; | ||
| 182 | struct phy *phy; | ||
| 183 | u32 lane; | ||
| 184 | u32 slot; | ||
| 185 | struct irq_domain *irq_domain; | ||
| 186 | struct irq_domain *inner_domain; | ||
| 187 | struct irq_domain *msi_domain; | ||
| 188 | struct mutex lock; | ||
| 189 | DECLARE_BITMAP(msi_irq_in_use, MTK_MSI_IRQS_NUM); | ||
| 190 | }; | ||
| 191 | |||
| 192 | /** | ||
| 193 | * struct mtk_pcie - PCIe host information | ||
| 194 | * @dev: pointer to PCIe device | ||
| 195 | * @base: IO mapped register base | ||
| 196 | * @free_ck: free-run reference clock | ||
| 197 | * @io: IO resource | ||
| 198 | * @pio: PIO resource | ||
| 199 | * @mem: non-prefetchable memory resource | ||
| 200 | * @busn: bus range | ||
| 201 | * @offset: IO / Memory offset | ||
| 202 | * @ports: pointer to PCIe port information | ||
| 203 | * @soc: pointer to SoC-dependent operations | ||
| 204 | */ | ||
| 205 | struct mtk_pcie { | ||
| 206 | struct device *dev; | ||
| 207 | void __iomem *base; | ||
| 208 | struct clk *free_ck; | ||
| 209 | |||
| 210 | struct resource io; | ||
| 211 | struct resource pio; | ||
| 212 | struct resource mem; | ||
| 213 | struct resource busn; | ||
| 214 | struct { | ||
| 215 | resource_size_t mem; | ||
| 216 | resource_size_t io; | ||
| 217 | } offset; | ||
| 218 | struct list_head ports; | ||
| 219 | const struct mtk_pcie_soc *soc; | ||
| 220 | }; | ||
| 221 | |||
| 222 | static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) | ||
| 223 | { | ||
| 224 | struct device *dev = pcie->dev; | ||
| 225 | |||
| 226 | clk_disable_unprepare(pcie->free_ck); | ||
| 227 | |||
| 228 | if (dev->pm_domain) { | ||
| 229 | pm_runtime_put_sync(dev); | ||
| 230 | pm_runtime_disable(dev); | ||
| 231 | } | ||
| 232 | } | ||
| 233 | |||
| 234 | static void mtk_pcie_port_free(struct mtk_pcie_port *port) | ||
| 235 | { | ||
| 236 | struct mtk_pcie *pcie = port->pcie; | ||
| 237 | struct device *dev = pcie->dev; | ||
| 238 | |||
| 239 | devm_iounmap(dev, port->base); | ||
| 240 | list_del(&port->list); | ||
| 241 | devm_kfree(dev, port); | ||
| 242 | } | ||
| 243 | |||
| 244 | static void mtk_pcie_put_resources(struct mtk_pcie *pcie) | ||
| 245 | { | ||
| 246 | struct mtk_pcie_port *port, *tmp; | ||
| 247 | |||
| 248 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) { | ||
| 249 | phy_power_off(port->phy); | ||
| 250 | phy_exit(port->phy); | ||
| 251 | clk_disable_unprepare(port->pipe_ck); | ||
| 252 | clk_disable_unprepare(port->obff_ck); | ||
| 253 | clk_disable_unprepare(port->axi_ck); | ||
| 254 | clk_disable_unprepare(port->aux_ck); | ||
| 255 | clk_disable_unprepare(port->ahb_ck); | ||
| 256 | clk_disable_unprepare(port->sys_ck); | ||
| 257 | mtk_pcie_port_free(port); | ||
| 258 | } | ||
| 259 | |||
| 260 | mtk_pcie_subsys_powerdown(pcie); | ||
| 261 | } | ||
| 262 | |||
| 263 | static int mtk_pcie_check_cfg_cpld(struct mtk_pcie_port *port) | ||
| 264 | { | ||
| 265 | u32 val; | ||
| 266 | int err; | ||
| 267 | |||
| 268 | err = readl_poll_timeout_atomic(port->base + PCIE_APP_TLP_REQ, val, | ||
| 269 | !(val & APP_CFG_REQ), 10, | ||
| 270 | 100 * USEC_PER_MSEC); | ||
| 271 | if (err) | ||
| 272 | return PCIBIOS_SET_FAILED; | ||
| 273 | |||
| 274 | if (readl(port->base + PCIE_APP_TLP_REQ) & APP_CPL_STATUS) | ||
| 275 | return PCIBIOS_SET_FAILED; | ||
| 276 | |||
| 277 | return PCIBIOS_SUCCESSFUL; | ||
| 278 | } | ||
| 279 | |||
| 280 | static int mtk_pcie_hw_rd_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, | ||
| 281 | int where, int size, u32 *val) | ||
| 282 | { | ||
| 283 | u32 tmp; | ||
| 284 | |||
| 285 | /* Write PCIe configuration transaction header for Cfgrd */ | ||
| 286 | writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_RD_FMT), | ||
| 287 | port->base + PCIE_CFG_HEADER0); | ||
| 288 | writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); | ||
| 289 | writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), | ||
| 290 | port->base + PCIE_CFG_HEADER2); | ||
| 291 | |||
| 292 | /* Trigger h/w to transmit Cfgrd TLP */ | ||
| 293 | tmp = readl(port->base + PCIE_APP_TLP_REQ); | ||
| 294 | tmp |= APP_CFG_REQ; | ||
| 295 | writel(tmp, port->base + PCIE_APP_TLP_REQ); | ||
| 296 | |||
| 297 | /* Check completion status */ | ||
| 298 | if (mtk_pcie_check_cfg_cpld(port)) | ||
| 299 | return PCIBIOS_SET_FAILED; | ||
| 300 | |||
| 301 | /* Read cpld payload of Cfgrd */ | ||
| 302 | *val = readl(port->base + PCIE_CFG_RDATA); | ||
| 303 | |||
| 304 | if (size == 1) | ||
| 305 | *val = (*val >> (8 * (where & 3))) & 0xff; | ||
| 306 | else if (size == 2) | ||
| 307 | *val = (*val >> (8 * (where & 3))) & 0xffff; | ||
| 308 | |||
| 309 | return PCIBIOS_SUCCESSFUL; | ||
| 310 | } | ||
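The final shift-and-mask extracts the requested bytes from the 32-bit completion payload. A standalone sketch of that step (the payload value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t rdata = 0xaabbccdd;	/* hypothetical PCIE_CFG_RDATA */
	int where = 0x0e;		/* 2-byte read at offset 0x0e */

	uint32_t val = (rdata >> (8 * (where & 3))) & 0xffff;
	printf("val = %#x\n", val);	/* prints 0xaabb */
	return 0;
}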
| 311 | |||
| 312 | static int mtk_pcie_hw_wr_cfg(struct mtk_pcie_port *port, u32 bus, u32 devfn, | ||
| 313 | int where, int size, u32 val) | ||
| 314 | { | ||
| 315 | /* Write PCIe configuration transaction header for Cfgwr */ | ||
| 316 | writel(CFG_HEADER_DW0(CFG_WRRD_TYPE_0, CFG_WR_FMT), | ||
| 317 | port->base + PCIE_CFG_HEADER0); | ||
| 318 | writel(CFG_HEADER_DW1(where, size), port->base + PCIE_CFG_HEADER1); | ||
| 319 | writel(CFG_HEADER_DW2(where, PCI_FUNC(devfn), PCI_SLOT(devfn), bus), | ||
| 320 | port->base + PCIE_CFG_HEADER2); | ||
| 321 | |||
| 322 | /* Write Cfgwr data */ | ||
| 323 | val = val << 8 * (where & 3); | ||
| 324 | writel(val, port->base + PCIE_CFG_WDATA); | ||
| 325 | |||
| 326 | /* Trigger h/w to transmit Cfgwr TLP */ | ||
| 327 | val = readl(port->base + PCIE_APP_TLP_REQ); | ||
| 328 | val |= APP_CFG_REQ; | ||
| 329 | writel(val, port->base + PCIE_APP_TLP_REQ); | ||
| 330 | |||
| 331 | /* Check completion status */ | ||
| 332 | return mtk_pcie_check_cfg_cpld(port); | ||
| 333 | } | ||
| 334 | |||
| 335 | static struct mtk_pcie_port *mtk_pcie_find_port(struct pci_bus *bus, | ||
| 336 | unsigned int devfn) | ||
| 337 | { | ||
| 338 | struct mtk_pcie *pcie = bus->sysdata; | ||
| 339 | struct mtk_pcie_port *port; | ||
| 340 | |||
| 341 | list_for_each_entry(port, &pcie->ports, list) | ||
| 342 | if (port->slot == PCI_SLOT(devfn)) | ||
| 343 | return port; | ||
| 344 | |||
| 345 | return NULL; | ||
| 346 | } | ||
| 347 | |||
| 348 | static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 349 | int where, int size, u32 *val) | ||
| 350 | { | ||
| 351 | struct mtk_pcie_port *port; | ||
| 352 | u32 bn = bus->number; | ||
| 353 | int ret; | ||
| 354 | |||
| 355 | port = mtk_pcie_find_port(bus, devfn); | ||
| 356 | if (!port) { | ||
| 357 | *val = ~0; | ||
| 358 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 359 | } | ||
| 360 | |||
| 361 | ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val); | ||
| 362 | if (ret) | ||
| 363 | *val = ~0; | ||
| 364 | |||
| 365 | return ret; | ||
| 366 | } | ||
| 367 | |||
| 368 | static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 369 | int where, int size, u32 val) | ||
| 370 | { | ||
| 371 | struct mtk_pcie_port *port; | ||
| 372 | u32 bn = bus->number; | ||
| 373 | |||
| 374 | port = mtk_pcie_find_port(bus, devfn); | ||
| 375 | if (!port) | ||
| 376 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 377 | |||
| 378 | return mtk_pcie_hw_wr_cfg(port, bn, devfn, where, size, val); | ||
| 379 | } | ||
| 380 | |||
| 381 | static struct pci_ops mtk_pcie_ops_v2 = { | ||
| 382 | .read = mtk_pcie_config_read, | ||
| 383 | .write = mtk_pcie_config_write, | ||
| 384 | }; | ||
| 385 | |||
| 386 | static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port) | ||
| 387 | { | ||
| 388 | struct mtk_pcie *pcie = port->pcie; | ||
| 389 | struct resource *mem = &pcie->mem; | ||
| 390 | const struct mtk_pcie_soc *soc = port->pcie->soc; | ||
| 391 | u32 val; | ||
| 392 | size_t size; | ||
| 393 | int err; | ||
| 394 | |||
| 395 | /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */ | ||
| 396 | if (pcie->base) { | ||
| 397 | val = readl(pcie->base + PCIE_SYS_CFG_V2); | ||
| 398 | val |= PCIE_CSR_LTSSM_EN(port->slot) | | ||
| 399 | PCIE_CSR_ASPM_L1_EN(port->slot); | ||
| 400 | writel(val, pcie->base + PCIE_SYS_CFG_V2); | ||
| 401 | } | ||
| 402 | |||
| 403 | /* Assert all reset signals */ | ||
| 404 | writel(0, port->base + PCIE_RST_CTRL); | ||
| 405 | |||
| 406 | /* | ||
| 407 | * Enable PCIe link-down reset: if the link status changes from link-up | ||
| 408 | * to link-down, the MAC control registers and configuration space are | ||
| 409 | * reset. | ||
| 410 | */ | ||
| 411 | writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL); | ||
| 412 | |||
| 413 | /* De-assert PHY, PE, PIPE, MAC and configuration reset */ | ||
| 414 | val = readl(port->base + PCIE_RST_CTRL); | ||
| 415 | val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB | | ||
| 416 | PCIE_MAC_SRSTB | PCIE_CRSTB; | ||
| 417 | writel(val, port->base + PCIE_RST_CTRL); | ||
| 418 | |||
| 419 | /* Set up vendor ID and class code */ | ||
| 420 | if (soc->need_fix_class_id) { | ||
| 421 | val = PCI_VENDOR_ID_MEDIATEK; | ||
| 422 | writew(val, port->base + PCIE_CONF_VEND_ID); | ||
| 423 | |||
| 424 | val = PCI_CLASS_BRIDGE_HOST; | ||
| 425 | writew(val, port->base + PCIE_CONF_CLASS_ID); | ||
| 426 | } | ||
| 427 | |||
| 428 | /* 100ms timeout value should be enough for Gen1/2 training */ | ||
| 429 | err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_V2, val, | ||
| 430 | !!(val & PCIE_PORT_LINKUP_V2), 20, | ||
| 431 | 100 * USEC_PER_MSEC); | ||
| 432 | if (err) | ||
| 433 | return -ETIMEDOUT; | ||
| 434 | |||
| 435 | /* Set INTx mask */ | ||
| 436 | val = readl(port->base + PCIE_INT_MASK); | ||
| 437 | val &= ~INTX_MASK; | ||
| 438 | writel(val, port->base + PCIE_INT_MASK); | ||
| 439 | |||
| 440 | /* Set AHB to PCIe translation windows */ | ||
| 441 | size = mem->end - mem->start; | ||
| 442 | val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size)); | ||
| 443 | writel(val, port->base + PCIE_AHB_TRANS_BASE0_L); | ||
| 444 | |||
| 445 | val = upper_32_bits(mem->start); | ||
| 446 | writel(val, port->base + PCIE_AHB_TRANS_BASE0_H); | ||
| 447 | |||
| 448 | /* Set PCIe to AXI translation memory space. */ | ||
| 449 | val = fls(0xffffffff) | WIN_ENABLE; | ||
| 450 | writel(val, port->base + PCIE_AXI_WINDOW0); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 456 | { | ||
| 457 | struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); | ||
| 458 | phys_addr_t addr; | ||
| 459 | |||
| 460 | /* MT2712/MT7622 only support 32-bit MSI addresses */ | ||
| 461 | addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); | ||
| 462 | msg->address_hi = 0; | ||
| 463 | msg->address_lo = lower_32_bits(addr); | ||
| 464 | |||
| 465 | msg->data = data->hwirq; | ||
| 466 | |||
| 467 | dev_dbg(port->pcie->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
| 468 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
| 469 | } | ||
| 470 | |||
| 471 | static int mtk_msi_set_affinity(struct irq_data *irq_data, | ||
| 472 | const struct cpumask *mask, bool force) | ||
| 473 | { | ||
| 474 | return -EINVAL; | ||
| 475 | } | ||
| 476 | |||
| 477 | static void mtk_msi_ack_irq(struct irq_data *data) | ||
| 478 | { | ||
| 479 | struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data); | ||
| 480 | u32 hwirq = data->hwirq; | ||
| 481 | |||
| 482 | writel(1 << hwirq, port->base + PCIE_IMSI_STATUS); | ||
| 483 | } | ||
| 484 | |||
| 485 | static struct irq_chip mtk_msi_bottom_irq_chip = { | ||
| 486 | .name = "MTK MSI", | ||
| 487 | .irq_compose_msi_msg = mtk_compose_msi_msg, | ||
| 488 | .irq_set_affinity = mtk_msi_set_affinity, | ||
| 489 | .irq_ack = mtk_msi_ack_irq, | ||
| 490 | }; | ||
| 491 | |||
| 492 | static int mtk_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
| 493 | unsigned int nr_irqs, void *args) | ||
| 494 | { | ||
| 495 | struct mtk_pcie_port *port = domain->host_data; | ||
| 496 | unsigned long bit; | ||
| 497 | |||
| 498 | WARN_ON(nr_irqs != 1); | ||
| 499 | mutex_lock(&port->lock); | ||
| 500 | |||
| 501 | bit = find_first_zero_bit(port->msi_irq_in_use, MTK_MSI_IRQS_NUM); | ||
| 502 | if (bit >= MTK_MSI_IRQS_NUM) { | ||
| 503 | mutex_unlock(&port->lock); | ||
| 504 | return -ENOSPC; | ||
| 505 | } | ||
| 506 | |||
| 507 | __set_bit(bit, port->msi_irq_in_use); | ||
| 508 | |||
| 509 | mutex_unlock(&port->lock); | ||
| 510 | |||
| 511 | irq_domain_set_info(domain, virq, bit, &mtk_msi_bottom_irq_chip, | ||
| 512 | domain->host_data, handle_edge_irq, | ||
| 513 | NULL, NULL); | ||
| 514 | |||
| 515 | return 0; | ||
| 516 | } | ||
| 517 | |||
| 518 | static void mtk_pcie_irq_domain_free(struct irq_domain *domain, | ||
| 519 | unsigned int virq, unsigned int nr_irqs) | ||
| 520 | { | ||
| 521 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
| 522 | struct mtk_pcie_port *port = irq_data_get_irq_chip_data(d); | ||
| 523 | |||
| 524 | mutex_lock(&port->lock); | ||
| 525 | |||
| 526 | if (!test_bit(d->hwirq, port->msi_irq_in_use)) | ||
| 527 | dev_err(port->pcie->dev, "trying to free unused MSI#%lu\n", | ||
| 528 | d->hwirq); | ||
| 529 | else | ||
| 530 | __clear_bit(d->hwirq, port->msi_irq_in_use); | ||
| 531 | |||
| 532 | mutex_unlock(&port->lock); | ||
| 533 | |||
| 534 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | ||
| 535 | } | ||
| 536 | |||
| 537 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 538 | .alloc = mtk_pcie_irq_domain_alloc, | ||
| 539 | .free = mtk_pcie_irq_domain_free, | ||
| 540 | }; | ||
| 541 | |||
| 542 | static struct irq_chip mtk_msi_irq_chip = { | ||
| 543 | .name = "MTK PCIe MSI", | ||
| 544 | .irq_ack = irq_chip_ack_parent, | ||
| 545 | .irq_mask = pci_msi_mask_irq, | ||
| 546 | .irq_unmask = pci_msi_unmask_irq, | ||
| 547 | }; | ||
| 548 | |||
| 549 | static struct msi_domain_info mtk_msi_domain_info = { | ||
| 550 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 551 | MSI_FLAG_PCI_MSIX), | ||
| 552 | .chip = &mtk_msi_irq_chip, | ||
| 553 | }; | ||
| 554 | |||
| 555 | static int mtk_pcie_allocate_msi_domains(struct mtk_pcie_port *port) | ||
| 556 | { | ||
| 557 | struct fwnode_handle *fwnode = of_node_to_fwnode(port->pcie->dev->of_node); | ||
| 558 | |||
| 559 | mutex_init(&port->lock); | ||
| 560 | |||
| 561 | port->inner_domain = irq_domain_create_linear(fwnode, MTK_MSI_IRQS_NUM, | ||
| 562 | &msi_domain_ops, port); | ||
| 563 | if (!port->inner_domain) { | ||
| 564 | dev_err(port->pcie->dev, "failed to create IRQ domain\n"); | ||
| 565 | return -ENOMEM; | ||
| 566 | } | ||
| 567 | |||
| 568 | port->msi_domain = pci_msi_create_irq_domain(fwnode, &mtk_msi_domain_info, | ||
| 569 | port->inner_domain); | ||
| 570 | if (!port->msi_domain) { | ||
| 571 | dev_err(port->pcie->dev, "failed to create MSI domain\n"); | ||
| 572 | irq_domain_remove(port->inner_domain); | ||
| 573 | return -ENOMEM; | ||
| 574 | } | ||
| 575 | |||
| 576 | return 0; | ||
| 577 | } | ||
| 578 | |||
| 579 | static void mtk_pcie_enable_msi(struct mtk_pcie_port *port) | ||
| 580 | { | ||
| 581 | u32 val; | ||
| 582 | phys_addr_t msg_addr; | ||
| 583 | |||
| 584 | msg_addr = virt_to_phys(port->base + PCIE_MSI_VECTOR); | ||
| 585 | val = lower_32_bits(msg_addr); | ||
| 586 | writel(val, port->base + PCIE_IMSI_ADDR); | ||
| 587 | |||
| 588 | val = readl(port->base + PCIE_INT_MASK); | ||
| 589 | val &= ~MSI_MASK; | ||
| 590 | writel(val, port->base + PCIE_INT_MASK); | ||
| 591 | } | ||
| 592 | |||
| 593 | static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 594 | irq_hw_number_t hwirq) | ||
| 595 | { | ||
| 596 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | ||
| 597 | irq_set_chip_data(irq, domain->host_data); | ||
| 598 | |||
| 599 | return 0; | ||
| 600 | } | ||
| 601 | |||
| 602 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 603 | .map = mtk_pcie_intx_map, | ||
| 604 | }; | ||
| 605 | |||
| 606 | static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, | ||
| 607 | struct device_node *node) | ||
| 608 | { | ||
| 609 | struct device *dev = port->pcie->dev; | ||
| 610 | struct device_node *pcie_intc_node; | ||
| 611 | int ret; | ||
| 612 | |||
| 613 | /* Setup INTx */ | ||
| 614 | pcie_intc_node = of_get_next_child(node, NULL); | ||
| 615 | if (!pcie_intc_node) { | ||
| 616 | dev_err(dev, "no PCIe Intc node found\n"); | ||
| 617 | return -ENODEV; | ||
| 618 | } | ||
| 619 | |||
| 620 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | ||
| 621 | &intx_domain_ops, port); | ||
| 622 | if (!port->irq_domain) { | ||
| 623 | dev_err(dev, "failed to get INTx IRQ domain\n"); | ||
| 624 | return -ENODEV; | ||
| 625 | } | ||
| 626 | |||
| 627 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 628 | ret = mtk_pcie_allocate_msi_domains(port); | ||
| 629 | if (ret) | ||
| 630 | return ret; | ||
| 631 | |||
| 632 | mtk_pcie_enable_msi(port); | ||
| 633 | } | ||
| 634 | |||
| 635 | return 0; | ||
| 636 | } | ||
| 637 | |||
| 638 | static void mtk_pcie_intr_handler(struct irq_desc *desc) | ||
| 639 | { | ||
| 640 | struct mtk_pcie_port *port = irq_desc_get_handler_data(desc); | ||
| 641 | struct irq_chip *irqchip = irq_desc_get_chip(desc); | ||
| 642 | unsigned long status; | ||
| 643 | u32 virq; | ||
| 644 | u32 bit = INTX_SHIFT; | ||
| 645 | |||
| 646 | chained_irq_enter(irqchip, desc); | ||
| 647 | |||
| 648 | status = readl(port->base + PCIE_INT_STATUS); | ||
| 649 | if (status & INTX_MASK) { | ||
| 650 | for_each_set_bit_from(bit, &status, PCI_NUM_INTX + INTX_SHIFT) { | ||
| 651 | /* Clear the INTx */ | ||
| 652 | writel(1 << bit, port->base + PCIE_INT_STATUS); | ||
| 653 | virq = irq_find_mapping(port->irq_domain, | ||
| 654 | bit - INTX_SHIFT); | ||
| 655 | generic_handle_irq(virq); | ||
| 656 | } | ||
| 657 | } | ||
| 658 | |||
| 659 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 660 | if (status & MSI_STATUS) { | ||
| 661 | unsigned long imsi_status; | ||
| 662 | |||
| 663 | while ((imsi_status = readl(port->base + PCIE_IMSI_STATUS))) { | ||
| 664 | for_each_set_bit(bit, &imsi_status, MTK_MSI_IRQS_NUM) { | ||
| 665 | virq = irq_find_mapping(port->inner_domain, bit); | ||
| 666 | generic_handle_irq(virq); | ||
| 667 | } | ||
| 668 | } | ||
| 669 | /* Clear MSI interrupt status */ | ||
| 670 | writel(MSI_STATUS, port->base + PCIE_INT_STATUS); | ||
| 671 | } | ||
| 672 | } | ||
| 673 | |||
| 674 | chained_irq_exit(irqchip, desc); | ||
| 677 | } | ||
| 678 | |||
| 679 | static int mtk_pcie_setup_irq(struct mtk_pcie_port *port, | ||
| 680 | struct device_node *node) | ||
| 681 | { | ||
| 682 | struct mtk_pcie *pcie = port->pcie; | ||
| 683 | struct device *dev = pcie->dev; | ||
| 684 | struct platform_device *pdev = to_platform_device(dev); | ||
| 685 | int err, irq; | ||
| 686 | |||
| 687 | err = mtk_pcie_init_irq_domain(port, node); | ||
| 688 | if (err) { | ||
| 689 | dev_err(dev, "failed to init PCIe IRQ domain\n"); | ||
| 690 | return err; | ||
| 691 | } | ||
| 692 | |||
| 693 | irq = platform_get_irq(pdev, port->slot); | ||
| 694 | irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port); | ||
| 695 | |||
| 696 | return 0; | ||
| 697 | } | ||
| 698 | |||
| 699 | static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, | ||
| 700 | unsigned int devfn, int where) | ||
| 701 | { | ||
| 702 | struct mtk_pcie *pcie = bus->sysdata; | ||
| 703 | |||
| 704 | writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), | ||
| 705 | bus->number), pcie->base + PCIE_CFG_ADDR); | ||
| 706 | |||
| 707 | return pcie->base + PCIE_CFG_DATA + (where & 3); | ||
| 708 | } | ||
| 709 | |||
| 710 | static struct pci_ops mtk_pcie_ops = { | ||
| 711 | .map_bus = mtk_pcie_map_bus, | ||
| 712 | .read = pci_generic_config_read, | ||
| 713 | .write = pci_generic_config_write, | ||
| 714 | }; | ||
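These generic accessors pair with .map_bus: pci_generic_config_read() and pci_generic_config_write() call map_bus() and then issue an access of the requested width at the returned address, which is why mtk_pcie_map_bus() adds (where & 3) so that sub-word accesses hit the right byte lanes of PCIE_CFG_DATA. Roughly, as a simplified sketch of the read helper (not the verbatim drivers/pci/access.c source):

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr = bus->ops->map_bus(bus, devfn, where);

	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}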
| 715 | |||
| 716 | static int mtk_pcie_startup_port(struct mtk_pcie_port *port) | ||
| 717 | { | ||
| 718 | struct mtk_pcie *pcie = port->pcie; | ||
| 719 | u32 func = PCI_FUNC(port->slot << 3); | ||
| 720 | u32 slot = PCI_SLOT(port->slot << 3); | ||
| 721 | u32 val; | ||
| 722 | int err; | ||
| 723 | |||
| 724 | /* assert port PERST_N */ | ||
| 725 | val = readl(pcie->base + PCIE_SYS_CFG); | ||
| 726 | val |= PCIE_PORT_PERST(port->slot); | ||
| 727 | writel(val, pcie->base + PCIE_SYS_CFG); | ||
| 728 | |||
| 729 | /* de-assert port PERST_N */ | ||
| 730 | val = readl(pcie->base + PCIE_SYS_CFG); | ||
| 731 | val &= ~PCIE_PORT_PERST(port->slot); | ||
| 732 | writel(val, pcie->base + PCIE_SYS_CFG); | ||
| 733 | |||
| 734 | /* 100ms timeout value should be enough for Gen1/2 training */ | ||
| 735 | err = readl_poll_timeout(port->base + PCIE_LINK_STATUS, val, | ||
| 736 | !!(val & PCIE_PORT_LINKUP), 20, | ||
| 737 | 100 * USEC_PER_MSEC); | ||
| 738 | if (err) | ||
| 739 | return -ETIMEDOUT; | ||
| 740 | |||
| 741 | /* enable interrupt */ | ||
| 742 | val = readl(pcie->base + PCIE_INT_ENABLE); | ||
| 743 | val |= PCIE_PORT_INT_EN(port->slot); | ||
| 744 | writel(val, pcie->base + PCIE_INT_ENABLE); | ||
| 745 | |||
| 746 | /* Map to the whole DDR region; this must be set before any config operation. */ | ||
| 747 | writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, | ||
| 748 | port->base + PCIE_BAR0_SETUP); | ||
| 749 | |||
| 750 | /* configure class code and revision ID */ | ||
| 751 | writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); | ||
| 752 | |||
| 753 | /* configure FC credit */ | ||
| 754 | writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), | ||
| 755 | pcie->base + PCIE_CFG_ADDR); | ||
| 756 | val = readl(pcie->base + PCIE_CFG_DATA); | ||
| 757 | val &= ~PCIE_FC_CREDIT_MASK; | ||
| 758 | val |= PCIE_FC_CREDIT_VAL(0x806c); | ||
| 759 | writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), | ||
| 760 | pcie->base + PCIE_CFG_ADDR); | ||
| 761 | writel(val, pcie->base + PCIE_CFG_DATA); | ||
| 762 | |||
| 763 | /* configure RC FTS number to 250 when it leaves L0s */ | ||
| 764 | writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), | ||
| 765 | pcie->base + PCIE_CFG_ADDR); | ||
| 766 | val = readl(pcie->base + PCIE_CFG_DATA); | ||
| 767 | val &= ~PCIE_FTS_NUM_MASK; | ||
| 768 | val |= PCIE_FTS_NUM_L0(0x50); | ||
| 769 | writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), | ||
| 770 | pcie->base + PCIE_CFG_ADDR); | ||
| 771 | writel(val, pcie->base + PCIE_CFG_DATA); | ||
| 772 | |||
| 773 | return 0; | ||
| 774 | } | ||
| 775 | |||
| 776 | static void mtk_pcie_enable_port(struct mtk_pcie_port *port) | ||
| 777 | { | ||
| 778 | struct mtk_pcie *pcie = port->pcie; | ||
| 779 | struct device *dev = pcie->dev; | ||
| 780 | int err; | ||
| 781 | |||
| 782 | err = clk_prepare_enable(port->sys_ck); | ||
| 783 | if (err) { | ||
| 784 | dev_err(dev, "failed to enable sys_ck%d clock\n", port->slot); | ||
| 785 | goto err_sys_clk; | ||
| 786 | } | ||
| 787 | |||
| 788 | err = clk_prepare_enable(port->ahb_ck); | ||
| 789 | if (err) { | ||
| 790 | dev_err(dev, "failed to enable ahb_ck%d\n", port->slot); | ||
| 791 | goto err_ahb_clk; | ||
| 792 | } | ||
| 793 | |||
| 794 | err = clk_prepare_enable(port->aux_ck); | ||
| 795 | if (err) { | ||
| 796 | dev_err(dev, "failed to enable aux_ck%d\n", port->slot); | ||
| 797 | goto err_aux_clk; | ||
| 798 | } | ||
| 799 | |||
| 800 | err = clk_prepare_enable(port->axi_ck); | ||
| 801 | if (err) { | ||
| 802 | dev_err(dev, "failed to enable axi_ck%d\n", port->slot); | ||
| 803 | goto err_axi_clk; | ||
| 804 | } | ||
| 805 | |||
| 806 | err = clk_prepare_enable(port->obff_ck); | ||
| 807 | if (err) { | ||
| 808 | dev_err(dev, "failed to enable obff_ck%d\n", port->slot); | ||
| 809 | goto err_obff_clk; | ||
| 810 | } | ||
| 811 | |||
| 812 | err = clk_prepare_enable(port->pipe_ck); | ||
| 813 | if (err) { | ||
| 814 | dev_err(dev, "failed to enable pipe_ck%d\n", port->slot); | ||
| 815 | goto err_pipe_clk; | ||
| 816 | } | ||
| 817 | |||
| 818 | reset_control_assert(port->reset); | ||
| 819 | reset_control_deassert(port->reset); | ||
| 820 | |||
| 821 | err = phy_init(port->phy); | ||
| 822 | if (err) { | ||
| 823 | dev_err(dev, "failed to initialize port%d phy\n", port->slot); | ||
| 824 | goto err_phy_init; | ||
| 825 | } | ||
| 826 | |||
| 827 | err = phy_power_on(port->phy); | ||
| 828 | if (err) { | ||
| 829 | dev_err(dev, "failed to power on port%d phy\n", port->slot); | ||
| 830 | goto err_phy_on; | ||
| 831 | } | ||
| 832 | |||
| 833 | if (!pcie->soc->startup(port)) | ||
| 834 | return; | ||
| 835 | |||
| 836 | dev_info(dev, "Port%d link down\n", port->slot); | ||
| 837 | |||
| 838 | phy_power_off(port->phy); | ||
| 839 | err_phy_on: | ||
| 840 | phy_exit(port->phy); | ||
| 841 | err_phy_init: | ||
| 842 | clk_disable_unprepare(port->pipe_ck); | ||
| 843 | err_pipe_clk: | ||
| 844 | clk_disable_unprepare(port->obff_ck); | ||
| 845 | err_obff_clk: | ||
| 846 | clk_disable_unprepare(port->axi_ck); | ||
| 847 | err_axi_clk: | ||
| 848 | clk_disable_unprepare(port->aux_ck); | ||
| 849 | err_aux_clk: | ||
| 850 | clk_disable_unprepare(port->ahb_ck); | ||
| 851 | err_ahb_clk: | ||
| 852 | clk_disable_unprepare(port->sys_ck); | ||
| 853 | err_sys_clk: | ||
| 854 | mtk_pcie_port_free(port); | ||
| 855 | } | ||
| 856 | |||
| 857 | static int mtk_pcie_parse_port(struct mtk_pcie *pcie, | ||
| 858 | struct device_node *node, | ||
| 859 | int slot) | ||
| 860 | { | ||
| 861 | struct mtk_pcie_port *port; | ||
| 862 | struct resource *regs; | ||
| 863 | struct device *dev = pcie->dev; | ||
| 864 | struct platform_device *pdev = to_platform_device(dev); | ||
| 865 | char name[10]; | ||
| 866 | int err; | ||
| 867 | |||
| 868 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | ||
| 869 | if (!port) | ||
| 870 | return -ENOMEM; | ||
| 871 | |||
| 872 | err = of_property_read_u32(node, "num-lanes", &port->lane); | ||
| 873 | if (err) { | ||
| 874 | dev_err(dev, "missing num-lanes property\n"); | ||
| 875 | return err; | ||
| 876 | } | ||
| 877 | |||
| 878 | snprintf(name, sizeof(name), "port%d", slot); | ||
| 879 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | ||
| 880 | port->base = devm_ioremap_resource(dev, regs); | ||
| 881 | if (IS_ERR(port->base)) { | ||
| 882 | dev_err(dev, "failed to map port%d base\n", slot); | ||
| 883 | return PTR_ERR(port->base); | ||
| 884 | } | ||
| 885 | |||
| 886 | snprintf(name, sizeof(name), "sys_ck%d", slot); | ||
| 887 | port->sys_ck = devm_clk_get(dev, name); | ||
| 888 | if (IS_ERR(port->sys_ck)) { | ||
| 889 | dev_err(dev, "failed to get sys_ck%d clock\n", slot); | ||
| 890 | return PTR_ERR(port->sys_ck); | ||
| 891 | } | ||
| 892 | |||
| 893 | /* sys_ck might be divided into the following parts in some chips */ | ||
| 894 | snprintf(name, sizeof(name), "ahb_ck%d", slot); | ||
| 895 | port->ahb_ck = devm_clk_get(dev, name); | ||
| 896 | if (IS_ERR(port->ahb_ck)) { | ||
| 897 | if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) | ||
| 898 | return -EPROBE_DEFER; | ||
| 899 | |||
| 900 | port->ahb_ck = NULL; | ||
| 901 | } | ||
| 902 | |||
| 903 | snprintf(name, sizeof(name), "axi_ck%d", slot); | ||
| 904 | port->axi_ck = devm_clk_get(dev, name); | ||
| 905 | if (IS_ERR(port->axi_ck)) { | ||
| 906 | if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) | ||
| 907 | return -EPROBE_DEFER; | ||
| 908 | |||
| 909 | port->axi_ck = NULL; | ||
| 910 | } | ||
| 911 | |||
| 912 | snprintf(name, sizeof(name), "aux_ck%d", slot); | ||
| 913 | port->aux_ck = devm_clk_get(dev, name); | ||
| 914 | if (IS_ERR(port->aux_ck)) { | ||
| 915 | if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) | ||
| 916 | return -EPROBE_DEFER; | ||
| 917 | |||
| 918 | port->aux_ck = NULL; | ||
| 919 | } | ||
| 920 | |||
| 921 | snprintf(name, sizeof(name), "obff_ck%d", slot); | ||
| 922 | port->obff_ck = devm_clk_get(dev, name); | ||
| 923 | if (IS_ERR(port->obff_ck)) { | ||
| 924 | if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) | ||
| 925 | return -EPROBE_DEFER; | ||
| 926 | |||
| 927 | port->obff_ck = NULL; | ||
| 928 | } | ||
| 929 | |||
| 930 | snprintf(name, sizeof(name), "pipe_ck%d", slot); | ||
| 931 | port->pipe_ck = devm_clk_get(dev, name); | ||
| 932 | if (IS_ERR(port->pipe_ck)) { | ||
| 933 | if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) | ||
| 934 | return -EPROBE_DEFER; | ||
| 935 | |||
| 936 | port->pipe_ck = NULL; | ||
| 937 | } | ||
| 938 | |||
| 939 | snprintf(name, sizeof(name), "pcie-rst%d", slot); | ||
| 940 | port->reset = devm_reset_control_get_optional_exclusive(dev, name); | ||
| 941 | if (PTR_ERR(port->reset) == -EPROBE_DEFER) | ||
| 942 | return PTR_ERR(port->reset); | ||
| 943 | |||
| 944 | /* some platforms may use default PHY setting */ | ||
| 945 | snprintf(name, sizeof(name), "pcie-phy%d", slot); | ||
| 946 | port->phy = devm_phy_optional_get(dev, name); | ||
| 947 | if (IS_ERR(port->phy)) | ||
| 948 | return PTR_ERR(port->phy); | ||
| 949 | |||
| 950 | port->slot = slot; | ||
| 951 | port->pcie = pcie; | ||
| 952 | |||
| 953 | if (pcie->soc->setup_irq) { | ||
| 954 | err = pcie->soc->setup_irq(port, node); | ||
| 955 | if (err) | ||
| 956 | return err; | ||
| 957 | } | ||
| 958 | |||
| 959 | INIT_LIST_HEAD(&port->list); | ||
| 960 | list_add_tail(&port->list, &pcie->ports); | ||
| 961 | |||
| 962 | return 0; | ||
| 963 | } | ||
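The repeated devm_clk_get() / -EPROBE_DEFER / NULL fallback above is how optional clocks had to be fetched at the time; later kernels add devm_clk_get_optional(), which returns NULL instead of an error when the clock is simply absent, collapsing each block to a sketch like:

/* Equivalent optional-clock lookup with the later helper (sketch) */
port->ahb_ck = devm_clk_get_optional(dev, name);
if (IS_ERR(port->ahb_ck))
	return PTR_ERR(port->ahb_ck);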
| 964 | |||
| 965 | static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) | ||
| 966 | { | ||
| 967 | struct device *dev = pcie->dev; | ||
| 968 | struct platform_device *pdev = to_platform_device(dev); | ||
| 969 | struct resource *regs; | ||
| 970 | int err; | ||
| 971 | |||
| 972 | /* get shared registers, which are optional */ | ||
| 973 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "subsys"); | ||
| 974 | if (regs) { | ||
| 975 | pcie->base = devm_ioremap_resource(dev, regs); | ||
| 976 | if (IS_ERR(pcie->base)) { | ||
| 977 | dev_err(dev, "failed to map shared register\n"); | ||
| 978 | return PTR_ERR(pcie->base); | ||
| 979 | } | ||
| 980 | } | ||
| 981 | |||
| 982 | pcie->free_ck = devm_clk_get(dev, "free_ck"); | ||
| 983 | if (IS_ERR(pcie->free_ck)) { | ||
| 984 | if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) | ||
| 985 | return -EPROBE_DEFER; | ||
| 986 | |||
| 987 | pcie->free_ck = NULL; | ||
| 988 | } | ||
| 989 | |||
| 990 | if (dev->pm_domain) { | ||
| 991 | pm_runtime_enable(dev); | ||
| 992 | pm_runtime_get_sync(dev); | ||
| 993 | } | ||
| 994 | |||
| 995 | /* enable top level clock */ | ||
| 996 | err = clk_prepare_enable(pcie->free_ck); | ||
| 997 | if (err) { | ||
| 998 | dev_err(dev, "failed to enable free_ck\n"); | ||
| 999 | goto err_free_ck; | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | return 0; | ||
| 1003 | |||
| 1004 | err_free_ck: | ||
| 1005 | if (dev->pm_domain) { | ||
| 1006 | pm_runtime_put_sync(dev); | ||
| 1007 | pm_runtime_disable(dev); | ||
| 1008 | } | ||
| 1009 | |||
| 1010 | return err; | ||
| 1011 | } | ||
| 1012 | |||
| 1013 | static int mtk_pcie_setup(struct mtk_pcie *pcie) | ||
| 1014 | { | ||
| 1015 | struct device *dev = pcie->dev; | ||
| 1016 | struct device_node *node = dev->of_node, *child; | ||
| 1017 | struct of_pci_range_parser parser; | ||
| 1018 | struct of_pci_range range; | ||
| 1019 | struct resource res; | ||
| 1020 | struct mtk_pcie_port *port, *tmp; | ||
| 1021 | int err; | ||
| 1022 | |||
| 1023 | if (of_pci_range_parser_init(&parser, node)) { | ||
| 1024 | dev_err(dev, "missing \"ranges\" property\n"); | ||
| 1025 | return -EINVAL; | ||
| 1026 | } | ||
| 1027 | |||
| 1028 | for_each_of_pci_range(&parser, &range) { | ||
| 1029 | err = of_pci_range_to_resource(&range, node, &res); | ||
| 1030 | if (err < 0) | ||
| 1031 | return err; | ||
| 1032 | |||
| 1033 | switch (res.flags & IORESOURCE_TYPE_BITS) { | ||
| 1034 | case IORESOURCE_IO: | ||
| 1035 | pcie->offset.io = res.start - range.pci_addr; | ||
| 1036 | |||
| 1037 | memcpy(&pcie->pio, &res, sizeof(res)); | ||
| 1038 | pcie->pio.name = node->full_name; | ||
| 1039 | |||
| 1040 | pcie->io.start = range.cpu_addr; | ||
| 1041 | pcie->io.end = range.cpu_addr + range.size - 1; | ||
| 1042 | pcie->io.flags = IORESOURCE_MEM; | ||
| 1043 | pcie->io.name = "I/O"; | ||
| 1044 | |||
| 1045 | memcpy(&res, &pcie->io, sizeof(res)); | ||
| 1046 | break; | ||
| 1047 | |||
| 1048 | case IORESOURCE_MEM: | ||
| 1049 | pcie->offset.mem = res.start - range.pci_addr; | ||
| 1050 | |||
| 1051 | memcpy(&pcie->mem, &res, sizeof(res)); | ||
| 1052 | pcie->mem.name = "non-prefetchable"; | ||
| 1053 | break; | ||
| 1054 | } | ||
| 1055 | } | ||
| 1056 | |||
| 1057 | err = of_pci_parse_bus_range(node, &pcie->busn); | ||
| 1058 | if (err < 0) { | ||
| 1059 | dev_err(dev, "failed to parse bus ranges property: %d\n", err); | ||
| 1060 | pcie->busn.name = node->name; | ||
| 1061 | pcie->busn.start = 0; | ||
| 1062 | pcie->busn.end = 0xff; | ||
| 1063 | pcie->busn.flags = IORESOURCE_BUS; | ||
| 1064 | } | ||
| 1065 | |||
| 1066 | for_each_available_child_of_node(node, child) { | ||
| 1067 | int slot; | ||
| 1068 | |||
| 1069 | err = of_pci_get_devfn(child); | ||
| 1070 | if (err < 0) { | ||
| 1071 | dev_err(dev, "failed to parse devfn: %d\n", err); | ||
| 1072 | return err; | ||
| 1073 | } | ||
| 1074 | |||
| 1075 | slot = PCI_SLOT(err); | ||
| 1076 | |||
| 1077 | err = mtk_pcie_parse_port(pcie, child, slot); | ||
| 1078 | if (err) | ||
| 1079 | return err; | ||
| 1080 | } | ||
| 1081 | |||
| 1082 | err = mtk_pcie_subsys_powerup(pcie); | ||
| 1083 | if (err) | ||
| 1084 | return err; | ||
| 1085 | |||
| 1086 | /* enable each port, and then check link status */ | ||
| 1087 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) | ||
| 1088 | mtk_pcie_enable_port(port); | ||
| 1089 | |||
| 1090 | /* power down PCIe subsys if slots are all empty (link down) */ | ||
| 1091 | if (list_empty(&pcie->ports)) | ||
| 1092 | mtk_pcie_subsys_powerdown(pcie); | ||
| 1093 | |||
| 1094 | return 0; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | static int mtk_pcie_request_resources(struct mtk_pcie *pcie) | ||
| 1098 | { | ||
| 1099 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 1100 | struct list_head *windows = &host->windows; | ||
| 1101 | struct device *dev = pcie->dev; | ||
| 1102 | int err; | ||
| 1103 | |||
| 1104 | pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); | ||
| 1105 | pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); | ||
| 1106 | pci_add_resource(windows, &pcie->busn); | ||
| 1107 | |||
| 1108 | err = devm_request_pci_bus_resources(dev, windows); | ||
| 1109 | if (err < 0) | ||
| 1110 | return err; | ||
| 1111 | |||
| 1112 | pci_remap_iospace(&pcie->pio, pcie->io.start); | ||
| 1113 | |||
| 1114 | return 0; | ||
| 1115 | } | ||
| 1116 | |||
| 1117 | static int mtk_pcie_register_host(struct pci_host_bridge *host) | ||
| 1118 | { | ||
| 1119 | struct mtk_pcie *pcie = pci_host_bridge_priv(host); | ||
| 1120 | struct pci_bus *child; | ||
| 1121 | int err; | ||
| 1122 | |||
| 1123 | host->busnr = pcie->busn.start; | ||
| 1124 | host->dev.parent = pcie->dev; | ||
| 1125 | host->ops = pcie->soc->ops; | ||
| 1126 | host->map_irq = of_irq_parse_and_map_pci; | ||
| 1127 | host->swizzle_irq = pci_common_swizzle; | ||
| 1128 | host->sysdata = pcie; | ||
| 1129 | |||
| 1130 | err = pci_scan_root_bus_bridge(host); | ||
| 1131 | if (err < 0) | ||
| 1132 | return err; | ||
| 1133 | |||
| 1134 | pci_bus_size_bridges(host->bus); | ||
| 1135 | pci_bus_assign_resources(host->bus); | ||
| 1136 | |||
| 1137 | list_for_each_entry(child, &host->bus->children, node) | ||
| 1138 | pcie_bus_configure_settings(child); | ||
| 1139 | |||
| 1140 | pci_bus_add_devices(host->bus); | ||
| 1141 | |||
| 1142 | return 0; | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | static int mtk_pcie_probe(struct platform_device *pdev) | ||
| 1146 | { | ||
| 1147 | struct device *dev = &pdev->dev; | ||
| 1148 | struct mtk_pcie *pcie; | ||
| 1149 | struct pci_host_bridge *host; | ||
| 1150 | int err; | ||
| 1151 | |||
| 1152 | host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 1153 | if (!host) | ||
| 1154 | return -ENOMEM; | ||
| 1155 | |||
| 1156 | pcie = pci_host_bridge_priv(host); | ||
| 1157 | |||
| 1158 | pcie->dev = dev; | ||
| 1159 | pcie->soc = of_device_get_match_data(dev); | ||
| 1160 | platform_set_drvdata(pdev, pcie); | ||
| 1161 | INIT_LIST_HEAD(&pcie->ports); | ||
| 1162 | |||
| 1163 | err = mtk_pcie_setup(pcie); | ||
| 1164 | if (err) | ||
| 1165 | return err; | ||
| 1166 | |||
| 1167 | err = mtk_pcie_request_resources(pcie); | ||
| 1168 | if (err) | ||
| 1169 | goto put_resources; | ||
| 1170 | |||
| 1171 | err = mtk_pcie_register_host(host); | ||
| 1172 | if (err) | ||
| 1173 | goto put_resources; | ||
| 1174 | |||
| 1175 | return 0; | ||
| 1176 | |||
| 1177 | put_resources: | ||
| 1178 | if (!list_empty(&pcie->ports)) | ||
| 1179 | mtk_pcie_put_resources(pcie); | ||
| 1180 | |||
| 1181 | return err; | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | static const struct mtk_pcie_soc mtk_pcie_soc_v1 = { | ||
| 1185 | .ops = &mtk_pcie_ops, | ||
| 1186 | .startup = mtk_pcie_startup_port, | ||
| 1187 | }; | ||
| 1188 | |||
| 1189 | static const struct mtk_pcie_soc mtk_pcie_soc_mt2712 = { | ||
| 1190 | .ops = &mtk_pcie_ops_v2, | ||
| 1191 | .startup = mtk_pcie_startup_port_v2, | ||
| 1192 | .setup_irq = mtk_pcie_setup_irq, | ||
| 1193 | }; | ||
| 1194 | |||
| 1195 | static const struct mtk_pcie_soc mtk_pcie_soc_mt7622 = { | ||
| 1196 | .need_fix_class_id = true, | ||
| 1197 | .ops = &mtk_pcie_ops_v2, | ||
| 1198 | .startup = mtk_pcie_startup_port_v2, | ||
| 1199 | .setup_irq = mtk_pcie_setup_irq, | ||
| 1200 | }; | ||
| 1201 | |||
| 1202 | static const struct of_device_id mtk_pcie_ids[] = { | ||
| 1203 | { .compatible = "mediatek,mt2701-pcie", .data = &mtk_pcie_soc_v1 }, | ||
| 1204 | { .compatible = "mediatek,mt7623-pcie", .data = &mtk_pcie_soc_v1 }, | ||
| 1205 | { .compatible = "mediatek,mt2712-pcie", .data = &mtk_pcie_soc_mt2712 }, | ||
| 1206 | { .compatible = "mediatek,mt7622-pcie", .data = &mtk_pcie_soc_mt7622 }, | ||
| 1207 | {}, | ||
| 1208 | }; | ||
| 1209 | |||
| 1210 | static struct platform_driver mtk_pcie_driver = { | ||
| 1211 | .probe = mtk_pcie_probe, | ||
| 1212 | .driver = { | ||
| 1213 | .name = "mtk-pcie", | ||
| 1214 | .of_match_table = mtk_pcie_ids, | ||
| 1215 | .suppress_bind_attrs = true, | ||
| 1216 | }, | ||
| 1217 | }; | ||
| 1218 | builtin_platform_driver(mtk_pcie_driver); | ||
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c new file mode 100644 index 000000000000..4d6c20e47bed --- /dev/null +++ b/drivers/pci/controller/pcie-mobiveil.c | |||
| @@ -0,0 +1,866 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Mobiveil PCIe Host controller | ||
| 4 | * | ||
| 5 | * Copyright (c) 2018 Mobiveil Inc. | ||
| 6 | * Author: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/delay.h> | ||
| 10 | #include <linux/init.h> | ||
| 11 | #include <linux/interrupt.h> | ||
| 12 | #include <linux/irq.h> | ||
| 13 | #include <linux/irqchip/chained_irq.h> | ||
| 14 | #include <linux/irqdomain.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | #include <linux/msi.h> | ||
| 18 | #include <linux/of_address.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | #include <linux/of_platform.h> | ||
| 21 | #include <linux/of_pci.h> | ||
| 22 | #include <linux/pci.h> | ||
| 23 | #include <linux/platform_device.h> | ||
| 24 | #include <linux/slab.h> | ||
| 25 | |||
| 26 | /* register offsets and bit positions */ | ||
| 27 | |||
| 28 | /* | ||
| 29 | * Translation tables are grouped into windows; each window's registers | ||
| 30 | * are grouped into blocks of 4 or 16 registers each. | ||
| 31 | */ | ||
| 32 | #define PAB_REG_BLOCK_SIZE 16 | ||
| 33 | #define PAB_EXT_REG_BLOCK_SIZE 4 | ||
| 34 | |||
| 35 | #define PAB_REG_ADDR(offset, win) (offset + (win * PAB_REG_BLOCK_SIZE)) | ||
| 36 | #define PAB_EXT_REG_ADDR(offset, win) (offset + (win * PAB_EXT_REG_BLOCK_SIZE)) | ||
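| | |||
| | /* | ||
| | * Worked example of the addressing scheme: with the block sizes above, | ||
| | * PAB_AXI_AMAP_CTRL(2) = PAB_REG_ADDR(0x0ba0, 2) = 0x0ba0 + 2 * 16 = | ||
| | * 0x0bc0, while the extended registers step in units of 4, e.g. | ||
| | * PAB_EXT_AXI_AMAP_SIZE(2) = 0xbaf0 + 2 * 4 = 0xbaf8. | ||
| | */ | ||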
| 37 | |||
| 38 | #define LTSSM_STATUS 0x0404 | ||
| 39 | #define LTSSM_STATUS_L0_MASK 0x3f | ||
| 40 | #define LTSSM_STATUS_L0 0x2d | ||
| 41 | |||
| 42 | #define PAB_CTRL 0x0808 | ||
| 43 | #define AMBA_PIO_ENABLE_SHIFT 0 | ||
| 44 | #define PEX_PIO_ENABLE_SHIFT 1 | ||
| 45 | #define PAGE_SEL_SHIFT 13 | ||
| 46 | #define PAGE_SEL_MASK 0x3f | ||
| 47 | #define PAGE_LO_MASK 0x3ff | ||
| 48 | #define PAGE_SEL_EN 0xc00 | ||
| 49 | #define PAGE_SEL_OFFSET_SHIFT 10 | ||
| 50 | |||
| 51 | #define PAB_AXI_PIO_CTRL 0x0840 | ||
| 52 | #define APIO_EN_MASK 0xf | ||
| 53 | |||
| 54 | #define PAB_PEX_PIO_CTRL 0x08c0 | ||
| 55 | #define PIO_ENABLE_SHIFT 0 | ||
| 56 | |||
| 57 | #define PAB_INTP_AMBA_MISC_ENB 0x0b0c | ||
| 58 | #define PAB_INTP_AMBA_MISC_STAT 0x0b1c | ||
| 59 | #define PAB_INTP_INTX_MASK 0x01e0 | ||
| 60 | #define PAB_INTP_MSI_MASK 0x8 | ||
| 61 | |||
| 62 | #define PAB_AXI_AMAP_CTRL(win) PAB_REG_ADDR(0x0ba0, win) | ||
| 63 | #define WIN_ENABLE_SHIFT 0 | ||
| 64 | #define WIN_TYPE_SHIFT 1 | ||
| 65 | |||
| 66 | #define PAB_EXT_AXI_AMAP_SIZE(win) PAB_EXT_REG_ADDR(0xbaf0, win) | ||
| 67 | |||
| 68 | #define PAB_AXI_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x0ba4, win) | ||
| 69 | #define AXI_WINDOW_ALIGN_MASK 3 | ||
| 70 | |||
| 71 | #define PAB_AXI_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x0ba8, win) | ||
| 72 | #define PAB_BUS_SHIFT 24 | ||
| 73 | #define PAB_DEVICE_SHIFT 19 | ||
| 74 | #define PAB_FUNCTION_SHIFT 16 | ||
| 75 | |||
| 76 | #define PAB_AXI_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x0bac, win) | ||
| 77 | #define PAB_INTP_AXI_PIO_CLASS 0x474 | ||
| 78 | |||
| 79 | #define PAB_PEX_AMAP_CTRL(win) PAB_REG_ADDR(0x4ba0, win) | ||
| 80 | #define AMAP_CTRL_EN_SHIFT 0 | ||
| 81 | #define AMAP_CTRL_TYPE_SHIFT 1 | ||
| 82 | |||
| 83 | #define PAB_EXT_PEX_AMAP_SIZEN(win) PAB_EXT_REG_ADDR(0xbef0, win) | ||
| 84 | #define PAB_PEX_AMAP_AXI_WIN(win) PAB_REG_ADDR(0x4ba4, win) | ||
| 85 | #define PAB_PEX_AMAP_PEX_WIN_L(win) PAB_REG_ADDR(0x4ba8, win) | ||
| 86 | #define PAB_PEX_AMAP_PEX_WIN_H(win) PAB_REG_ADDR(0x4bac, win) | ||
| 87 | |||
| 88 | /* starting offset of INTX bits in status register */ | ||
| 89 | #define PAB_INTX_START 5 | ||
| 90 | |||
| 91 | /* supported number of MSI interrupts */ | ||
| 92 | #define PCI_NUM_MSI 16 | ||
| 93 | |||
| 94 | /* MSI registers */ | ||
| 95 | #define MSI_BASE_LO_OFFSET 0x04 | ||
| 96 | #define MSI_BASE_HI_OFFSET 0x08 | ||
| 97 | #define MSI_SIZE_OFFSET 0x0c | ||
| 98 | #define MSI_ENABLE_OFFSET 0x14 | ||
| 99 | #define MSI_STATUS_OFFSET 0x18 | ||
| 100 | #define MSI_DATA_OFFSET 0x20 | ||
| 101 | #define MSI_ADDR_L_OFFSET 0x24 | ||
| 102 | #define MSI_ADDR_H_OFFSET 0x28 | ||
| 103 | |||
| 104 | /* outbound and inbound window definitions */ | ||
| 105 | #define WIN_NUM_0 0 | ||
| 106 | #define WIN_NUM_1 1 | ||
| 107 | #define CFG_WINDOW_TYPE 0 | ||
| 108 | #define IO_WINDOW_TYPE 1 | ||
| 109 | #define MEM_WINDOW_TYPE 2 | ||
| 110 | #define IB_WIN_SIZE ((u64)256 * 1024 * 1024 * 1024) /* 256 GiB; u64 cast avoids integer overflow */ | ||
| 111 | #define MAX_PIO_WINDOWS 8 | ||
| 112 | |||
| 113 | /* Parameters for the link-up polling routine */ | ||
| 114 | #define LINK_WAIT_MAX_RETRIES 10 | ||
| 115 | #define LINK_WAIT_MIN 90000 | ||
| 116 | #define LINK_WAIT_MAX 100000 | ||
| 117 | |||
| 118 | struct mobiveil_msi { /* MSI information */ | ||
| 119 | struct mutex lock; /* protect bitmap variable */ | ||
| 120 | struct irq_domain *msi_domain; | ||
| 121 | struct irq_domain *dev_domain; | ||
| 122 | phys_addr_t msi_pages_phys; | ||
| 123 | int num_of_vectors; | ||
| 124 | DECLARE_BITMAP(msi_irq_in_use, PCI_NUM_MSI); | ||
| 125 | }; | ||
| 126 | |||
| 127 | struct mobiveil_pcie { | ||
| 128 | struct platform_device *pdev; | ||
| 129 | struct list_head resources; | ||
| 130 | void __iomem *config_axi_slave_base; /* endpoint config base */ | ||
| 131 | void __iomem *csr_axi_slave_base; /* root port config base */ | ||
| 132 | void __iomem *apb_csr_base; /* MSI register base */ | ||
| 133 | phys_addr_t pcie_reg_base; /* physical PCIe controller base */ | ||
| 134 | struct irq_domain *intx_domain; | ||
| 135 | raw_spinlock_t intx_mask_lock; | ||
| 136 | int irq; | ||
| 137 | int apio_wins; | ||
| 138 | int ppio_wins; | ||
| 139 | int ob_wins_configured; /* configured outbound windows */ | ||
| 140 | int ib_wins_configured; /* configured inbound windows */ | ||
| 141 | struct resource *ob_io_res; | ||
| 142 | char root_bus_nr; | ||
| 143 | struct mobiveil_msi msi; | ||
| 144 | }; | ||
| 145 | |||
| 146 | static inline void csr_writel(struct mobiveil_pcie *pcie, const u32 value, | ||
| 147 | const u32 reg) | ||
| 148 | { | ||
| 149 | writel_relaxed(value, pcie->csr_axi_slave_base + reg); | ||
| 150 | } | ||
| 151 | |||
| 152 | static inline u32 csr_readl(struct mobiveil_pcie *pcie, const u32 reg) | ||
| 153 | { | ||
| 154 | return readl_relaxed(pcie->csr_axi_slave_base + reg); | ||
| 155 | } | ||
| 156 | |||
| 157 | static bool mobiveil_pcie_link_up(struct mobiveil_pcie *pcie) | ||
| 158 | { | ||
| 159 | return (csr_readl(pcie, LTSSM_STATUS) & | ||
| 160 | LTSSM_STATUS_L0_MASK) == LTSSM_STATUS_L0; | ||
| 161 | } | ||
| 162 | |||
| 163 | static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | ||
| 164 | { | ||
| 165 | struct mobiveil_pcie *pcie = bus->sysdata; | ||
| 166 | |||
| 167 | /* Only one device down on each root port */ | ||
| 168 | if ((bus->number == pcie->root_bus_nr) && (devfn > 0)) | ||
| 169 | return false; | ||
| 170 | |||
| 171 | /* | ||
| 172 | * Do not read more than one device on the bus directly | ||
| 173 | * attached to RC | ||
| 174 | */ | ||
| 175 | if ((bus->primary == pcie->root_bus_nr) && (devfn > 0)) | ||
| 176 | return false; | ||
| 177 | |||
| 178 | return true; | ||
| 179 | } | ||
| 180 | |||
| 181 | /* | ||
| 182 | * mobiveil_pcie_map_bus - routine to get the configuration base of either | ||
| 183 | * root port or endpoint | ||
| 184 | */ | ||
| 185 | static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, | ||
| 186 | unsigned int devfn, int where) | ||
| 187 | { | ||
| 188 | struct mobiveil_pcie *pcie = bus->sysdata; | ||
| 189 | |||
| 190 | if (!mobiveil_pcie_valid_device(bus, devfn)) | ||
| 191 | return NULL; | ||
| 192 | |||
| 193 | if (bus->number == pcie->root_bus_nr) { | ||
| 194 | /* RC config access */ | ||
| 195 | return pcie->csr_axi_slave_base + where; | ||
| 196 | } | ||
| 197 | |||
| 198 | /* | ||
| 199 | * EP config access (in Config/APIO space) | ||
| 200 | * Program PEX Address base (31..16 bits) with appropriate value | ||
| 201 | * (BDF) in PAB_AXI_AMAP_PEX_WIN_L0 Register. | ||
| 202 | * Relies on pci_lock serialization | ||
| 203 | */ | ||
| 204 | csr_writel(pcie, bus->number << PAB_BUS_SHIFT | | ||
| 205 | PCI_SLOT(devfn) << PAB_DEVICE_SHIFT | | ||
| 206 | PCI_FUNC(devfn) << PAB_FUNCTION_SHIFT, | ||
| 207 | PAB_AXI_AMAP_PEX_WIN_L(WIN_NUM_0)); | ||
| 208 | return pcie->config_axi_slave_base + where; | ||
| 209 | } | ||
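| | |||
| | /* | ||
| | * For illustration: a config access to bus 2, device 3, function 1 | ||
| | * programs 2 << 24 | 3 << 19 | 1 << 16 = 0x02190000 into | ||
| | * PAB_AXI_AMAP_PEX_WIN_L(0), so the window's PCIe address carries the | ||
| | * target BDF while "where" selects the register within that function. | ||
| | */ | ||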
| 210 | |||
| 211 | static struct pci_ops mobiveil_pcie_ops = { | ||
| 212 | .map_bus = mobiveil_pcie_map_bus, | ||
| 213 | .read = pci_generic_config_read, | ||
| 214 | .write = pci_generic_config_write, | ||
| 215 | }; | ||
| 216 | |||
| 217 | static void mobiveil_pcie_isr(struct irq_desc *desc) | ||
| 218 | { | ||
| 219 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 220 | struct mobiveil_pcie *pcie = irq_desc_get_handler_data(desc); | ||
| 221 | struct device *dev = &pcie->pdev->dev; | ||
| 222 | struct mobiveil_msi *msi = &pcie->msi; | ||
| 223 | u32 msi_data, msi_addr_lo, msi_addr_hi; | ||
| 224 | u32 intr_status, msi_status; | ||
| 225 | unsigned long shifted_status; | ||
| 226 | u32 bit, virq, val, mask; | ||
| 227 | |||
| 228 | /* | ||
| 229 | * The core provides a single interrupt for both INTx and MSI messages, | ||
| 230 | * so read both the INTx and the MSI status. | ||
| 231 | */ | ||
| 232 | |||
| 233 | chained_irq_enter(chip, desc); | ||
| 234 | |||
| 235 | /* read INTx status */ | ||
| 236 | val = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT); | ||
| 237 | mask = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); | ||
| 238 | intr_status = val & mask; | ||
| 239 | |||
| 240 | /* Handle INTx */ | ||
| 241 | if (intr_status & PAB_INTP_INTX_MASK) { | ||
| 242 | shifted_status = csr_readl(pcie, PAB_INTP_AMBA_MISC_STAT) >> | ||
| 243 | PAB_INTX_START; | ||
| 244 | do { | ||
| 245 | for_each_set_bit(bit, &shifted_status, PCI_NUM_INTX) { | ||
| 246 | virq = irq_find_mapping(pcie->intx_domain, | ||
| 247 | bit + 1); | ||
| 248 | if (virq) | ||
| 249 | generic_handle_irq(virq); | ||
| 250 | else | ||
| 251 | dev_err_ratelimited(dev, | ||
| 252 | "unexpected IRQ, INT%d\n", bit); | ||
| 253 | |||
| 254 | /* clear interrupt */ | ||
| 255 | csr_writel(pcie, | ||
| 256 | shifted_status << PAB_INTX_START, | ||
| 257 | PAB_INTP_AMBA_MISC_STAT); | ||
| 258 | } | ||
| 259 | } while ((shifted_status >> PAB_INTX_START) != 0); | ||
| 260 | } | ||
| 261 | |||
| 262 | /* read extra MSI status register */ | ||
| 263 | msi_status = readl_relaxed(pcie->apb_csr_base + MSI_STATUS_OFFSET); | ||
| 264 | |||
| 265 | /* handle MSI interrupts */ | ||
| 266 | while (msi_status & 1) { | ||
| 267 | msi_data = readl_relaxed(pcie->apb_csr_base | ||
| 268 | + MSI_DATA_OFFSET); | ||
| 269 | |||
| 270 | /* | ||
| 271 | * The MSI_STATUS_OFFSET register only clears once both the | ||
| 272 | * MSI data and the MSI address have been popped from the | ||
| 273 | * hardware FIFO, hence the following two dummy reads of the | ||
| 274 | * address registers. | ||
| 275 | */ | ||
| 276 | msi_addr_lo = readl_relaxed(pcie->apb_csr_base + | ||
| 277 | MSI_ADDR_L_OFFSET); | ||
| 278 | msi_addr_hi = readl_relaxed(pcie->apb_csr_base + | ||
| 279 | MSI_ADDR_H_OFFSET); | ||
| 280 | dev_dbg(dev, "MSI registers, data: %08x, addr: %08x:%08x\n", | ||
| 281 | msi_data, msi_addr_hi, msi_addr_lo); | ||
| 282 | |||
| 283 | virq = irq_find_mapping(msi->dev_domain, msi_data); | ||
| 284 | if (virq) | ||
| 285 | generic_handle_irq(virq); | ||
| 286 | |||
| 287 | msi_status = readl_relaxed(pcie->apb_csr_base + | ||
| 288 | MSI_STATUS_OFFSET); | ||
| 289 | } | ||
| 290 | |||
| 291 | /* Clear the interrupt status */ | ||
| 292 | csr_writel(pcie, intr_status, PAB_INTP_AMBA_MISC_STAT); | ||
| 293 | chained_irq_exit(chip, desc); | ||
| 294 | } | ||
| 295 | |||
| 296 | static int mobiveil_pcie_parse_dt(struct mobiveil_pcie *pcie) | ||
| 297 | { | ||
| 298 | struct device *dev = &pcie->pdev->dev; | ||
| 299 | struct platform_device *pdev = pcie->pdev; | ||
| 300 | struct device_node *node = dev->of_node; | ||
| 301 | struct resource *res; | ||
| 302 | const char *type; | ||
| 303 | |||
| 304 | type = of_get_property(node, "device_type", NULL); | ||
| 305 | if (!type || strcmp(type, "pci")) { | ||
| 306 | dev_err(dev, "invalid \"device_type\" %s\n", type); | ||
| 307 | return -EINVAL; | ||
| 308 | } | ||
| 309 | |||
| 310 | /* map config resource */ | ||
| 311 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
| 312 | "config_axi_slave"); | ||
| 313 | pcie->config_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 314 | if (IS_ERR(pcie->config_axi_slave_base)) | ||
| 315 | return PTR_ERR(pcie->config_axi_slave_base); | ||
| 316 | pcie->ob_io_res = res; | ||
| 317 | |||
| 318 | /* map csr resource */ | ||
| 319 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
| 320 | "csr_axi_slave"); | ||
| 321 | pcie->csr_axi_slave_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 322 | if (IS_ERR(pcie->csr_axi_slave_base)) | ||
| 323 | return PTR_ERR(pcie->csr_axi_slave_base); | ||
| 324 | pcie->pcie_reg_base = res->start; | ||
| 325 | |||
| 326 | /* map MSI config resource */ | ||
| 327 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb_csr"); | ||
| 328 | pcie->apb_csr_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 329 | if (IS_ERR(pcie->apb_csr_base)) | ||
| 330 | return PTR_ERR(pcie->apb_csr_base); | ||
| 331 | |||
| 332 | /* read the number of windows requested */ | ||
| 333 | if (of_property_read_u32(node, "apio-wins", &pcie->apio_wins)) | ||
| 334 | pcie->apio_wins = MAX_PIO_WINDOWS; | ||
| 335 | |||
| 336 | if (of_property_read_u32(node, "ppio-wins", &pcie->ppio_wins)) | ||
| 337 | pcie->ppio_wins = MAX_PIO_WINDOWS; | ||
| 338 | |||
| 339 | pcie->irq = platform_get_irq(pdev, 0); | ||
| 340 | if (pcie->irq <= 0) { | ||
| 341 | dev_err(dev, "failed to map IRQ: %d\n", pcie->irq); | ||
| 342 | return -ENODEV; | ||
| 343 | } | ||
| 344 | |||
| 345 | irq_set_chained_handler_and_data(pcie->irq, mobiveil_pcie_isr, pcie); | ||
| 346 | |||
| 347 | return 0; | ||
| 348 | } | ||
| 349 | |||
| 350 | /* | ||
| 351 | * select_paged_register - select the page of a paged root complex register | ||
| 352 | * | ||
| 353 | * Root complex registers are paged. For this scheme to work, the | ||
| 354 | * upper 6 bits of the register offset are written to the pg_sel | ||
| 355 | * field of the PAB_CTRL register, while the lower 10 bits, combined | ||
| 356 | * with PAGE_SEL_EN, are used as the offset within the page. | ||
| 357 | */ | ||
| 358 | static void select_paged_register(struct mobiveil_pcie *pcie, u32 offset) | ||
| 359 | { | ||
| 360 | int pab_ctrl_dw, pg_sel; | ||
| 361 | |||
| 362 | /* clear pg_sel field */ | ||
| 363 | pab_ctrl_dw = csr_readl(pcie, PAB_CTRL); | ||
| 364 | pab_ctrl_dw = (pab_ctrl_dw & ~(PAGE_SEL_MASK << PAGE_SEL_SHIFT)); | ||
| 365 | |||
| 366 | /* set pg_sel field */ | ||
| 367 | pg_sel = (offset >> PAGE_SEL_OFFSET_SHIFT) & PAGE_SEL_MASK; | ||
| 368 | pab_ctrl_dw |= ((pg_sel << PAGE_SEL_SHIFT)); | ||
| 369 | csr_writel(pcie, pab_ctrl_dw, PAB_CTRL); | ||
| 370 | } | ||
| 371 | |||
| 372 | static void write_paged_register(struct mobiveil_pcie *pcie, | ||
| 373 | u32 val, u32 offset) | ||
| 374 | { | ||
| 375 | u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; | ||
| 376 | |||
| 377 | select_paged_register(pcie, offset); | ||
| 378 | csr_writel(pcie, val, off); | ||
| 379 | } | ||
| 380 | |||
| 381 | static u32 read_paged_register(struct mobiveil_pcie *pcie, u32 offset) | ||
| 382 | { | ||
| 383 | u32 off = (offset & PAGE_LO_MASK) | PAGE_SEL_EN; | ||
| 384 | |||
| 385 | select_paged_register(pcie, offset); | ||
| 386 | return csr_readl(pcie, off); | ||
| 387 | } | ||
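| | |||
| | /* | ||
| | * Worked example of the paging split: for offset 0x4ba0 | ||
| | * (PAB_PEX_AMAP_CTRL(0)), pg_sel = 0x4ba0 >> 10 = 0x12 is written | ||
| | * into PAB_CTRL, and the access itself then targets | ||
| | * (0x4ba0 & 0x3ff) | PAGE_SEL_EN = 0x3a0 | 0xc00 = 0xfa0. | ||
| | */ | ||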
| 388 | |||
| 389 | static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num, | ||
| 390 | int pci_addr, u32 type, u64 size) | ||
| 391 | { | ||
| 392 | int pio_ctrl_val; | ||
| 393 | int amap_ctrl_dw; | ||
| 394 | u64 size64 = ~(size - 1); | ||
| 395 | |||
| 396 | if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) { | ||
| 397 | dev_err(&pcie->pdev->dev, | ||
| 398 | "ERROR: max inbound windows reached !\n"); | ||
| 399 | return; | ||
| 400 | } | ||
| 401 | |||
| 402 | pio_ctrl_val = csr_readl(pcie, PAB_PEX_PIO_CTRL); | ||
| 403 | csr_writel(pcie, | ||
| 404 | pio_ctrl_val | (1 << PIO_ENABLE_SHIFT), PAB_PEX_PIO_CTRL); | ||
| 405 | amap_ctrl_dw = read_paged_register(pcie, PAB_PEX_AMAP_CTRL(win_num)); | ||
| 406 | amap_ctrl_dw = (amap_ctrl_dw | (type << AMAP_CTRL_TYPE_SHIFT)); | ||
| 407 | amap_ctrl_dw = (amap_ctrl_dw | (1 << AMAP_CTRL_EN_SHIFT)); | ||
| 408 | |||
| 409 | write_paged_register(pcie, amap_ctrl_dw | lower_32_bits(size64), | ||
| 410 | PAB_PEX_AMAP_CTRL(win_num)); | ||
| 411 | |||
| 412 | write_paged_register(pcie, upper_32_bits(size64), | ||
| 413 | PAB_EXT_PEX_AMAP_SIZEN(win_num)); | ||
| 414 | |||
| 415 | write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_AXI_WIN(win_num)); | ||
| 416 | write_paged_register(pcie, pci_addr, PAB_PEX_AMAP_PEX_WIN_L(win_num)); | ||
| 417 | write_paged_register(pcie, 0, PAB_PEX_AMAP_PEX_WIN_H(win_num)); | ||
| 418 | } | ||
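| | |||
| | /* | ||
| | * For illustration: window sizes are encoded as ~(size - 1). For the | ||
| | * default 256 GiB inbound window (2^38 bytes), size64 = | ||
| | * 0xffffffc000000000, so lower_32_bits(size64) = 0 lands in the AMAP | ||
| | * control register and upper_32_bits(size64) = 0xffffffc0 in the | ||
| | * extended size register. | ||
| | */ | ||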
| 419 | |||
| 420 | /* | ||
| 421 | * routine to program the outbound windows | ||
| 422 | */ | ||
| 423 | static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num, | ||
| 424 | u64 cpu_addr, u64 pci_addr, u32 config_io_bit, u64 size) | ||
| 425 | { | ||
| 426 | |||
| 427 | u32 value, type; | ||
| 428 | u64 size64 = ~(size - 1); | ||
| 429 | |||
| 430 | if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) { | ||
| 431 | dev_err(&pcie->pdev->dev, | ||
| 432 | "ERROR: max outbound windows reached !\n"); | ||
| 433 | return; | ||
| 434 | } | ||
| 435 | |||
| 436 | /* | ||
| 437 | * Program the enable bit, the window type and the lower size bits | ||
| 438 | * into the PAB_AXI_AMAP_CTRL register. | ||
| 439 | */ | ||
| 440 | type = config_io_bit; | ||
| 441 | value = csr_readl(pcie, PAB_AXI_AMAP_CTRL(win_num)); | ||
| 442 | csr_writel(pcie, 1 << WIN_ENABLE_SHIFT | type << WIN_TYPE_SHIFT | | ||
| 443 | lower_32_bits(size64), PAB_AXI_AMAP_CTRL(win_num)); | ||
| 444 | |||
| 445 | write_paged_register(pcie, upper_32_bits(size64), | ||
| 446 | PAB_EXT_AXI_AMAP_SIZE(win_num)); | ||
| 447 | |||
| 448 | /* | ||
| 449 | * program AXI window base with appropriate value in | ||
| 450 | * PAB_AXI_AMAP_AXI_WIN0 register | ||
| 451 | */ | ||
| 452 | value = csr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(win_num)); | ||
| 453 | csr_writel(pcie, cpu_addr & (~AXI_WINDOW_ALIGN_MASK), | ||
| 454 | PAB_AXI_AMAP_AXI_WIN(win_num)); | ||
| 455 | |||
| 456 | value = csr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(win_num)); | ||
| 457 | |||
| 458 | csr_writel(pcie, lower_32_bits(pci_addr), | ||
| 459 | PAB_AXI_AMAP_PEX_WIN_L(win_num)); | ||
| 460 | csr_writel(pcie, upper_32_bits(pci_addr), | ||
| 461 | PAB_AXI_AMAP_PEX_WIN_H(win_num)); | ||
| 462 | |||
| 463 | pcie->ob_wins_configured++; | ||
| 464 | } | ||
| 465 | |||
| 466 | static int mobiveil_bringup_link(struct mobiveil_pcie *pcie) | ||
| 467 | { | ||
| 468 | int retries; | ||
| 469 | |||
| 470 | /* check if the link is up or not */ | ||
| 471 | for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { | ||
| 472 | if (mobiveil_pcie_link_up(pcie)) | ||
| 473 | return 0; | ||
| 474 | |||
| 475 | usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); | ||
| 476 | } | ||
| 477 | dev_err(&pcie->pdev->dev, "link never came up\n"); | ||
| 478 | return -ETIMEDOUT; | ||
| 479 | } | ||
| 480 | |||
| 481 | static void mobiveil_pcie_enable_msi(struct mobiveil_pcie *pcie) | ||
| 482 | { | ||
| 483 | phys_addr_t msg_addr = pcie->pcie_reg_base; | ||
| 484 | struct mobiveil_msi *msi = &pcie->msi; | ||
| 485 | |||
| 486 | pcie->msi.num_of_vectors = PCI_NUM_MSI; | ||
| 487 | msi->msi_pages_phys = (phys_addr_t)msg_addr; | ||
| 488 | |||
| 489 | writel_relaxed(lower_32_bits(msg_addr), | ||
| 490 | pcie->apb_csr_base + MSI_BASE_LO_OFFSET); | ||
| 491 | writel_relaxed(upper_32_bits(msg_addr), | ||
| 492 | pcie->apb_csr_base + MSI_BASE_HI_OFFSET); | ||
| 493 | writel_relaxed(4096, pcie->apb_csr_base + MSI_SIZE_OFFSET); | ||
| 494 | writel_relaxed(1, pcie->apb_csr_base + MSI_ENABLE_OFFSET); | ||
| 495 | } | ||
| 496 | |||
| 497 | static int mobiveil_host_init(struct mobiveil_pcie *pcie) | ||
| 498 | { | ||
| 499 | u32 value, pab_ctrl, type = 0; | ||
| 500 | int err; | ||
| 501 | struct resource_entry *win, *tmp; | ||
| 502 | |||
| 503 | err = mobiveil_bringup_link(pcie); | ||
| 504 | if (err) { | ||
| 505 | dev_info(&pcie->pdev->dev, "link bring-up failed\n"); | ||
| 506 | return err; | ||
| 507 | } | ||
| 508 | |||
| 509 | /* | ||
| 510 | * program Bus Master Enable Bit in Command Register in PAB Config | ||
| 511 | * Space | ||
| 512 | */ | ||
| 513 | value = csr_readl(pcie, PCI_COMMAND); | ||
| 514 | csr_writel(pcie, value | PCI_COMMAND_IO | PCI_COMMAND_MEMORY | | ||
| 515 | PCI_COMMAND_MASTER, PCI_COMMAND); | ||
| 516 | |||
| 517 | /* | ||
| 518 | * program PIO Enable Bit to 1 (and PEX PIO Enable to 1) in PAB_CTRL | ||
| 519 | * register | ||
| 520 | */ | ||
| 521 | pab_ctrl = csr_readl(pcie, PAB_CTRL); | ||
| 522 | csr_writel(pcie, pab_ctrl | (1 << AMBA_PIO_ENABLE_SHIFT) | | ||
| 523 | (1 << PEX_PIO_ENABLE_SHIFT), PAB_CTRL); | ||
| 524 | |||
| 525 | csr_writel(pcie, (PAB_INTP_INTX_MASK | PAB_INTP_MSI_MASK), | ||
| 526 | PAB_INTP_AMBA_MISC_ENB); | ||
| 527 | |||
| 528 | /* | ||
| 529 | * program PIO Enable Bit to 1 and Config Window Enable Bit to 1 in | ||
| 530 | * PAB_AXI_PIO_CTRL Register | ||
| 531 | */ | ||
| 532 | value = csr_readl(pcie, PAB_AXI_PIO_CTRL); | ||
| 533 | csr_writel(pcie, value | APIO_EN_MASK, PAB_AXI_PIO_CTRL); | ||
| 534 | |||
| 535 | /* | ||
| 536 | * Program one outbound window for config reads and one default | ||
| 537 | * inbound window for all upstream traffic; the remaining outbound | ||
| 538 | * windows are configured according to the "ranges" property in | ||
| 539 | * the device tree. | ||
| 540 | */ | ||
| 541 | |||
| 542 | /* config outbound translation window */ | ||
| 543 | program_ob_windows(pcie, pcie->ob_wins_configured, | ||
| 544 | pcie->ob_io_res->start, 0, CFG_WINDOW_TYPE, | ||
| 545 | resource_size(pcie->ob_io_res)); | ||
| 546 | |||
| 547 | /* memory inbound translation window */ | ||
| 548 | program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE); | ||
| 549 | |||
| 550 | /* Get the I/O and memory ranges from DT */ | ||
| 551 | resource_list_for_each_entry_safe(win, tmp, &pcie->resources) { | ||
| 552 | type = 0; | ||
| 553 | if (resource_type(win->res) == IORESOURCE_MEM) | ||
| 554 | type = MEM_WINDOW_TYPE; | ||
| 555 | if (resource_type(win->res) == IORESOURCE_IO) | ||
| 556 | type = IO_WINDOW_TYPE; | ||
| 557 | if (type) { | ||
| 558 | /* configure outbound translation window */ | ||
| 559 | program_ob_windows(pcie, pcie->ob_wins_configured, | ||
| 560 | win->res->start, 0, type, | ||
| 561 | resource_size(win->res)); | ||
| 562 | } | ||
| 563 | } | ||
| 564 | |||
| 565 | /* setup MSI hardware registers */ | ||
| 566 | mobiveil_pcie_enable_msi(pcie); | ||
| 567 | |||
| 568 | return err; | ||
| 569 | } | ||
| 570 | |||
| 571 | static void mobiveil_mask_intx_irq(struct irq_data *data) | ||
| 572 | { | ||
| 573 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 574 | struct mobiveil_pcie *pcie; | ||
| 575 | unsigned long flags; | ||
| 576 | u32 mask, shifted_val; | ||
| 577 | |||
| 578 | pcie = irq_desc_get_chip_data(desc); | ||
| 579 | mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); | ||
| 580 | raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); | ||
| 581 | shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); | ||
| 582 | csr_writel(pcie, (shifted_val & (~mask)), PAB_INTP_AMBA_MISC_ENB); | ||
| 583 | raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); | ||
| 584 | } | ||
| 585 | |||
| 586 | static void mobiveil_unmask_intx_irq(struct irq_data *data) | ||
| 587 | { | ||
| 588 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 589 | struct mobiveil_pcie *pcie; | ||
| 590 | unsigned long flags; | ||
| 591 | u32 shifted_val, mask; | ||
| 592 | |||
| 593 | pcie = irq_desc_get_chip_data(desc); | ||
| 594 | mask = 1 << ((data->hwirq + PAB_INTX_START) - 1); | ||
| 595 | raw_spin_lock_irqsave(&pcie->intx_mask_lock, flags); | ||
| 596 | shifted_val = csr_readl(pcie, PAB_INTP_AMBA_MISC_ENB); | ||
| 597 | csr_writel(pcie, (shifted_val | mask), PAB_INTP_AMBA_MISC_ENB); | ||
| 598 | raw_spin_unlock_irqrestore(&pcie->intx_mask_lock, flags); | ||
| 599 | } | ||
| 600 | |||
| 601 | static struct irq_chip intx_irq_chip = { | ||
| 602 | .name = "mobiveil_pcie:intx", | ||
| 603 | .irq_enable = mobiveil_unmask_intx_irq, | ||
| 604 | .irq_disable = mobiveil_mask_intx_irq, | ||
| 605 | .irq_mask = mobiveil_mask_intx_irq, | ||
| 606 | .irq_unmask = mobiveil_unmask_intx_irq, | ||
| 607 | }; | ||
| 608 | |||
| 609 | /* routine to set up the INTx-related data */ | ||
| 610 | static int mobiveil_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 611 | irq_hw_number_t hwirq) | ||
| 612 | { | ||
| 613 | irq_set_chip_and_handler(irq, &intx_irq_chip, handle_level_irq); | ||
| 614 | irq_set_chip_data(irq, domain->host_data); | ||
| 615 | return 0; | ||
| 616 | } | ||
| 617 | |||
| 618 | /* INTx domain operations structure */ | ||
| 619 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 620 | .map = mobiveil_pcie_intx_map, | ||
| 621 | }; | ||
| 622 | |||
| 623 | static struct irq_chip mobiveil_msi_irq_chip = { | ||
| 624 | .name = "Mobiveil PCIe MSI", | ||
| 625 | .irq_mask = pci_msi_mask_irq, | ||
| 626 | .irq_unmask = pci_msi_unmask_irq, | ||
| 627 | }; | ||
| 628 | |||
| 629 | static struct msi_domain_info mobiveil_msi_domain_info = { | ||
| 630 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 631 | MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), | ||
| 632 | .chip = &mobiveil_msi_irq_chip, | ||
| 633 | }; | ||
| 634 | |||
| 635 | static void mobiveil_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 636 | { | ||
| 637 | struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data); | ||
| 638 | phys_addr_t addr = pcie->pcie_reg_base + (data->hwirq * sizeof(int)); | ||
| 639 | |||
| 640 | msg->address_lo = lower_32_bits(addr); | ||
| 641 | msg->address_hi = upper_32_bits(addr); | ||
| 642 | msg->data = data->hwirq; | ||
| 643 | |||
| 644 | dev_dbg(&pcie->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
| 645 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
| 646 | } | ||
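| | |||
| | /* | ||
| | * For illustration: with the 16 vectors above, hwirq 5 yields a | ||
| | * doorbell address of pcie_reg_base + 5 * 4 = pcie_reg_base + 0x14 | ||
| | * and MSI data 5; every doorbell falls inside the 4 KB capture | ||
| | * window that mobiveil_pcie_enable_msi() programmed via | ||
| | * MSI_BASE_LO/HI and MSI_SIZE_OFFSET. | ||
| | */ | ||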
| 647 | |||
| 648 | static int mobiveil_msi_set_affinity(struct irq_data *irq_data, | ||
| 649 | const struct cpumask *mask, bool force) | ||
| 650 | { | ||
| 651 | return -EINVAL; | ||
| 652 | } | ||
| 653 | |||
| 654 | static struct irq_chip mobiveil_msi_bottom_irq_chip = { | ||
| 655 | .name = "Mobiveil MSI", | ||
| 656 | .irq_compose_msi_msg = mobiveil_compose_msi_msg, | ||
| 657 | .irq_set_affinity = mobiveil_msi_set_affinity, | ||
| 658 | }; | ||
| 659 | |||
| 660 | static int mobiveil_irq_msi_domain_alloc(struct irq_domain *domain, | ||
| 661 | unsigned int virq, unsigned int nr_irqs, void *args) | ||
| 662 | { | ||
| 663 | struct mobiveil_pcie *pcie = domain->host_data; | ||
| 664 | struct mobiveil_msi *msi = &pcie->msi; | ||
| 665 | unsigned long bit; | ||
| 666 | |||
| 667 | WARN_ON(nr_irqs != 1); | ||
| 668 | mutex_lock(&msi->lock); | ||
| 669 | |||
| 670 | bit = find_first_zero_bit(msi->msi_irq_in_use, msi->num_of_vectors); | ||
| 671 | if (bit >= msi->num_of_vectors) { | ||
| 672 | mutex_unlock(&msi->lock); | ||
| 673 | return -ENOSPC; | ||
| 674 | } | ||
| 675 | |||
| 676 | set_bit(bit, msi->msi_irq_in_use); | ||
| 677 | |||
| 678 | mutex_unlock(&msi->lock); | ||
| 679 | |||
| 680 | irq_domain_set_info(domain, virq, bit, &mobiveil_msi_bottom_irq_chip, | ||
| 681 | domain->host_data, handle_level_irq, | ||
| 682 | NULL, NULL); | ||
| 683 | return 0; | ||
| 684 | } | ||
| 685 | |||
| 686 | static void mobiveil_irq_msi_domain_free(struct irq_domain *domain, | ||
| 687 | unsigned int virq, unsigned int nr_irqs) | ||
| 688 | { | ||
| 689 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | ||
| 690 | struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(d); | ||
| 691 | struct mobiveil_msi *msi = &pcie->msi; | ||
| 692 | |||
| 693 | mutex_lock(&msi->lock); | ||
| 694 | |||
| 695 | if (!test_bit(d->hwirq, msi->msi_irq_in_use)) { | ||
| 696 | dev_err(&pcie->pdev->dev, "trying to free unused MSI#%lu\n", | ||
| 697 | d->hwirq); | ||
| 698 | } else { | ||
| 699 | __clear_bit(d->hwirq, msi->msi_irq_in_use); | ||
| 700 | } | ||
| 701 | |||
| 702 | mutex_unlock(&msi->lock); | ||
| 703 | } | ||
| | |||
| 704 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 705 | .alloc = mobiveil_irq_msi_domain_alloc, | ||
| 706 | .free = mobiveil_irq_msi_domain_free, | ||
| 707 | }; | ||
| 708 | |||
| 709 | static int mobiveil_allocate_msi_domains(struct mobiveil_pcie *pcie) | ||
| 710 | { | ||
| 711 | struct device *dev = &pcie->pdev->dev; | ||
| 712 | struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); | ||
| 713 | struct mobiveil_msi *msi = &pcie->msi; | ||
| 714 | |||
| 715 | mutex_init(&pcie->msi.lock); | ||
| 716 | msi->dev_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, | ||
| 717 | &msi_domain_ops, pcie); | ||
| 718 | if (!msi->dev_domain) { | ||
| 719 | dev_err(dev, "failed to create IRQ domain\n"); | ||
| 720 | return -ENOMEM; | ||
| 721 | } | ||
| 722 | |||
| 723 | msi->msi_domain = pci_msi_create_irq_domain(fwnode, | ||
| 724 | &mobiveil_msi_domain_info, msi->dev_domain); | ||
| 725 | if (!msi->msi_domain) { | ||
| 726 | dev_err(dev, "failed to create MSI domain\n"); | ||
| 727 | irq_domain_remove(msi->dev_domain); | ||
| 728 | return -ENOMEM; | ||
| 729 | } | ||
| 730 | return 0; | ||
| 731 | } | ||
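| | |||
| | /* | ||
| | * The result is a two-level hierarchy: dev_domain hands out hwirqs | ||
| | * from the 16-entry bitmap through mobiveil_msi_bottom_irq_chip, | ||
| | * while the pci_msi_create_irq_domain() layer on top supplies the | ||
| | * standard PCI MSI/MSI-X chip callbacks. | ||
| | */ | ||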
| 732 | |||
| 733 | static int mobiveil_pcie_init_irq_domain(struct mobiveil_pcie *pcie) | ||
| 734 | { | ||
| 735 | struct device *dev = &pcie->pdev->dev; | ||
| 736 | struct device_node *node = dev->of_node; | ||
| 737 | int ret; | ||
| 738 | |||
| 739 | /* setup INTx */ | ||
| 740 | pcie->intx_domain = irq_domain_add_linear(node, | ||
| 741 | PCI_NUM_INTX, &intx_domain_ops, pcie); | ||
| 742 | |||
| 743 | if (!pcie->intx_domain) { | ||
| 744 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
| 745 | return -ENODEV; | ||
| 746 | } | ||
| 747 | |||
| 748 | raw_spin_lock_init(&pcie->intx_mask_lock); | ||
| 749 | |||
| 750 | /* setup MSI */ | ||
| 751 | ret = mobiveil_allocate_msi_domains(pcie); | ||
| 752 | if (ret) | ||
| 753 | return ret; | ||
| 754 | |||
| 755 | return 0; | ||
| 756 | } | ||
| 757 | |||
| 758 | static int mobiveil_pcie_probe(struct platform_device *pdev) | ||
| 759 | { | ||
| 760 | struct mobiveil_pcie *pcie; | ||
| 761 | struct pci_bus *bus; | ||
| 762 | struct pci_bus *child; | ||
| 763 | struct pci_host_bridge *bridge; | ||
| 764 | struct device *dev = &pdev->dev; | ||
| 765 | resource_size_t iobase; | ||
| 766 | int ret; | ||
| 767 | |||
| 768 | /* allocate the PCIe port */ | ||
| 769 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 770 | if (!bridge) | ||
| 771 | return -ENOMEM; | ||
| 772 | |||
| 773 | pcie = pci_host_bridge_priv(bridge); | ||
| 774 | if (!pcie) | ||
| 775 | return -ENOMEM; | ||
| 776 | |||
| 777 | pcie->pdev = pdev; | ||
| 778 | |||
| 779 | ret = mobiveil_pcie_parse_dt(pcie); | ||
| 780 | if (ret) { | ||
| 781 | dev_err(dev, "Parsing DT failed, ret: %x\n", ret); | ||
| 782 | return ret; | ||
| 783 | } | ||
| 784 | |||
| 785 | INIT_LIST_HEAD(&pcie->resources); | ||
| 786 | |||
| 787 | /* parse the host bridge base addresses from the device tree file */ | ||
| 788 | ret = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 789 | &pcie->resources, &iobase); | ||
| 790 | if (ret) { | ||
| 791 | dev_err(dev, "Getting bridge resources failed\n"); | ||
| 792 | return -ENOMEM; | ||
| 793 | } | ||
| 794 | |||
| 795 | /* | ||
| 796 | * configure all inbound and outbound windows and prepare the RC for | ||
| 797 | * config access | ||
| 798 | */ | ||
| 799 | ret = mobiveil_host_init(pcie); | ||
| 800 | if (ret) { | ||
| 801 | dev_err(dev, "Failed to initialize host\n"); | ||
| 802 | goto error; | ||
| 803 | } | ||
| 804 | |||
| 805 | /* fixup for PCIe class register */ | ||
| 806 | csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS); | ||
| 807 | |||
| 808 | /* initialize the IRQ domains */ | ||
| 809 | ret = mobiveil_pcie_init_irq_domain(pcie); | ||
| 810 | if (ret) { | ||
| 811 | dev_err(dev, "Failed creating IRQ Domain\n"); | ||
| 812 | goto error; | ||
| 813 | } | ||
| 814 | |||
| 815 | ret = devm_request_pci_bus_resources(dev, &pcie->resources); | ||
| 816 | if (ret) | ||
| 817 | goto error; | ||
| 818 | |||
| 819 | /* Initialize bridge */ | ||
| 820 | list_splice_init(&pcie->resources, &bridge->windows); | ||
| 821 | bridge->dev.parent = dev; | ||
| 822 | bridge->sysdata = pcie; | ||
| 823 | bridge->busnr = pcie->root_bus_nr; | ||
| 824 | bridge->ops = &mobiveil_pcie_ops; | ||
| 825 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 826 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 827 | |||
| 828 | /* setup the kernel resources for the newly added PCIe root bus */ | ||
| 829 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 830 | if (ret) | ||
| 831 | goto error; | ||
| 832 | |||
| 833 | bus = bridge->bus; | ||
| 834 | |||
| 835 | pci_assign_unassigned_bus_resources(bus); | ||
| 836 | list_for_each_entry(child, &bus->children, node) | ||
| 837 | pcie_bus_configure_settings(child); | ||
| 838 | pci_bus_add_devices(bus); | ||
| 839 | |||
| 840 | return 0; | ||
| 841 | error: | ||
| 842 | pci_free_resource_list(&pcie->resources); | ||
| 843 | return ret; | ||
| 844 | } | ||
| 845 | |||
| 846 | static const struct of_device_id mobiveil_pcie_of_match[] = { | ||
| 847 | {.compatible = "mbvl,gpex40-pcie",}, | ||
| 848 | {}, | ||
| 849 | }; | ||
| 850 | |||
| 851 | MODULE_DEVICE_TABLE(of, mobiveil_pcie_of_match); | ||
| 852 | |||
| 853 | static struct platform_driver mobiveil_pcie_driver = { | ||
| 854 | .probe = mobiveil_pcie_probe, | ||
| 855 | .driver = { | ||
| 856 | .name = "mobiveil-pcie", | ||
| 857 | .of_match_table = mobiveil_pcie_of_match, | ||
| 858 | .suppress_bind_attrs = true, | ||
| 859 | }, | ||
| 860 | }; | ||
| 861 | |||
| 862 | builtin_platform_driver(mobiveil_pcie_driver); | ||
| 863 | |||
| 864 | MODULE_LICENSE("GPL v2"); | ||
| 865 | MODULE_DESCRIPTION("Mobiveil PCIe host controller driver"); | ||
| 866 | MODULE_AUTHOR("Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>"); | ||
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c new file mode 100644 index 000000000000..874d75c9ee4a --- /dev/null +++ b/drivers/pci/controller/pcie-rcar.c | |||
| @@ -0,0 +1,1222 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe driver for Renesas R-Car SoCs | ||
| 4 | * Copyright (C) 2014 Renesas Electronics Europe Ltd | ||
| 5 | * | ||
| 6 | * Based on: | ||
| 7 | * arch/sh/drivers/pci/pcie-sh7786.c | ||
| 8 | * arch/sh/drivers/pci/ops-sh7786.c | ||
| 9 | * Copyright (C) 2009 - 2011 Paul Mundt | ||
| 10 | * | ||
| 11 | * Author: Phil Edworthy <phil.edworthy@renesas.com> | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/bitops.h> | ||
| 15 | #include <linux/clk.h> | ||
| 16 | #include <linux/delay.h> | ||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/irq.h> | ||
| 19 | #include <linux/irqdomain.h> | ||
| 20 | #include <linux/kernel.h> | ||
| 21 | #include <linux/init.h> | ||
| 22 | #include <linux/msi.h> | ||
| 23 | #include <linux/of_address.h> | ||
| 24 | #include <linux/of_irq.h> | ||
| 25 | #include <linux/of_pci.h> | ||
| 26 | #include <linux/of_platform.h> | ||
| 27 | #include <linux/pci.h> | ||
| 28 | #include <linux/phy/phy.h> | ||
| 29 | #include <linux/platform_device.h> | ||
| 30 | #include <linux/pm_runtime.h> | ||
| 31 | #include <linux/slab.h> | ||
| 32 | |||
| 33 | #include "../pci.h" | ||
| 34 | |||
| 35 | #define PCIECAR 0x000010 | ||
| 36 | #define PCIECCTLR 0x000018 | ||
| 37 | #define CONFIG_SEND_ENABLE BIT(31) | ||
| 38 | #define TYPE0 (0 << 8) | ||
| 39 | #define TYPE1 BIT(8) | ||
| 40 | #define PCIECDR 0x000020 | ||
| 41 | #define PCIEMSR 0x000028 | ||
| 42 | #define PCIEINTXR 0x000400 | ||
| 43 | #define PCIEPHYSR 0x0007f0 | ||
| 44 | #define PHYRDY BIT(0) | ||
| 45 | #define PCIEMSITXR 0x000840 | ||
| 46 | |||
| 47 | /* Transfer control */ | ||
| 48 | #define PCIETCTLR 0x02000 | ||
| 49 | #define CFINIT 1 | ||
| 50 | #define PCIETSTR 0x02004 | ||
| 51 | #define DATA_LINK_ACTIVE 1 | ||
| 52 | #define PCIEERRFR 0x02020 | ||
| 53 | #define UNSUPPORTED_REQUEST BIT(4) | ||
| 54 | #define PCIEMSIFR 0x02044 | ||
| 55 | #define PCIEMSIALR 0x02048 | ||
| 56 | #define MSIFE 1 | ||
| 57 | #define PCIEMSIAUR 0x0204c | ||
| 58 | #define PCIEMSIIER 0x02050 | ||
| 59 | |||
| 60 | /* root port address */ | ||
| 61 | #define PCIEPRAR(x) (0x02080 + ((x) * 0x4)) | ||
| 62 | |||
| 63 | /* local address reg & mask */ | ||
| 64 | #define PCIELAR(x) (0x02200 + ((x) * 0x20)) | ||
| 65 | #define PCIELAMR(x) (0x02208 + ((x) * 0x20)) | ||
| 66 | #define LAM_PREFETCH BIT(3) | ||
| 67 | #define LAM_64BIT BIT(2) | ||
| 68 | #define LAR_ENABLE BIT(1) | ||
| 69 | |||
| 70 | /* PCIe address reg & mask */ | ||
| 71 | #define PCIEPALR(x) (0x03400 + ((x) * 0x20)) | ||
| 72 | #define PCIEPAUR(x) (0x03404 + ((x) * 0x20)) | ||
| 73 | #define PCIEPAMR(x) (0x03408 + ((x) * 0x20)) | ||
| 74 | #define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20)) | ||
| 75 | #define PAR_ENABLE BIT(31) | ||
| 76 | #define IO_SPACE BIT(8) | ||
| 77 | |||
| 78 | /* Configuration */ | ||
| 79 | #define PCICONF(x) (0x010000 + ((x) * 0x4)) | ||
| 80 | #define PMCAP(x) (0x010040 + ((x) * 0x4)) | ||
| 81 | #define EXPCAP(x) (0x010070 + ((x) * 0x4)) | ||
| 82 | #define VCCAP(x) (0x010100 + ((x) * 0x4)) | ||
| 83 | |||
| 84 | /* link layer */ | ||
| 85 | #define IDSETR1 0x011004 | ||
| 86 | #define TLCTLR 0x011048 | ||
| 87 | #define MACSR 0x011054 | ||
| 88 | #define SPCHGFIN BIT(4) | ||
| 89 | #define SPCHGFAIL BIT(6) | ||
| 90 | #define SPCHGSUC BIT(7) | ||
| 91 | #define LINK_SPEED (0xf << 16) | ||
| 92 | #define LINK_SPEED_2_5GTS (1 << 16) | ||
| 93 | #define LINK_SPEED_5_0GTS (2 << 16) | ||
| 94 | #define MACCTLR 0x011058 | ||
| 95 | #define SPEED_CHANGE BIT(24) | ||
| 96 | #define SCRAMBLE_DISABLE BIT(27) | ||
| 97 | #define MACS2R 0x011078 | ||
| 98 | #define MACCGSPSETR 0x011084 | ||
| 99 | #define SPCNGRSN BIT(31) | ||
| 100 | |||
| 101 | /* R-Car H1 PHY */ | ||
| 102 | #define H1_PCIEPHYADRR 0x04000c | ||
| 103 | #define WRITE_CMD BIT(16) | ||
| 104 | #define PHY_ACK BIT(24) | ||
| 105 | #define RATE_POS 12 | ||
| 106 | #define LANE_POS 8 | ||
| 107 | #define ADR_POS 0 | ||
| 108 | #define H1_PCIEPHYDOUTR 0x040014 | ||
| 109 | |||
| 110 | /* R-Car Gen2 PHY */ | ||
| 111 | #define GEN2_PCIEPHYADDR 0x780 | ||
| 112 | #define GEN2_PCIEPHYDATA 0x784 | ||
| 113 | #define GEN2_PCIEPHYCTRL 0x78c | ||
| 114 | |||
| 115 | #define INT_PCI_MSI_NR 32 | ||
| 116 | |||
| 117 | #define RCONF(x) (PCICONF(0) + (x)) | ||
| 118 | #define RPMCAP(x) (PMCAP(0) + (x)) | ||
| 119 | #define REXPCAP(x) (EXPCAP(0) + (x)) | ||
| 120 | #define RVCCAP(x) (VCCAP(0) + (x)) | ||
| 121 | |||
| 122 | #define PCIE_CONF_BUS(b) (((b) & 0xff) << 24) | ||
| 123 | #define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19) | ||
| 124 | #define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16) | ||
| 125 | |||
| 126 | #define RCAR_PCI_MAX_RESOURCES 4 | ||
| 127 | #define MAX_NR_INBOUND_MAPS 6 | ||
| 128 | |||
| 129 | struct rcar_msi { | ||
| 130 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | ||
| 131 | struct irq_domain *domain; | ||
| 132 | struct msi_controller chip; | ||
| 133 | unsigned long pages; | ||
| 134 | struct mutex lock; | ||
| 135 | int irq1; | ||
| 136 | int irq2; | ||
| 137 | }; | ||
| 138 | |||
| 139 | static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) | ||
| 140 | { | ||
| 141 | return container_of(chip, struct rcar_msi, chip); | ||
| 142 | } | ||
| 143 | |||
| 144 | /* Structure representing the PCIe interface */ | ||
| 145 | struct rcar_pcie { | ||
| 146 | struct device *dev; | ||
| 147 | struct phy *phy; | ||
| 148 | void __iomem *base; | ||
| 149 | struct list_head resources; | ||
| 150 | int root_bus_nr; | ||
| 151 | struct clk *bus_clk; | ||
| 152 | struct rcar_msi msi; | ||
| 153 | }; | ||
| 154 | |||
| 155 | static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, | ||
| 156 | unsigned long reg) | ||
| 157 | { | ||
| 158 | writel(val, pcie->base + reg); | ||
| 159 | } | ||
| 160 | |||
| 161 | static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, | ||
| 162 | unsigned long reg) | ||
| 163 | { | ||
| 164 | return readl(pcie->base + reg); | ||
| 165 | } | ||
| 166 | |||
| 167 | enum { | ||
| 168 | RCAR_PCI_ACCESS_READ, | ||
| 169 | RCAR_PCI_ACCESS_WRITE, | ||
| 170 | }; | ||
| 171 | |||
| 172 | static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) | ||
| 173 | { | ||
| 174 | int shift = 8 * (where & 3); | ||
| 175 | u32 val = rcar_pci_read_reg(pcie, where & ~3); | ||
| 176 | |||
| 177 | val &= ~(mask << shift); | ||
| 178 | val |= data << shift; | ||
| 179 | rcar_pci_write_reg(pcie, val, where & ~3); | ||
| 180 | } | ||
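| | |||
| | /* | ||
| | * Worked example: rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1) | ||
| | * used later resolves to where = PCICONF(0) + 0x19, so shift = 8 and | ||
| | * only bits 15:8 of the dword at offset 0x10018 are replaced with 1. | ||
| | */ | ||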
| 181 | |||
| 182 | static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) | ||
| 183 | { | ||
| 184 | int shift = 8 * (where & 3); | ||
| 185 | u32 val = rcar_pci_read_reg(pcie, where & ~3); | ||
| 186 | |||
| 187 | return val >> shift; | ||
| 188 | } | ||
| 189 | |||
| 190 | /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ | ||
| 191 | static int rcar_pcie_config_access(struct rcar_pcie *pcie, | ||
| 192 | unsigned char access_type, struct pci_bus *bus, | ||
| 193 | unsigned int devfn, int where, u32 *data) | ||
| 194 | { | ||
| 195 | int dev, func, reg, index; | ||
| 196 | |||
| 197 | dev = PCI_SLOT(devfn); | ||
| 198 | func = PCI_FUNC(devfn); | ||
| 199 | reg = where & ~3; | ||
| 200 | index = reg / 4; | ||
| 201 | |||
| 202 | /* | ||
| 203 | * While each channel has its own memory-mapped extended config | ||
| 204 | * space, it's generally only accessible when in endpoint mode. | ||
| 205 | * When in root complex mode, the controller is unable to target | ||
| 206 | * itself with either type 0 or type 1 accesses, and indeed, any | ||
| 207 | * controller initiated target transfer to its own config space | ||
| 208 | * results in a completer abort. | ||
| 209 | * | ||
| 210 | * Each channel effectively only supports a single device, but as | ||
| 211 | * the same channel <-> device access works for any PCI_SLOT() | ||
| 212 | * value, we cheat a bit here and bind the controller's config | ||
| 213 | * space to devfn 0 in order to enable self-enumeration. In this | ||
| 214 | * case the regular ECAR/ECDR path is sidelined and the mangled | ||
| 215 | * config access itself is initiated as an internal bus transaction. | ||
| 216 | */ | ||
| 217 | if (pci_is_root_bus(bus)) { | ||
| 218 | if (dev != 0) | ||
| 219 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 220 | |||
| 221 | if (access_type == RCAR_PCI_ACCESS_READ) { | ||
| 222 | *data = rcar_pci_read_reg(pcie, PCICONF(index)); | ||
| 223 | } else { | ||
| 224 | /* Keep an eye out for changes to the root bus number */ | ||
| 225 | if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) | ||
| 226 | pcie->root_bus_nr = *data & 0xff; | ||
| 227 | |||
| 228 | rcar_pci_write_reg(pcie, *data, PCICONF(index)); | ||
| 229 | } | ||
| 230 | |||
| 231 | return PCIBIOS_SUCCESSFUL; | ||
| 232 | } | ||
| 233 | |||
| 234 | if (pcie->root_bus_nr < 0) | ||
| 235 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 236 | |||
| 237 | /* Clear errors */ | ||
| 238 | rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); | ||
| 239 | |||
| 240 | /* Set the PIO address */ | ||
| 241 | rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | | ||
| 242 | PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); | ||
| 243 | |||
| 244 | /* Enable the configuration access */ | ||
| 245 | if (bus->parent->number == pcie->root_bus_nr) | ||
| 246 | rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); | ||
| 247 | else | ||
| 248 | rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); | ||
| 249 | |||
| 250 | /* Check for errors */ | ||
| 251 | if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST) | ||
| 252 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 253 | |||
| 254 | /* Check for master and target aborts */ | ||
| 255 | if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) & | ||
| 256 | (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT)) | ||
| 257 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 258 | |||
| 259 | if (access_type == RCAR_PCI_ACCESS_READ) | ||
| 260 | *data = rcar_pci_read_reg(pcie, PCIECDR); | ||
| 261 | else | ||
| 262 | rcar_pci_write_reg(pcie, *data, PCIECDR); | ||
| 263 | |||
| 264 | /* Disable the configuration access */ | ||
| 265 | rcar_pci_write_reg(pcie, 0, PCIECCTLR); | ||
| 266 | |||
| 267 | return PCIBIOS_SUCCESSFUL; | ||
| 268 | } | ||
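| | |||
| | /* | ||
| | * For illustration (assuming root bus 0): reading the vendor ID of | ||
| | * bus 2, device 0, function 0 programs PCIE_CONF_BUS(2) = 0x02000000 | ||
| | * into PCIECAR, sets CONFIG_SEND_ENABLE | TYPE1 in PCIECCTLR (bus 2 | ||
| | * is not directly below the root), and then reads the data from | ||
| | * PCIECDR. | ||
| | */ | ||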
| 269 | |||
| 270 | static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, | ||
| 271 | int where, int size, u32 *val) | ||
| 272 | { | ||
| 273 | struct rcar_pcie *pcie = bus->sysdata; | ||
| 274 | int ret; | ||
| 275 | |||
| 276 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, | ||
| 277 | bus, devfn, where, val); | ||
| 278 | if (ret != PCIBIOS_SUCCESSFUL) { | ||
| 279 | *val = 0xffffffff; | ||
| 280 | return ret; | ||
| 281 | } | ||
| 282 | |||
| 283 | if (size == 1) | ||
| 284 | *val = (*val >> (8 * (where & 3))) & 0xff; | ||
| 285 | else if (size == 2) | ||
| 286 | *val = (*val >> (8 * (where & 2))) & 0xffff; | ||
| 287 | |||
| 288 | dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", | ||
| 289 | bus->number, devfn, where, size, (unsigned long)*val); | ||
| 290 | |||
| 291 | return ret; | ||
| 292 | } | ||
| 293 | |||
| 294 | /* Serialization is provided by 'pci_lock' in drivers/pci/access.c */ | ||
| 295 | static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, | ||
| 296 | int where, int size, u32 val) | ||
| 297 | { | ||
| 298 | struct rcar_pcie *pcie = bus->sysdata; | ||
| 299 | int shift, ret; | ||
| 300 | u32 data; | ||
| 301 | |||
| 302 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, | ||
| 303 | bus, devfn, where, &data); | ||
| 304 | if (ret != PCIBIOS_SUCCESSFUL) | ||
| 305 | return ret; | ||
| 306 | |||
| 307 | dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", | ||
| 308 | bus->number, devfn, where, size, (unsigned long)val); | ||
| 309 | |||
| 310 | if (size == 1) { | ||
| 311 | shift = 8 * (where & 3); | ||
| 312 | data &= ~(0xff << shift); | ||
| 313 | data |= ((val & 0xff) << shift); | ||
| 314 | } else if (size == 2) { | ||
| 315 | shift = 8 * (where & 2); | ||
| 316 | data &= ~(0xffff << shift); | ||
| 317 | data |= ((val & 0xffff) << shift); | ||
| 318 | } else | ||
| 319 | data = val; | ||
| 320 | |||
| 321 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE, | ||
| 322 | bus, devfn, where, &data); | ||
| 323 | |||
| 324 | return ret; | ||
| 325 | } | ||
| 326 | |||
| 327 | static struct pci_ops rcar_pcie_ops = { | ||
| 328 | .read = rcar_pcie_read_conf, | ||
| 329 | .write = rcar_pcie_write_conf, | ||
| 330 | }; | ||
| 331 | |||
| 332 | static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie, | ||
| 333 | struct resource *res) | ||
| 334 | { | ||
| 335 | /* Setup PCIe address space mappings for each resource */ | ||
| 336 | resource_size_t size; | ||
| 337 | resource_size_t res_start; | ||
| 338 | u32 mask; | ||
| 339 | |||
| 340 | rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win)); | ||
| 341 | |||
| 342 | /* | ||
| 343 | * The PAMR mask is calculated in units of 128 bytes, which | ||
| 344 | * keeps things pretty simple. | ||
| 345 | */ | ||
| 346 | size = resource_size(res); | ||
| 347 | mask = (roundup_pow_of_two(size) / SZ_128) - 1; | ||
| 348 | rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win)); | ||
| 349 | |||
| 350 | if (res->flags & IORESOURCE_IO) | ||
| 351 | res_start = pci_pio_to_address(res->start); | ||
| 352 | else | ||
| 353 | res_start = res->start; | ||
| 354 | |||
| 355 | rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win)); | ||
| 356 | rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F, | ||
| 357 | PCIEPALR(win)); | ||
| 358 | |||
| 359 | /* Enable the window; flag I/O windows with IO_SPACE */ | ||
| 360 | mask = PAR_ENABLE; | ||
| 361 | if (res->flags & IORESOURCE_IO) | ||
| 362 | mask |= IO_SPACE; | ||
| 363 | |||
| 364 | rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win)); | ||
| 365 | } | ||
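| | |||
| | /* | ||
| | * Worked example: a 1 MiB window rounds up to 2^20 bytes, so | ||
| | * mask = (0x100000 / 128) - 1 = 0x1fff and 0x1fff << 7 = 0xfff80 is | ||
| | * written to PCIEPAMR, marking address bits 19:7 as "don't care" | ||
| | * within the window. | ||
| | */ | ||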
| 366 | |||
| 367 | static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci) | ||
| 368 | { | ||
| 369 | struct resource_entry *win; | ||
| 370 | int i = 0; | ||
| 371 | |||
| 372 | /* Setup PCI resources */ | ||
| 373 | resource_list_for_each_entry(win, &pci->resources) { | ||
| 374 | struct resource *res = win->res; | ||
| 375 | |||
| 376 | if (!res->flags) | ||
| 377 | continue; | ||
| 378 | |||
| 379 | switch (resource_type(res)) { | ||
| 380 | case IORESOURCE_IO: | ||
| 381 | case IORESOURCE_MEM: | ||
| 382 | rcar_pcie_setup_window(i, pci, res); | ||
| 383 | i++; | ||
| 384 | break; | ||
| 385 | case IORESOURCE_BUS: | ||
| 386 | pci->root_bus_nr = res->start; | ||
| 387 | break; | ||
| 388 | default: | ||
| 389 | continue; | ||
| 390 | } | ||
| 391 | |||
| 392 | pci_add_resource(resource, res); | ||
| 393 | } | ||
| 394 | |||
| 395 | return 1; | ||
| 396 | } | ||
| 397 | |||
| 398 | static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) | ||
| 399 | { | ||
| 400 | struct device *dev = pcie->dev; | ||
| 401 | unsigned int timeout = 1000; | ||
| 402 | u32 macsr; | ||
| 403 | |||
| 404 | if ((rcar_pci_read_reg(pcie, MACS2R) & LINK_SPEED) != LINK_SPEED_5_0GTS) | ||
| 405 | return; | ||
| 406 | |||
| 407 | if (rcar_pci_read_reg(pcie, MACCTLR) & SPEED_CHANGE) { | ||
| 408 | dev_err(dev, "Speed change already in progress\n"); | ||
| 409 | return; | ||
| 410 | } | ||
| 411 | |||
| 412 | macsr = rcar_pci_read_reg(pcie, MACSR); | ||
| 413 | if ((macsr & LINK_SPEED) == LINK_SPEED_5_0GTS) | ||
| 414 | goto done; | ||
| 415 | |||
| 416 | /* Set target link speed to 5.0 GT/s */ | ||
| 417 | rcar_rmw32(pcie, EXPCAP(12), PCI_EXP_LNKSTA_CLS, | ||
| 418 | PCI_EXP_LNKSTA_CLS_5_0GB); | ||
| 419 | |||
| 420 | /* Set speed change reason as intentional factor */ | ||
| 421 | rcar_rmw32(pcie, MACCGSPSETR, SPCNGRSN, 0); | ||
| 422 | |||
| 423 | /* Clear SPCHGFIN, SPCHGSUC, and SPCHGFAIL */ | ||
| 424 | if (macsr & (SPCHGFIN | SPCHGSUC | SPCHGFAIL)) | ||
| 425 | rcar_pci_write_reg(pcie, macsr, MACSR); | ||
| 426 | |||
| 427 | /* Start link speed change */ | ||
| 428 | rcar_rmw32(pcie, MACCTLR, SPEED_CHANGE, SPEED_CHANGE); | ||
| 429 | |||
| 430 | while (timeout--) { | ||
| 431 | macsr = rcar_pci_read_reg(pcie, MACSR); | ||
| 432 | if (macsr & SPCHGFIN) { | ||
| 433 | /* Clear the interrupt bits */ | ||
| 434 | rcar_pci_write_reg(pcie, macsr, MACSR); | ||
| 435 | |||
| 436 | if (macsr & SPCHGFAIL) | ||
| 437 | dev_err(dev, "Speed change failed\n"); | ||
| 438 | |||
| 439 | goto done; | ||
| 440 | } | ||
| 441 | |||
| 442 | msleep(1); | ||
| 443 | } | ||
| 444 | |||
| 445 | dev_err(dev, "Speed change timed out\n"); | ||
| 446 | |||
| 447 | done: | ||
| 448 | dev_info(dev, "Current link speed is %s GT/s\n", | ||
| 449 | (macsr & LINK_SPEED) == LINK_SPEED_5_0GTS ? "5" : "2.5"); | ||
| 450 | } | ||
| 451 | |||
| 452 | static int rcar_pcie_enable(struct rcar_pcie *pcie) | ||
| 453 | { | ||
| 454 | struct device *dev = pcie->dev; | ||
| 455 | struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); | ||
| 456 | struct pci_bus *bus, *child; | ||
| 457 | int ret; | ||
| 458 | |||
| 459 | /* Try setting 5 GT/s link speed */ | ||
| 460 | rcar_pcie_force_speedup(pcie); | ||
| 461 | |||
| 462 | rcar_pcie_setup(&bridge->windows, pcie); | ||
| 463 | |||
| 464 | pci_add_flags(PCI_REASSIGN_ALL_BUS); | ||
| 465 | |||
| 466 | bridge->dev.parent = dev; | ||
| 467 | bridge->sysdata = pcie; | ||
| 468 | bridge->busnr = pcie->root_bus_nr; | ||
| 469 | bridge->ops = &rcar_pcie_ops; | ||
| 470 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 471 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 472 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 473 | bridge->msi = &pcie->msi.chip; | ||
| 474 | |||
| 475 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 476 | if (ret < 0) | ||
| 477 | return ret; | ||
| 478 | |||
| 479 | bus = bridge->bus; | ||
| 480 | |||
| 481 | pci_bus_size_bridges(bus); | ||
| 482 | pci_bus_assign_resources(bus); | ||
| 483 | |||
| 484 | list_for_each_entry(child, &bus->children, node) | ||
| 485 | pcie_bus_configure_settings(child); | ||
| 486 | |||
| 487 | pci_bus_add_devices(bus); | ||
| 488 | |||
| 489 | return 0; | ||
| 490 | } | ||
| 491 | |||
| 492 | static int phy_wait_for_ack(struct rcar_pcie *pcie) | ||
| 493 | { | ||
| 494 | struct device *dev = pcie->dev; | ||
| 495 | unsigned int timeout = 100; | ||
| 496 | |||
| 497 | while (timeout--) { | ||
| 498 | if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK) | ||
| 499 | return 0; | ||
| 500 | |||
| 501 | udelay(100); | ||
| 502 | } | ||
| 503 | |||
| 504 | dev_err(dev, "Access to PCIe phy timed out\n"); | ||
| 505 | |||
| 506 | return -ETIMEDOUT; | ||
| 507 | } | ||
| 508 | |||
| 509 | static void phy_write_reg(struct rcar_pcie *pcie, | ||
| 510 | unsigned int rate, unsigned int addr, | ||
| 511 | unsigned int lane, unsigned int data) | ||
| 512 | { | ||
| 513 | unsigned long phyaddr; | ||
| 514 | |||
| 515 | phyaddr = WRITE_CMD | | ||
| 516 | ((rate & 1) << RATE_POS) | | ||
| 517 | ((lane & 0xf) << LANE_POS) | | ||
| 518 | ((addr & 0xff) << ADR_POS); | ||
| 519 | |||
| 520 | /* Set write data */ | ||
| 521 | rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR); | ||
| 522 | rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR); | ||
| 523 | |||
| 524 | /* Ignore errors as they will be dealt with if the data link is down */ | ||
| 525 | phy_wait_for_ack(pcie); | ||
| 526 | |||
| 527 | /* Clear command */ | ||
| 528 | rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR); | ||
| 529 | rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR); | ||
| 530 | |||
| 531 | /* Ignore errors as they will be dealt with if the data link is down */ | ||
| 532 | phy_wait_for_ack(pcie); | ||
| 533 | } | ||
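| | |||
| | /* | ||
| | * For illustration: phy_write_reg(pcie, 1, 0x4C, 0x1, ...) below | ||
| | * composes H1_PCIEPHYADRR = WRITE_CMD | (1 << RATE_POS) | | ||
| | * (1 << LANE_POS) | 0x4C = 0x1114c, i.e. a rate-1 (presumably | ||
| | * 5 GT/s) write to PHY register 0x4C on lane 1, with the payload | ||
| | * staged in H1_PCIEPHYDOUTR first. | ||
| | */ | ||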
| 534 | |||
| 535 | static int rcar_pcie_wait_for_phyrdy(struct rcar_pcie *pcie) | ||
| 536 | { | ||
| 537 | unsigned int timeout = 10; | ||
| 538 | |||
| 539 | while (timeout--) { | ||
| 540 | if (rcar_pci_read_reg(pcie, PCIEPHYSR) & PHYRDY) | ||
| 541 | return 0; | ||
| 542 | |||
| 543 | msleep(5); | ||
| 544 | } | ||
| 545 | |||
| 546 | return -ETIMEDOUT; | ||
| 547 | } | ||
| 548 | |||
| 549 | static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie) | ||
| 550 | { | ||
| 551 | unsigned int timeout = 10000; | ||
| 552 | |||
| 553 | while (timeout--) { | ||
| 554 | if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE)) | ||
| 555 | return 0; | ||
| 556 | |||
| 557 | udelay(5); | ||
| 558 | cpu_relax(); | ||
| 559 | } | ||
| 560 | |||
| 561 | return -ETIMEDOUT; | ||
| 562 | } | ||
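Both wait helpers above follow the same bounded-poll shape. As an illustrative sketch only (not part of this driver), the pattern could be factored into one helper reusing rcar_pci_read_reg() from this file:

static int rcar_pcie_poll_bit(struct rcar_pcie *pcie, int reg, u32 bit,
			      unsigned int tries, unsigned int delay_us)
{
	/* Poll until 'bit' is set in 'reg' or the retry budget runs out. */
	while (tries--) {
		if (rcar_pci_read_reg(pcie, reg) & bit)
			return 0;
		udelay(delay_us);
	}
	return -ETIMEDOUT;
}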
| 563 | |||
| 564 | static int rcar_pcie_hw_init(struct rcar_pcie *pcie) | ||
| 565 | { | ||
| 566 | int err; | ||
| 567 | |||
| 568 | /* Begin initialization */ | ||
| 569 | rcar_pci_write_reg(pcie, 0, PCIETCTLR); | ||
| 570 | |||
| 571 | /* Set mode */ | ||
| 572 | rcar_pci_write_reg(pcie, 1, PCIEMSR); | ||
| 573 | |||
| 574 | err = rcar_pcie_wait_for_phyrdy(pcie); | ||
| 575 | if (err) | ||
| 576 | return err; | ||
| 577 | |||
| 578 | /* | ||
| 579 | * Initial header for port config space is type 1, set the device | ||
| 580 | * class to match. Hardware takes care of propagating the IDSETR | ||
| 581 | * settings, so there is no need to bother with a quirk. | ||
| 582 | */ | ||
| 583 | rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1); | ||
| 584 | |||
| 585 | /* | ||
| 586 | * Set up the Secondary and Subordinate Bus Numbers, even though they | ||
| 587 | * aren't used, to avoid the bridge being detected as broken. | ||
| 588 | */ | ||
| 589 | rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1); | ||
| 590 | rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1); | ||
| 591 | |||
| 592 | /* Initialize default capabilities. */ | ||
| 593 | rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP); | ||
| 594 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS), | ||
| 595 | PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4); | ||
| 596 | rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f, | ||
| 597 | PCI_HEADER_TYPE_BRIDGE); | ||
| 598 | |||
| 599 | /* Enable data link layer active state reporting */ | ||
| 600 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC, | ||
| 601 | PCI_EXP_LNKCAP_DLLLARC); | ||
| 602 | |||
| 603 | /* Write out the physical slot number = 0 */ | ||
| 604 | rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0); | ||
| 605 | |||
| 606 | /* Set the completion timer timeout to the maximum 50ms. */ | ||
| 607 | rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50); | ||
| 608 | |||
| 609 | /* Terminate list of capabilities (Next Capability Offset=0) */ | ||
| 610 | rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0); | ||
| 611 | |||
| 612 | /* Enable MSI */ | ||
| 613 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 614 | rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR); | ||
| 615 | |||
| 616 | /* Finish initialization - establish a PCI Express link */ | ||
| 617 | rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); | ||
| 618 | |||
| 619 | /* This will time out if we don't have a link. */ | ||
| 620 | err = rcar_pcie_wait_for_dl(pcie); | ||
| 621 | if (err) | ||
| 622 | return err; | ||
| 623 | |||
| 624 | /* Enable INTx interrupts */ | ||
| 625 | rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8); | ||
| 626 | |||
| 627 | wmb(); | ||
| 628 | |||
| 629 | return 0; | ||
| 630 | } | ||
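The byte-granular accesses above (e.g. TLCTLR + 1 with a 6-bit mask for the completion timer) work because rcar_rmw32(), defined earlier in this driver, shifts both mask and data to the addressed byte. Its approximate shape:

static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	unsigned int shift = BITS_PER_BYTE * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	/* Clear 'mask' and OR in 'data' within the addressed byte lane. */
	val &= ~(mask << shift);
	val |= data << shift;
	rcar_pci_write_reg(pcie, val, where & ~3);
}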
| 631 | |||
| 632 | static int rcar_pcie_phy_init_h1(struct rcar_pcie *pcie) | ||
| 633 | { | ||
| 634 | /* Initialize the phy */ | ||
| 635 | phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191); | ||
| 636 | phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180); | ||
| 637 | phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188); | ||
| 638 | phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188); | ||
| 639 | phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014); | ||
| 640 | phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014); | ||
| 641 | phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0); | ||
| 642 | phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB); | ||
| 643 | phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062); | ||
| 644 | phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000); | ||
| 645 | phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000); | ||
| 646 | phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806); | ||
| 647 | |||
| 648 | phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5); | ||
| 649 | phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F); | ||
| 650 | phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000); | ||
| 651 | |||
| 652 | return 0; | ||
| 653 | } | ||
| 654 | |||
| 655 | static int rcar_pcie_phy_init_gen2(struct rcar_pcie *pcie) | ||
| 656 | { | ||
| 657 | /* | ||
| 658 | * These settings come from the R-Car Series, 2nd Generation User's | ||
| 659 | * Manual, section 50.3.1 (2) Initialization of the physical layer. | ||
| 660 | */ | ||
| 661 | rcar_pci_write_reg(pcie, 0x000f0030, GEN2_PCIEPHYADDR); | ||
| 662 | rcar_pci_write_reg(pcie, 0x00381203, GEN2_PCIEPHYDATA); | ||
| 663 | rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); | ||
| 664 | rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); | ||
| 665 | |||
| 666 | rcar_pci_write_reg(pcie, 0x000f0054, GEN2_PCIEPHYADDR); | ||
| 667 | /* The following value is for DC connection, no termination resistor */ | ||
| 668 | rcar_pci_write_reg(pcie, 0x13802007, GEN2_PCIEPHYDATA); | ||
| 669 | rcar_pci_write_reg(pcie, 0x00000001, GEN2_PCIEPHYCTRL); | ||
| 670 | rcar_pci_write_reg(pcie, 0x00000006, GEN2_PCIEPHYCTRL); | ||
| 671 | |||
| 672 | return 0; | ||
| 673 | } | ||
| 674 | |||
| 675 | static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie) | ||
| 676 | { | ||
| 677 | int err; | ||
| 678 | |||
| 679 | err = phy_init(pcie->phy); | ||
| 680 | if (err) | ||
| 681 | return err; | ||
| 682 | |||
| 683 | return phy_power_on(pcie->phy); | ||
| 684 | } | ||
| 685 | |||
| 686 | static int rcar_msi_alloc(struct rcar_msi *chip) | ||
| 687 | { | ||
| 688 | int msi; | ||
| 689 | |||
| 690 | mutex_lock(&chip->lock); | ||
| 691 | |||
| 692 | msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR); | ||
| 693 | if (msi < INT_PCI_MSI_NR) | ||
| 694 | set_bit(msi, chip->used); | ||
| 695 | else | ||
| 696 | msi = -ENOSPC; | ||
| 697 | |||
| 698 | mutex_unlock(&chip->lock); | ||
| 699 | |||
| 700 | return msi; | ||
| 701 | } | ||
| 702 | |||
| 703 | static int rcar_msi_alloc_region(struct rcar_msi *chip, int no_irqs) | ||
| 704 | { | ||
| 705 | int msi; | ||
| 706 | |||
| 707 | mutex_lock(&chip->lock); | ||
| 708 | msi = bitmap_find_free_region(chip->used, INT_PCI_MSI_NR, | ||
| 709 | order_base_2(no_irqs)); | ||
| 710 | mutex_unlock(&chip->lock); | ||
| 711 | |||
| 712 | return msi; | ||
| 713 | } | ||
| 714 | |||
| 715 | static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq) | ||
| 716 | { | ||
| 717 | mutex_lock(&chip->lock); | ||
| 718 | clear_bit(irq, chip->used); | ||
| 719 | mutex_unlock(&chip->lock); | ||
| 720 | } | ||
| 721 | |||
| 722 | static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) | ||
| 723 | { | ||
| 724 | struct rcar_pcie *pcie = data; | ||
| 725 | struct rcar_msi *msi = &pcie->msi; | ||
| 726 | struct device *dev = pcie->dev; | ||
| 727 | unsigned long reg; | ||
| 728 | |||
| 729 | reg = rcar_pci_read_reg(pcie, PCIEMSIFR); | ||
| 730 | |||
| 731 | /* MSI & INTx share an interrupt - we only handle MSI here */ | ||
| 732 | if (!reg) | ||
| 733 | return IRQ_NONE; | ||
| 734 | |||
| 735 | while (reg) { | ||
| 736 | unsigned int index = find_first_bit(®, 32); | ||
| 737 | unsigned int irq; | ||
| 738 | |||
| 739 | /* clear the interrupt */ | ||
| 740 | rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); | ||
| 741 | |||
| 742 | irq = irq_find_mapping(msi->domain, index); | ||
| 743 | if (irq) { | ||
| 744 | if (test_bit(index, msi->used)) | ||
| 745 | generic_handle_irq(irq); | ||
| 746 | else | ||
| 747 | dev_info(dev, "unhandled MSI\n"); | ||
| 748 | } else { | ||
| 749 | /* Unknown MSI, just clear it */ | ||
| 750 | dev_dbg(dev, "unexpected MSI\n"); | ||
| 751 | } | ||
| 752 | |||
| 753 | /* see if there's any more pending in this vector */ | ||
| 754 | reg = rcar_pci_read_reg(pcie, PCIEMSIFR); | ||
| 755 | } | ||
| 756 | |||
| 757 | return IRQ_HANDLED; | ||
| 758 | } | ||
| 759 | |||
| 760 | static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, | ||
| 761 | struct msi_desc *desc) | ||
| 762 | { | ||
| 763 | struct rcar_msi *msi = to_rcar_msi(chip); | ||
| 764 | struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); | ||
| 765 | struct msi_msg msg; | ||
| 766 | unsigned int irq; | ||
| 767 | int hwirq; | ||
| 768 | |||
| 769 | hwirq = rcar_msi_alloc(msi); | ||
| 770 | if (hwirq < 0) | ||
| 771 | return hwirq; | ||
| 772 | |||
| 773 | irq = irq_find_mapping(msi->domain, hwirq); | ||
| 774 | if (!irq) { | ||
| 775 | rcar_msi_free(msi, hwirq); | ||
| 776 | return -EINVAL; | ||
| 777 | } | ||
| 778 | |||
| 779 | irq_set_msi_desc(irq, desc); | ||
| 780 | |||
| 781 | msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; | ||
| 782 | msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); | ||
| 783 | msg.data = hwirq; | ||
| 784 | |||
| 785 | pci_write_msi_msg(irq, &msg); | ||
| 786 | |||
| 787 | return 0; | ||
| 788 | } | ||
| 789 | |||
| 790 | static int rcar_msi_setup_irqs(struct msi_controller *chip, | ||
| 791 | struct pci_dev *pdev, int nvec, int type) | ||
| 792 | { | ||
| 793 | struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip); | ||
| 794 | struct rcar_msi *msi = to_rcar_msi(chip); | ||
| 795 | struct msi_desc *desc; | ||
| 796 | struct msi_msg msg; | ||
| 797 | unsigned int irq; | ||
| 798 | int hwirq; | ||
| 799 | int i; | ||
| 800 | |||
| 801 | /* MSI-X interrupts are not supported */ | ||
| 802 | if (type == PCI_CAP_ID_MSIX) | ||
| 803 | return -EINVAL; | ||
| 804 | |||
| 805 | WARN_ON(!list_is_singular(&pdev->dev.msi_list)); | ||
| 806 | desc = list_entry(pdev->dev.msi_list.next, struct msi_desc, list); | ||
| 807 | |||
| 808 | hwirq = rcar_msi_alloc_region(msi, nvec); | ||
| 809 | if (hwirq < 0) | ||
| 810 | return -ENOSPC; | ||
| 811 | |||
| 812 | irq = irq_find_mapping(msi->domain, hwirq); | ||
| 813 | if (!irq) | ||
| 814 | return -ENOSPC; | ||
| 815 | |||
| 816 | for (i = 0; i < nvec; i++) { | ||
| 817 | /* | ||
| 818 | * irq_create_mapping() called from rcar_pcie_probe() pre- | ||
| 819 | * allocates descs, so there is no need to allocate descs here. | ||
| 820 | * We can therefore assume that if irq_find_mapping() above | ||
| 821 | * returns non-zero, then the descs are also successfully | ||
| 822 | * allocated. | ||
| 823 | */ | ||
| 824 | if (irq_set_msi_desc_off(irq, i, desc)) { | ||
| 825 | /* TODO: clear */ | ||
| 826 | return -EINVAL; | ||
| 827 | } | ||
| 828 | } | ||
| 829 | |||
| 830 | desc->nvec_used = nvec; | ||
| 831 | desc->msi_attrib.multiple = order_base_2(nvec); | ||
| 832 | |||
| 833 | msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE; | ||
| 834 | msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); | ||
| 835 | msg.data = hwirq; | ||
| 836 | |||
| 837 | pci_write_msi_msg(irq, &msg); | ||
| 838 | |||
| 839 | return 0; | ||
| 840 | } | ||
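A worked example of the multi-MSI path above, with assumed values:

/*
 * nvec = 4: rcar_msi_alloc_region() returns a 4-aligned hwirq,
 * desc->msi_attrib.multiple = order_base_2(4) = 2, and msg.data = hwirq.
 * The endpoint may OR 0..3 into the low bits of the data, so bits
 * hwirq..hwirq+3 of PCIEMSIFR are raised and demultiplexed in
 * rcar_pcie_msi_irq().
 */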
| 841 | |||
| 842 | static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) | ||
| 843 | { | ||
| 844 | struct rcar_msi *msi = to_rcar_msi(chip); | ||
| 845 | struct irq_data *d = irq_get_irq_data(irq); | ||
| 846 | |||
| 847 | rcar_msi_free(msi, d->hwirq); | ||
| 848 | } | ||
| 849 | |||
| 850 | static struct irq_chip rcar_msi_irq_chip = { | ||
| 851 | .name = "R-Car PCIe MSI", | ||
| 852 | .irq_enable = pci_msi_unmask_irq, | ||
| 853 | .irq_disable = pci_msi_mask_irq, | ||
| 854 | .irq_mask = pci_msi_mask_irq, | ||
| 855 | .irq_unmask = pci_msi_unmask_irq, | ||
| 856 | }; | ||
| 857 | |||
| 858 | static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, | ||
| 859 | irq_hw_number_t hwirq) | ||
| 860 | { | ||
| 861 | irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); | ||
| 862 | irq_set_chip_data(irq, domain->host_data); | ||
| 863 | |||
| 864 | return 0; | ||
| 865 | } | ||
| 866 | |||
| 867 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 868 | .map = rcar_msi_map, | ||
| 869 | }; | ||
| 870 | |||
| 871 | static void rcar_pcie_unmap_msi(struct rcar_pcie *pcie) | ||
| 872 | { | ||
| 873 | struct rcar_msi *msi = &pcie->msi; | ||
| 874 | int i, irq; | ||
| 875 | |||
| 876 | for (i = 0; i < INT_PCI_MSI_NR; i++) { | ||
| 877 | irq = irq_find_mapping(msi->domain, i); | ||
| 878 | if (irq > 0) | ||
| 879 | irq_dispose_mapping(irq); | ||
| 880 | } | ||
| 881 | |||
| 882 | irq_domain_remove(msi->domain); | ||
| 883 | } | ||
| 884 | |||
| 885 | static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) | ||
| 886 | { | ||
| 887 | struct device *dev = pcie->dev; | ||
| 888 | struct rcar_msi *msi = &pcie->msi; | ||
| 889 | unsigned long base; | ||
| 890 | int err, i; | ||
| 891 | |||
| 892 | mutex_init(&msi->lock); | ||
| 893 | |||
| 894 | msi->chip.dev = dev; | ||
| 895 | msi->chip.setup_irq = rcar_msi_setup_irq; | ||
| 896 | msi->chip.setup_irqs = rcar_msi_setup_irqs; | ||
| 897 | msi->chip.teardown_irq = rcar_msi_teardown_irq; | ||
| 898 | |||
| 899 | msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR, | ||
| 900 | &msi_domain_ops, &msi->chip); | ||
| 901 | if (!msi->domain) { | ||
| 902 | dev_err(dev, "failed to create IRQ domain\n"); | ||
| 903 | return -ENOMEM; | ||
| 904 | } | ||
| 905 | |||
| 906 | for (i = 0; i < INT_PCI_MSI_NR; i++) | ||
| 907 | irq_create_mapping(msi->domain, i); | ||
| 908 | |||
| 909 | /* The two IRQs are for MSI, but they are shared with non-MSI interrupts too */ | ||
| 910 | err = devm_request_irq(dev, msi->irq1, rcar_pcie_msi_irq, | ||
| 911 | IRQF_SHARED | IRQF_NO_THREAD, | ||
| 912 | rcar_msi_irq_chip.name, pcie); | ||
| 913 | if (err < 0) { | ||
| 914 | dev_err(dev, "failed to request IRQ: %d\n", err); | ||
| 915 | goto err; | ||
| 916 | } | ||
| 917 | |||
| 918 | err = devm_request_irq(dev, msi->irq2, rcar_pcie_msi_irq, | ||
| 919 | IRQF_SHARED | IRQF_NO_THREAD, | ||
| 920 | rcar_msi_irq_chip.name, pcie); | ||
| 921 | if (err < 0) { | ||
| 922 | dev_err(dev, "failed to request IRQ: %d\n", err); | ||
| 923 | goto err; | ||
| 924 | } | ||
| 925 | |||
| 926 | /* set up the MSI data target */ | ||
| 927 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | ||
| 928 | base = virt_to_phys((void *)msi->pages); | ||
| 929 | |||
| 930 | rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); | ||
| 931 | rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); | ||
| 932 | |||
| 933 | /* enable all MSI interrupts */ | ||
| 934 | rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); | ||
| 935 | |||
| 936 | return 0; | ||
| 937 | |||
| 938 | err: | ||
| 939 | rcar_pcie_unmap_msi(pcie); | ||
| 940 | return err; | ||
| 941 | } | ||
| 942 | |||
| 943 | static void rcar_pcie_teardown_msi(struct rcar_pcie *pcie) | ||
| 944 | { | ||
| 945 | struct rcar_msi *msi = &pcie->msi; | ||
| 946 | |||
| 947 | /* Disable all MSI interrupts */ | ||
| 948 | rcar_pci_write_reg(pcie, 0, PCIEMSIIER); | ||
| 949 | |||
| 950 | /* Disable address decoding of the MSI interrupt, MSIFE */ | ||
| 951 | rcar_pci_write_reg(pcie, 0, PCIEMSIALR); | ||
| 952 | |||
| 953 | free_pages(msi->pages, 0); | ||
| 954 | |||
| 955 | rcar_pcie_unmap_msi(pcie); | ||
| 956 | } | ||
| 957 | |||
| 958 | static int rcar_pcie_get_resources(struct rcar_pcie *pcie) | ||
| 959 | { | ||
| 960 | struct device *dev = pcie->dev; | ||
| 961 | struct resource res; | ||
| 962 | int err, i; | ||
| 963 | |||
| 964 | pcie->phy = devm_phy_optional_get(dev, "pcie"); | ||
| 965 | if (IS_ERR(pcie->phy)) | ||
| 966 | return PTR_ERR(pcie->phy); | ||
| 967 | |||
| 968 | err = of_address_to_resource(dev->of_node, 0, &res); | ||
| 969 | if (err) | ||
| 970 | return err; | ||
| 971 | |||
| 972 | pcie->base = devm_ioremap_resource(dev, &res); | ||
| 973 | if (IS_ERR(pcie->base)) | ||
| 974 | return PTR_ERR(pcie->base); | ||
| 975 | |||
| 976 | pcie->bus_clk = devm_clk_get(dev, "pcie_bus"); | ||
| 977 | if (IS_ERR(pcie->bus_clk)) { | ||
| 978 | dev_err(dev, "cannot get pcie bus clock\n"); | ||
| 979 | return PTR_ERR(pcie->bus_clk); | ||
| 980 | } | ||
| 981 | |||
| 982 | i = irq_of_parse_and_map(dev->of_node, 0); | ||
| 983 | if (!i) { | ||
| 984 | dev_err(dev, "cannot get platform resources for msi interrupt\n"); | ||
| 985 | err = -ENOENT; | ||
| 986 | goto err_irq1; | ||
| 987 | } | ||
| 988 | pcie->msi.irq1 = i; | ||
| 989 | |||
| 990 | i = irq_of_parse_and_map(dev->of_node, 1); | ||
| 991 | if (!i) { | ||
| 992 | dev_err(dev, "cannot get platform resources for msi interrupt\n"); | ||
| 993 | err = -ENOENT; | ||
| 994 | goto err_irq2; | ||
| 995 | } | ||
| 996 | pcie->msi.irq2 = i; | ||
| 997 | |||
| 998 | return 0; | ||
| 999 | |||
| 1000 | err_irq2: | ||
| 1001 | irq_dispose_mapping(pcie->msi.irq1); | ||
| 1002 | err_irq1: | ||
| 1003 | return err; | ||
| 1004 | } | ||
| 1005 | |||
| 1006 | static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie, | ||
| 1007 | struct of_pci_range *range, | ||
| 1008 | int *index) | ||
| 1009 | { | ||
| 1010 | u64 restype = range->flags; | ||
| 1011 | u64 cpu_addr = range->cpu_addr; | ||
| 1012 | u64 cpu_end = range->cpu_addr + range->size; | ||
| 1013 | u64 pci_addr = range->pci_addr; | ||
| 1014 | u32 flags = LAM_64BIT | LAR_ENABLE; | ||
| 1015 | u64 mask; | ||
| 1016 | u64 size; | ||
| 1017 | int idx = *index; | ||
| 1018 | |||
| 1019 | if (restype & IORESOURCE_PREFETCH) | ||
| 1020 | flags |= LAM_PREFETCH; | ||
| 1021 | |||
| 1022 | /* | ||
| 1023 | * If the size of the range is larger than the alignment of the start | ||
| 1024 | * address, we have to use multiple entries to perform the mapping. | ||
| 1025 | */ | ||
| 1026 | if (cpu_addr > 0) { | ||
| 1027 | unsigned long nr_zeros = __ffs64(cpu_addr); | ||
| 1028 | u64 alignment = 1ULL << nr_zeros; | ||
| 1029 | |||
| 1030 | size = min(range->size, alignment); | ||
| 1031 | } else { | ||
| 1032 | size = range->size; | ||
| 1033 | } | ||
| 1034 | /* Hardware supports max 4GiB inbound region */ | ||
| 1035 | size = min(size, 1ULL << 32); | ||
| 1036 | |||
| 1037 | mask = roundup_pow_of_two(size) - 1; | ||
| 1038 | mask &= ~0xf; | ||
| 1039 | |||
| 1040 | while (cpu_addr < cpu_end) { | ||
| 1041 | /* | ||
| 1042 | * Set up 64-bit inbound regions as the range parser doesn't | ||
| 1043 | * distinguish between 32 and 64-bit types. | ||
| 1044 | */ | ||
| 1045 | rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), | ||
| 1046 | PCIEPRAR(idx)); | ||
| 1047 | rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx)); | ||
| 1048 | rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, | ||
| 1049 | PCIELAMR(idx)); | ||
| 1050 | |||
| 1051 | rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), | ||
| 1052 | PCIEPRAR(idx + 1)); | ||
| 1053 | rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), | ||
| 1054 | PCIELAR(idx + 1)); | ||
| 1055 | rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1)); | ||
| 1056 | |||
| 1057 | pci_addr += size; | ||
| 1058 | cpu_addr += size; | ||
| 1059 | idx += 2; | ||
| 1060 | |||
| 1061 | if (idx > MAX_NR_INBOUND_MAPS) { | ||
| 1062 | dev_err(pcie->dev, "Failed to map inbound regions!\n"); | ||
| 1063 | return -EINVAL; | ||
| 1064 | } | ||
| 1065 | } | ||
| 1066 | *index = idx; | ||
| 1067 | |||
| 1068 | return 0; | ||
| 1069 | } | ||
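A worked example of the splitting logic above, with assumed values:

/*
 * cpu_addr = 0x40000000, size = 2 GiB:
 *   __ffs64(0x40000000) = 30, so alignment = 1 GiB
 *   size = min(2 GiB, 1 GiB) = 1 GiB, mask = 0x3ffffff0
 *   pass 1: idx 0/1 map CPU 0x40000000..0x7fffffff
 *   pass 2: idx 2/3 map CPU 0x80000000..0xbfffffff
 */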
| 1070 | |||
| 1071 | static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie, | ||
| 1072 | struct device_node *np) | ||
| 1073 | { | ||
| 1074 | struct of_pci_range range; | ||
| 1075 | struct of_pci_range_parser parser; | ||
| 1076 | int index = 0; | ||
| 1077 | int err; | ||
| 1078 | |||
| 1079 | if (of_pci_dma_range_parser_init(&parser, np)) | ||
| 1080 | return -EINVAL; | ||
| 1081 | |||
| 1082 | /* Get the dma-ranges from DT */ | ||
| 1083 | for_each_of_pci_range(&parser, &range) { | ||
| 1084 | u64 end = range.cpu_addr + range.size - 1; | ||
| 1085 | |||
| 1086 | dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n", | ||
| 1087 | range.flags, range.cpu_addr, end, range.pci_addr); | ||
| 1088 | |||
| 1089 | err = rcar_pcie_inbound_ranges(pcie, &range, &index); | ||
| 1090 | if (err) | ||
| 1091 | return err; | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | return 0; | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | static const struct of_device_id rcar_pcie_of_match[] = { | ||
| 1098 | { .compatible = "renesas,pcie-r8a7779", | ||
| 1099 | .data = rcar_pcie_phy_init_h1 }, | ||
| 1100 | { .compatible = "renesas,pcie-r8a7790", | ||
| 1101 | .data = rcar_pcie_phy_init_gen2 }, | ||
| 1102 | { .compatible = "renesas,pcie-r8a7791", | ||
| 1103 | .data = rcar_pcie_phy_init_gen2 }, | ||
| 1104 | { .compatible = "renesas,pcie-rcar-gen2", | ||
| 1105 | .data = rcar_pcie_phy_init_gen2 }, | ||
| 1106 | { .compatible = "renesas,pcie-r8a7795", | ||
| 1107 | .data = rcar_pcie_phy_init_gen3 }, | ||
| 1108 | { .compatible = "renesas,pcie-rcar-gen3", | ||
| 1109 | .data = rcar_pcie_phy_init_gen3 }, | ||
| 1110 | {}, | ||
| 1111 | }; | ||
| 1112 | |||
| 1113 | static int rcar_pcie_probe(struct platform_device *pdev) | ||
| 1114 | { | ||
| 1115 | struct device *dev = &pdev->dev; | ||
| 1116 | struct rcar_pcie *pcie; | ||
| 1117 | unsigned int data; | ||
| 1118 | int err; | ||
| 1119 | int (*phy_init_fn)(struct rcar_pcie *); | ||
| 1120 | struct pci_host_bridge *bridge; | ||
| 1121 | |||
| 1122 | bridge = pci_alloc_host_bridge(sizeof(*pcie)); | ||
| 1123 | if (!bridge) | ||
| 1124 | return -ENOMEM; | ||
| 1125 | |||
| 1126 | pcie = pci_host_bridge_priv(bridge); | ||
| 1127 | |||
| 1128 | pcie->dev = dev; | ||
| 1129 | |||
| 1130 | err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); | ||
| 1131 | if (err) | ||
| 1132 | goto err_free_bridge; | ||
| 1133 | |||
| 1134 | pm_runtime_enable(pcie->dev); | ||
| 1135 | err = pm_runtime_get_sync(pcie->dev); | ||
| 1136 | if (err < 0) { | ||
| 1137 | dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); | ||
| 1138 | goto err_pm_disable; | ||
| 1139 | } | ||
| 1140 | |||
| 1141 | err = rcar_pcie_get_resources(pcie); | ||
| 1142 | if (err < 0) { | ||
| 1143 | dev_err(dev, "failed to request resources: %d\n", err); | ||
| 1144 | goto err_pm_put; | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | err = clk_prepare_enable(pcie->bus_clk); | ||
| 1148 | if (err) { | ||
| 1149 | dev_err(dev, "failed to enable bus clock: %d\n", err); | ||
| 1150 | goto err_unmap_msi_irqs; | ||
| 1151 | } | ||
| 1152 | |||
| 1153 | err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); | ||
| 1154 | if (err) | ||
| 1155 | goto err_clk_disable; | ||
| 1156 | |||
| 1157 | phy_init_fn = of_device_get_match_data(dev); | ||
| 1158 | err = phy_init_fn(pcie); | ||
| 1159 | if (err) { | ||
| 1160 | dev_err(dev, "failed to init PCIe PHY\n"); | ||
| 1161 | goto err_clk_disable; | ||
| 1162 | } | ||
| 1163 | |||
| 1164 | /* Failure to get a link might just be that no cards are inserted */ | ||
| 1165 | if (rcar_pcie_hw_init(pcie)) { | ||
| 1166 | dev_info(dev, "PCIe link down\n"); | ||
| 1167 | err = -ENODEV; | ||
| 1168 | goto err_clk_disable; | ||
| 1169 | } | ||
| 1170 | |||
| 1171 | data = rcar_pci_read_reg(pcie, MACSR); | ||
| 1172 | dev_info(dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f); | ||
| 1173 | |||
| 1174 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 1175 | err = rcar_pcie_enable_msi(pcie); | ||
| 1176 | if (err < 0) { | ||
| 1177 | dev_err(dev, | ||
| 1178 | "failed to enable MSI support: %d\n", | ||
| 1179 | err); | ||
| 1180 | goto err_clk_disable; | ||
| 1181 | } | ||
| 1182 | } | ||
| 1183 | |||
| 1184 | err = rcar_pcie_enable(pcie); | ||
| 1185 | if (err) | ||
| 1186 | goto err_msi_teardown; | ||
| 1187 | |||
| 1188 | return 0; | ||
| 1189 | |||
| 1190 | err_msi_teardown: | ||
| 1191 | if (IS_ENABLED(CONFIG_PCI_MSI)) | ||
| 1192 | rcar_pcie_teardown_msi(pcie); | ||
| 1193 | |||
| 1194 | err_clk_disable: | ||
| 1195 | clk_disable_unprepare(pcie->bus_clk); | ||
| 1196 | |||
| 1197 | err_unmap_msi_irqs: | ||
| 1198 | irq_dispose_mapping(pcie->msi.irq2); | ||
| 1199 | irq_dispose_mapping(pcie->msi.irq1); | ||
| 1200 | |||
| 1201 | err_pm_put: | ||
| 1202 | pm_runtime_put(dev); | ||
| 1203 | |||
| 1204 | err_pm_disable: | ||
| 1205 | pm_runtime_disable(dev); | ||
| 1206 | pci_free_resource_list(&pcie->resources); | ||
| 1207 | |||
| 1208 | err_free_bridge: | ||
| 1209 | pci_free_host_bridge(bridge); | ||
| 1210 | |||
| 1211 | return err; | ||
| 1212 | } | ||
| 1213 | |||
| 1214 | static struct platform_driver rcar_pcie_driver = { | ||
| 1215 | .driver = { | ||
| 1216 | .name = "rcar-pcie", | ||
| 1217 | .of_match_table = rcar_pcie_of_match, | ||
| 1218 | .suppress_bind_attrs = true, | ||
| 1219 | }, | ||
| 1220 | .probe = rcar_pcie_probe, | ||
| 1221 | }; | ||
| 1222 | builtin_platform_driver(rcar_pcie_driver); | ||
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c new file mode 100644 index 000000000000..fc267a49a932 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip-ep.c | |||
| @@ -0,0 +1,642 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Rockchip AXI PCIe endpoint controller driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2018 Rockchip, Inc. | ||
| 6 | * | ||
| 7 | * Author: Shawn Lin <shawn.lin@rock-chips.com> | ||
| 8 | * Simon Xue <xxm@rock-chips.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/configfs.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/of.h> | ||
| 15 | #include <linux/pci-epc.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/pci-epf.h> | ||
| 18 | #include <linux/sizes.h> | ||
| 19 | |||
| 20 | #include "pcie-rockchip.h" | ||
| 21 | |||
| 22 | /** | ||
| 23 | * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver | ||
| 24 | * @rockchip: Rockchip PCIe controller | ||
| | * @epc: PCI EPC device | ||
| 25 | * @max_regions: maximum number of regions supported by hardware | ||
| 26 | * @ob_region_map: bitmask of mapped outbound regions | ||
| 27 | * @ob_addr: base addresses in the AXI bus where the outbound regions start | ||
| 28 | * @irq_phys_addr: base address on the AXI bus where the MSI/legacy IRQ | ||
| 29 | * dedicated outbound region is mapped. | ||
| 30 | * @irq_cpu_addr: base address in the CPU space where a write access triggers | ||
| 31 | * the sending of a memory write (MSI) / normal message (legacy | ||
| 32 | * IRQ) TLP through the PCIe bus. | ||
| 33 | * @irq_pci_addr: used to save the current mapping of the MSI/legacy IRQ | ||
| 34 | * dedicated outbound region. | ||
| 35 | * @irq_pci_fn: the latest PCI function that has updated the mapping of | ||
| 36 | * the MSI/legacy IRQ dedicated outbound region. | ||
| 37 | * @irq_pending: bitmask of asserted legacy IRQs. | ||
| 38 | */ | ||
| 39 | struct rockchip_pcie_ep { | ||
| 40 | struct rockchip_pcie rockchip; | ||
| 41 | struct pci_epc *epc; | ||
| 42 | u32 max_regions; | ||
| 43 | unsigned long ob_region_map; | ||
| 44 | phys_addr_t *ob_addr; | ||
| 45 | phys_addr_t irq_phys_addr; | ||
| 46 | void __iomem *irq_cpu_addr; | ||
| 47 | u64 irq_pci_addr; | ||
| 48 | u8 irq_pci_fn; | ||
| 49 | u8 irq_pending; | ||
| 50 | }; | ||
| 51 | |||
| 52 | static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, | ||
| 53 | u32 region) | ||
| 54 | { | ||
| 55 | rockchip_pcie_write(rockchip, 0, | ||
| 56 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(region)); | ||
| 57 | rockchip_pcie_write(rockchip, 0, | ||
| 58 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(region)); | ||
| 59 | rockchip_pcie_write(rockchip, 0, | ||
| 60 | ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region)); | ||
| 61 | rockchip_pcie_write(rockchip, 0, | ||
| 62 | ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); | ||
| 63 | rockchip_pcie_write(rockchip, 0, | ||
| 64 | ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region)); | ||
| 65 | rockchip_pcie_write(rockchip, 0, | ||
| 66 | ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region)); | ||
| 67 | } | ||
| 68 | |||
| 69 | static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, | ||
| 70 | u32 r, u32 type, u64 cpu_addr, | ||
| 71 | u64 pci_addr, size_t size) | ||
| 72 | { | ||
| 73 | u64 sz = 1ULL << fls64(size - 1); | ||
| 74 | int num_pass_bits = ilog2(sz); | ||
| 75 | u32 addr0, addr1, desc0, desc1; | ||
| 76 | bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG); | ||
| 77 | |||
| 78 | /* The minimal region size is 1MB */ | ||
| 79 | if (num_pass_bits < 8) | ||
| 80 | num_pass_bits = 8; | ||
| 81 | |||
| 82 | cpu_addr -= rockchip->mem_res->start; | ||
| 83 | addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) & | ||
| 84 | PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | | ||
| 85 | (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); | ||
| 86 | addr1 = upper_32_bits(is_nor_msg ? cpu_addr : pci_addr); | ||
| 87 | desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type; | ||
| 88 | desc1 = 0; | ||
| 89 | |||
| 90 | if (is_nor_msg) { | ||
| 91 | rockchip_pcie_write(rockchip, 0, | ||
| 92 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); | ||
| 93 | rockchip_pcie_write(rockchip, 0, | ||
| 94 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); | ||
| 95 | rockchip_pcie_write(rockchip, desc0, | ||
| 96 | ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); | ||
| 97 | rockchip_pcie_write(rockchip, desc1, | ||
| 98 | ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); | ||
| 99 | } else { | ||
| 100 | /* PCI bus address region */ | ||
| 101 | rockchip_pcie_write(rockchip, addr0, | ||
| 102 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); | ||
| 103 | rockchip_pcie_write(rockchip, addr1, | ||
| 104 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); | ||
| 105 | rockchip_pcie_write(rockchip, desc0, | ||
| 106 | ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); | ||
| 107 | rockchip_pcie_write(rockchip, desc1, | ||
| 108 | ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); | ||
| 109 | |||
| 110 | addr0 = | ||
| 111 | ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | | ||
| 112 | (lower_32_bits(cpu_addr) & | ||
| 113 | PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); | ||
| 114 | addr1 = upper_32_bits(cpu_addr); | ||
| 115 | } | ||
| 116 | |||
| 117 | /* CPU bus address region */ | ||
| 118 | rockchip_pcie_write(rockchip, addr0, | ||
| 119 | ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r)); | ||
| 120 | rockchip_pcie_write(rockchip, addr1, | ||
| 121 | ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r)); | ||
| 122 | } | ||
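A worked example of the pass-bits computation above, with an assumed size:

/*
 * size = 64 KiB: sz = 1ULL << fls64(SZ_64K - 1) = SZ_64K and
 * num_pass_bits = 16, so the low 16 bits of the AXI address pass
 * through untranslated and ADDR0 carries (16 - 1) in its NUM_BITS field.
 */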
| 123 | |||
| 124 | static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, | ||
| 125 | struct pci_epf_header *hdr) | ||
| 126 | { | ||
| 127 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 128 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 129 | |||
| 130 | /* All functions share the same vendor ID with function 0 */ | ||
| 131 | if (fn == 0) { | ||
| 132 | u32 vid_regs = (hdr->vendorid & GENMASK(15, 0)) | | ||
| 133 | (hdr->subsys_vendor_id & GENMASK(31, 16)) << 16; | ||
| 134 | |||
| 135 | rockchip_pcie_write(rockchip, vid_regs, | ||
| 136 | PCIE_CORE_CONFIG_VENDOR); | ||
| 137 | } | ||
| 138 | |||
| 139 | rockchip_pcie_write(rockchip, hdr->deviceid << 16, | ||
| 140 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_VENDOR_ID); | ||
| 141 | |||
| 142 | rockchip_pcie_write(rockchip, | ||
| 143 | hdr->revid | | ||
| 144 | hdr->progif_code << 8 | | ||
| 145 | hdr->subclass_code << 16 | | ||
| 146 | hdr->baseclass_code << 24, | ||
| 147 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + PCI_REVISION_ID); | ||
| 148 | rockchip_pcie_write(rockchip, hdr->cache_line_size, | ||
| 149 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 150 | PCI_CACHE_LINE_SIZE); | ||
| 151 | rockchip_pcie_write(rockchip, hdr->subsys_id << 16, | ||
| 152 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 153 | PCI_SUBSYSTEM_VENDOR_ID); | ||
| 154 | rockchip_pcie_write(rockchip, hdr->interrupt_pin << 8, | ||
| 155 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 156 | PCI_INTERRUPT_LINE); | ||
| 157 | |||
| 158 | return 0; | ||
| 159 | } | ||
| 160 | |||
| 161 | static int rockchip_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, | ||
| 162 | struct pci_epf_bar *epf_bar) | ||
| 163 | { | ||
| 164 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 165 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 166 | dma_addr_t bar_phys = epf_bar->phys_addr; | ||
| 167 | enum pci_barno bar = epf_bar->barno; | ||
| 168 | int flags = epf_bar->flags; | ||
| 169 | u32 addr0, addr1, reg, cfg, b, aperture, ctrl; | ||
| 170 | u64 sz; | ||
| 171 | |||
| 172 | /* BAR size is 2^(aperture + 7) */ | ||
| 173 | sz = max_t(size_t, epf_bar->size, MIN_EP_APERTURE); | ||
| 174 | |||
| 175 | /* | ||
| 176 | * roundup_pow_of_two() returns an unsigned long, which is not suited | ||
| 177 | * for 64bit values. | ||
| 178 | */ | ||
| 179 | sz = 1ULL << fls64(sz - 1); | ||
| 180 | aperture = ilog2(sz) - 7; /* 128B -> 0, 256B -> 1, 512B -> 2, ... */ | ||
| 181 | |||
| 182 | if ((flags & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { | ||
| 183 | ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS; | ||
| 184 | } else { | ||
| 185 | bool is_prefetch = !!(flags & PCI_BASE_ADDRESS_MEM_PREFETCH); | ||
| 186 | bool is_64bits = sz > SZ_2G; | ||
| 187 | |||
| 188 | if (is_64bits && (bar & 1)) | ||
| 189 | return -EINVAL; | ||
| 190 | |||
| 191 | if (is_64bits && is_prefetch) | ||
| 192 | ctrl = | ||
| 193 | ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS; | ||
| 194 | else if (is_prefetch) | ||
| 195 | ctrl = | ||
| 196 | ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS; | ||
| 197 | else if (is_64bits) | ||
| 198 | ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS; | ||
| 199 | else | ||
| 200 | ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS; | ||
| 201 | } | ||
| 202 | |||
| 203 | if (bar < BAR_4) { | ||
| 204 | reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); | ||
| 205 | b = bar; | ||
| 206 | } else { | ||
| 207 | reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); | ||
| 208 | b = bar - BAR_4; | ||
| 209 | } | ||
| 210 | |||
| 211 | addr0 = lower_32_bits(bar_phys); | ||
| 212 | addr1 = upper_32_bits(bar_phys); | ||
| 213 | |||
| 214 | cfg = rockchip_pcie_read(rockchip, reg); | ||
| 215 | cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | | ||
| 216 | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); | ||
| 217 | cfg |= (ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, aperture) | | ||
| 218 | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); | ||
| 219 | |||
| 220 | rockchip_pcie_write(rockchip, cfg, reg); | ||
| 221 | rockchip_pcie_write(rockchip, addr0, | ||
| 222 | ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); | ||
| 223 | rockchip_pcie_write(rockchip, addr1, | ||
| 224 | ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); | ||
| 225 | |||
| 226 | return 0; | ||
| 227 | } | ||
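A worked example of the aperture encoding above, with an assumed BAR size:

/*
 * epf_bar->size = 1 MiB: sz = 1ULL << fls64(SZ_1M - 1) = SZ_1M,
 * aperture = ilog2(SZ_1M) - 7 = 13, and the hardware advertises a
 * BAR of 2^(13 + 7) = 1 MiB, matching the "2^(aperture + 7)" rule.
 */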
| 228 | |||
| 229 | static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, | ||
| 230 | struct pci_epf_bar *epf_bar) | ||
| 231 | { | ||
| 232 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 233 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 234 | u32 reg, cfg, b, ctrl; | ||
| 235 | enum pci_barno bar = epf_bar->barno; | ||
| 236 | |||
| 237 | if (bar < BAR_4) { | ||
| 238 | reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn); | ||
| 239 | b = bar; | ||
| 240 | } else { | ||
| 241 | reg = ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn); | ||
| 242 | b = bar - BAR_4; | ||
| 243 | } | ||
| 244 | |||
| 245 | ctrl = ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED; | ||
| 246 | cfg = rockchip_pcie_read(rockchip, reg); | ||
| 247 | cfg &= ~(ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) | | ||
| 248 | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)); | ||
| 249 | cfg |= ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl); | ||
| 250 | |||
| 251 | rockchip_pcie_write(rockchip, cfg, reg); | ||
| 252 | rockchip_pcie_write(rockchip, 0x0, | ||
| 253 | ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar)); | ||
| 254 | rockchip_pcie_write(rockchip, 0x0, | ||
| 255 | ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); | ||
| 256 | } | ||
| 257 | |||
| 258 | static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, | ||
| 259 | phys_addr_t addr, u64 pci_addr, | ||
| 260 | size_t size) | ||
| 261 | { | ||
| 262 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 263 | struct rockchip_pcie *pcie = &ep->rockchip; | ||
| 264 | u32 r; | ||
| 265 | |||
| 266 | r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); | ||
| 268 | /* | ||
| 269 | * Region 0 is reserved for configuration space and shouldn't | ||
| 270 | * be used elsewhere per TRM, so leave it out. | ||
| 271 | */ | ||
| 272 | if (r >= ep->max_regions - 1) { | ||
| 273 | dev_err(&epc->dev, "no free outbound region\n"); | ||
| 274 | return -EINVAL; | ||
| 275 | } | ||
| 276 | |||
| 277 | rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr, | ||
| 278 | pci_addr, size); | ||
| 279 | |||
| 280 | set_bit(r, &ep->ob_region_map); | ||
| 281 | ep->ob_addr[r] = addr; | ||
| 282 | |||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 286 | static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, | ||
| 287 | phys_addr_t addr) | ||
| 288 | { | ||
| 289 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 290 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 291 | u32 r; | ||
| 292 | |||
| 293 | for (r = 0; r < ep->max_regions - 1; r++) | ||
| 294 | if (ep->ob_addr[r] == addr) | ||
| 295 | break; | ||
| 296 | |||
| 297 | /* | ||
| 298 | * Region 0 is reserved for configuration space and shouldn't | ||
| 299 | * be used elsewhere per TRM, so leave it out. | ||
| 300 | */ | ||
| 301 | if (r == ep->max_regions - 1) | ||
| 302 | return; | ||
| 303 | |||
| 304 | rockchip_pcie_clear_ep_ob_atu(rockchip, r); | ||
| 305 | |||
| 306 | ep->ob_addr[r] = 0; | ||
| 307 | clear_bit(r, &ep->ob_region_map); | ||
| 308 | } | ||
| 309 | |||
| 310 | static int rockchip_pcie_ep_set_msi(struct pci_epc *epc, u8 fn, | ||
| 311 | u8 multi_msg_cap) | ||
| 312 | { | ||
| 313 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 314 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 315 | u16 flags; | ||
| 316 | |||
| 317 | flags = rockchip_pcie_read(rockchip, | ||
| 318 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 319 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG); | ||
| 320 | flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK; | ||
| 321 | flags |= | ||
| 322 | ((multi_msg_cap << 1) << ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET) | | ||
| 323 | PCI_MSI_FLAGS_64BIT; | ||
| 324 | flags &= ~ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP; | ||
| 325 | rockchip_pcie_write(rockchip, flags, | ||
| 326 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 327 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG); | ||
| 328 | return 0; | ||
| 329 | } | ||
| 330 | |||
| 331 | static int rockchip_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) | ||
| 332 | { | ||
| 333 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 334 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 335 | u16 flags; | ||
| 336 | |||
| 337 | flags = rockchip_pcie_read(rockchip, | ||
| 338 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 339 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG); | ||
| 340 | if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) | ||
| 341 | return -EINVAL; | ||
| 342 | |||
| 343 | return ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> | ||
| 344 | ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); | ||
| 345 | } | ||
| 346 | |||
| 347 | static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, | ||
| 348 | u8 intx, bool is_asserted) | ||
| 349 | { | ||
| 350 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 351 | u32 r = ep->max_regions - 1; | ||
| 352 | u32 offset; | ||
| 353 | u16 status; | ||
| 354 | u8 msg_code; | ||
| 355 | |||
| 356 | if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || | ||
| 357 | ep->irq_pci_fn != fn)) { | ||
| 358 | rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, | ||
| 359 | AXI_WRAPPER_NOR_MSG, | ||
| 360 | ep->irq_phys_addr, 0, 0); | ||
| 361 | ep->irq_pci_addr = ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR; | ||
| 362 | ep->irq_pci_fn = fn; | ||
| 363 | } | ||
| 364 | |||
| 365 | intx &= 3; | ||
| 366 | if (is_asserted) { | ||
| 367 | ep->irq_pending |= BIT(intx); | ||
| 368 | msg_code = ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA + intx; | ||
| 369 | } else { | ||
| 370 | ep->irq_pending &= ~BIT(intx); | ||
| 371 | msg_code = ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA + intx; | ||
| 372 | } | ||
| 373 | |||
| 374 | status = rockchip_pcie_read(rockchip, | ||
| 375 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 376 | ROCKCHIP_PCIE_EP_CMD_STATUS); | ||
| 377 | status &= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; | ||
| 378 | |||
| 379 | if ((status != 0) ^ (ep->irq_pending != 0)) { | ||
| 380 | status ^= ROCKCHIP_PCIE_EP_CMD_STATUS_IS; | ||
| 381 | rockchip_pcie_write(rockchip, status, | ||
| 382 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 383 | ROCKCHIP_PCIE_EP_CMD_STATUS); | ||
| 384 | } | ||
| 385 | |||
| 386 | offset = | ||
| 387 | ROCKCHIP_PCIE_MSG_ROUTING(ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX) | | ||
| 388 | ROCKCHIP_PCIE_MSG_CODE(msg_code) | ROCKCHIP_PCIE_MSG_NO_DATA; | ||
| 389 | writel(0, ep->irq_cpu_addr + offset); | ||
| 390 | } | ||
| 391 | |||
| 392 | static int rockchip_pcie_ep_send_legacy_irq(struct rockchip_pcie_ep *ep, u8 fn, | ||
| 393 | u8 intx) | ||
| 394 | { | ||
| 395 | u16 cmd; | ||
| 396 | |||
| 397 | cmd = rockchip_pcie_read(&ep->rockchip, | ||
| 398 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 399 | ROCKCHIP_PCIE_EP_CMD_STATUS); | ||
| 400 | |||
| 401 | if (cmd & PCI_COMMAND_INTX_DISABLE) | ||
| 402 | return -EINVAL; | ||
| 403 | |||
| 404 | /* | ||
| 405 | * The TRM requires a delay between asserting and deasserting INTx; | ||
| 406 | * it vaguely ties the length to a number of AHB bus clock cycles, | ||
| 407 | * so a generous 1 ms is used here. | ||
| 408 | */ | ||
| 409 | rockchip_pcie_ep_assert_intx(ep, fn, intx, true); | ||
| 410 | mdelay(1); | ||
| 411 | rockchip_pcie_ep_assert_intx(ep, fn, intx, false); | ||
| 412 | return 0; | ||
| 413 | } | ||
| 414 | |||
| 415 | static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, | ||
| 416 | u8 interrupt_num) | ||
| 417 | { | ||
| 418 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 419 | u16 flags, mme, data, data_mask; | ||
| 420 | u8 msi_count; | ||
| 421 | u64 pci_addr, pci_addr_mask = 0xff; | ||
| 422 | |||
| 423 | /* Check MSI enable bit */ | ||
| 424 | flags = rockchip_pcie_read(&ep->rockchip, | ||
| 425 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 426 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG); | ||
| 427 | if (!(flags & ROCKCHIP_PCIE_EP_MSI_CTRL_ME)) | ||
| 428 | return -EINVAL; | ||
| 429 | |||
| 430 | /* Get the number of enabled MSI vectors from MME */ | ||
| 431 | mme = ((flags & ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK) >> | ||
| 432 | ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET); | ||
| 433 | msi_count = 1 << mme; | ||
| 434 | if (!interrupt_num || interrupt_num > msi_count) | ||
| 435 | return -EINVAL; | ||
| 436 | |||
| 437 | /* Set MSI private data */ | ||
| 438 | data_mask = msi_count - 1; | ||
| 439 | data = rockchip_pcie_read(rockchip, | ||
| 440 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 441 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG + | ||
| 442 | PCI_MSI_DATA_64); | ||
| 443 | data = (data & ~data_mask) | ((interrupt_num - 1) & data_mask); | ||
| 444 | |||
| 445 | /* Get MSI PCI address */ | ||
| 446 | pci_addr = rockchip_pcie_read(rockchip, | ||
| 447 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 448 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG + | ||
| 449 | PCI_MSI_ADDRESS_HI); | ||
| 450 | pci_addr <<= 32; | ||
| 451 | pci_addr |= rockchip_pcie_read(rockchip, | ||
| 452 | ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + | ||
| 453 | ROCKCHIP_PCIE_EP_MSI_CTRL_REG + | ||
| 454 | PCI_MSI_ADDRESS_LO); | ||
| 455 | pci_addr &= GENMASK_ULL(63, 2); | ||
| 456 | |||
| 457 | /* Set the outbound region if needed. */ | ||
| 458 | if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || | ||
| 459 | ep->irq_pci_fn != fn)) { | ||
| 460 | rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1, | ||
| 461 | AXI_WRAPPER_MEM_WRITE, | ||
| 462 | ep->irq_phys_addr, | ||
| 463 | pci_addr & ~pci_addr_mask, | ||
| 464 | pci_addr_mask + 1); | ||
| 465 | ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); | ||
| 466 | ep->irq_pci_fn = fn; | ||
| 467 | } | ||
| 468 | |||
| 469 | writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); | ||
| 470 | return 0; | ||
| 471 | } | ||
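A worked example of the MSI doorbell path above, with assumed values:

/*
 * MME = 2 (msi_count = 4), host MSI target 0xfee01004: the ATU maps
 * the 256-byte window at 0xfee01000 once (pci_addr_mask = 0xff), and
 * interrupt_num = 3 ends in writew((data & ~3) | 2, irq_cpu_addr + 0x04).
 */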
| 472 | |||
| 473 | static int rockchip_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, | ||
| 474 | enum pci_epc_irq_type type, | ||
| 475 | u8 interrupt_num) | ||
| 476 | { | ||
| 477 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 478 | |||
| 479 | switch (type) { | ||
| 480 | case PCI_EPC_IRQ_LEGACY: | ||
| 481 | return rockchip_pcie_ep_send_legacy_irq(ep, fn, 0); | ||
| 482 | case PCI_EPC_IRQ_MSI: | ||
| 483 | return rockchip_pcie_ep_send_msi_irq(ep, fn, interrupt_num); | ||
| 484 | default: | ||
| 485 | return -EINVAL; | ||
| 486 | } | ||
| 487 | } | ||
| 488 | |||
| 489 | static int rockchip_pcie_ep_start(struct pci_epc *epc) | ||
| 490 | { | ||
| 491 | struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); | ||
| 492 | struct rockchip_pcie *rockchip = &ep->rockchip; | ||
| 493 | struct pci_epf *epf; | ||
| 494 | u32 cfg; | ||
| 495 | |||
| 496 | cfg = BIT(0); | ||
| 497 | list_for_each_entry(epf, &epc->pci_epf, list) | ||
| 498 | cfg |= BIT(epf->func_no); | ||
| 499 | |||
| 500 | rockchip_pcie_write(rockchip, cfg, PCIE_CORE_PHY_FUNC_CFG); | ||
| 501 | |||
| 502 | list_for_each_entry(epf, &epc->pci_epf, list) | ||
| 503 | pci_epf_linkup(epf); | ||
| 504 | |||
| 505 | return 0; | ||
| 506 | } | ||
| 507 | |||
| 508 | static const struct pci_epc_ops rockchip_pcie_epc_ops = { | ||
| 509 | .write_header = rockchip_pcie_ep_write_header, | ||
| 510 | .set_bar = rockchip_pcie_ep_set_bar, | ||
| 511 | .clear_bar = rockchip_pcie_ep_clear_bar, | ||
| 512 | .map_addr = rockchip_pcie_ep_map_addr, | ||
| 513 | .unmap_addr = rockchip_pcie_ep_unmap_addr, | ||
| 514 | .set_msi = rockchip_pcie_ep_set_msi, | ||
| 515 | .get_msi = rockchip_pcie_ep_get_msi, | ||
| 516 | .raise_irq = rockchip_pcie_ep_raise_irq, | ||
| 517 | .start = rockchip_pcie_ep_start, | ||
| 518 | }; | ||
| 519 | |||
| 520 | static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, | ||
| 521 | struct rockchip_pcie_ep *ep) | ||
| 522 | { | ||
| 523 | struct device *dev = rockchip->dev; | ||
| 524 | int err; | ||
| 525 | |||
| 526 | err = rockchip_pcie_parse_dt(rockchip); | ||
| 527 | if (err) | ||
| 528 | return err; | ||
| 529 | |||
| 530 | err = rockchip_pcie_get_phys(rockchip); | ||
| 531 | if (err) | ||
| 532 | return err; | ||
| 533 | |||
| 534 | err = of_property_read_u32(dev->of_node, | ||
| 535 | "rockchip,max-outbound-regions", | ||
| 536 | &ep->max_regions); | ||
| 537 | if (err < 0 || ep->max_regions > MAX_REGION_LIMIT) | ||
| 538 | ep->max_regions = MAX_REGION_LIMIT; | ||
| 539 | |||
| 540 | err = of_property_read_u8(dev->of_node, "max-functions", | ||
| 541 | &ep->epc->max_functions); | ||
| 542 | if (err < 0) | ||
| 543 | ep->epc->max_functions = 1; | ||
| 544 | |||
| 545 | return 0; | ||
| 546 | } | ||
| 547 | |||
| 548 | static const struct of_device_id rockchip_pcie_ep_of_match[] = { | ||
| 549 | { .compatible = "rockchip,rk3399-pcie-ep"}, | ||
| 550 | {}, | ||
| 551 | }; | ||
| 552 | |||
| 553 | static int rockchip_pcie_ep_probe(struct platform_device *pdev) | ||
| 554 | { | ||
| 555 | struct device *dev = &pdev->dev; | ||
| 556 | struct rockchip_pcie_ep *ep; | ||
| 557 | struct rockchip_pcie *rockchip; | ||
| 558 | struct pci_epc *epc; | ||
| 559 | size_t max_regions; | ||
| 560 | int err; | ||
| 561 | |||
| 562 | ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); | ||
| 563 | if (!ep) | ||
| 564 | return -ENOMEM; | ||
| 565 | |||
| 566 | rockchip = &ep->rockchip; | ||
| 567 | rockchip->is_rc = false; | ||
| 568 | rockchip->dev = dev; | ||
| 569 | |||
| 570 | epc = devm_pci_epc_create(dev, &rockchip_pcie_epc_ops); | ||
| 571 | if (IS_ERR(epc)) { | ||
| 572 | dev_err(dev, "failed to create epc device\n"); | ||
| 573 | return PTR_ERR(epc); | ||
| 574 | } | ||
| 575 | |||
| 576 | ep->epc = epc; | ||
| 577 | epc_set_drvdata(epc, ep); | ||
| 578 | |||
| 579 | err = rockchip_pcie_parse_ep_dt(rockchip, ep); | ||
| 580 | if (err) | ||
| 581 | return err; | ||
| 582 | |||
| 583 | err = rockchip_pcie_enable_clocks(rockchip); | ||
| 584 | if (err) | ||
| 585 | return err; | ||
| 586 | |||
| 587 | err = rockchip_pcie_init_port(rockchip); | ||
| 588 | if (err) | ||
| 589 | goto err_disable_clocks; | ||
| 590 | |||
| 591 | /* Establish the link automatically */ | ||
| 592 | rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, | ||
| 593 | PCIE_CLIENT_CONFIG); | ||
| 594 | |||
| 595 | max_regions = ep->max_regions; | ||
| 596 | ep->ob_addr = devm_kzalloc(dev, max_regions * sizeof(*ep->ob_addr), | ||
| 597 | GFP_KERNEL); | ||
| 598 | |||
| 599 | if (!ep->ob_addr) { | ||
| 600 | err = -ENOMEM; | ||
| 601 | goto err_uninit_port; | ||
| 602 | } | ||
| 603 | |||
| 604 | /* Only enable function 0 by default */ | ||
| 605 | rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); | ||
| 606 | |||
| 607 | err = pci_epc_mem_init(epc, rockchip->mem_res->start, | ||
| 608 | resource_size(rockchip->mem_res)); | ||
| 609 | if (err < 0) { | ||
| 610 | dev_err(dev, "failed to initialize the memory space\n"); | ||
| 611 | goto err_uninit_port; | ||
| 612 | } | ||
| 613 | |||
| 614 | ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, | ||
| 615 | SZ_128K); | ||
| 616 | if (!ep->irq_cpu_addr) { | ||
| 617 | dev_err(dev, "failed to reserve memory space for MSI\n"); | ||
| 618 | err = -ENOMEM; | ||
| 619 | goto err_epc_mem_exit; | ||
| 620 | } | ||
| 621 | |||
| 622 | ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; | ||
| 623 | |||
| 624 | return 0; | ||
| 625 | err_epc_mem_exit: | ||
| 626 | pci_epc_mem_exit(epc); | ||
| 627 | err_uninit_port: | ||
| 628 | rockchip_pcie_deinit_phys(rockchip); | ||
| 629 | err_disable_clocks: | ||
| 630 | rockchip_pcie_disable_clocks(rockchip); | ||
| 631 | return err; | ||
| 632 | } | ||
| 633 | |||
| 634 | static struct platform_driver rockchip_pcie_ep_driver = { | ||
| 635 | .driver = { | ||
| 636 | .name = "rockchip-pcie-ep", | ||
| 637 | .of_match_table = rockchip_pcie_ep_of_match, | ||
| 638 | }, | ||
| 639 | .probe = rockchip_pcie_ep_probe, | ||
| 640 | }; | ||
| 641 | |||
| 642 | builtin_platform_driver(rockchip_pcie_ep_driver); | ||
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c new file mode 100644 index 000000000000..1372d270764f --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip-host.c | |||
| @@ -0,0 +1,1142 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Rockchip AXI PCIe host controller driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2016 Rockchip, Inc. | ||
| 6 | * | ||
| 7 | * Author: Shawn Lin <shawn.lin@rock-chips.com> | ||
| 8 | * Wenrui Li <wenrui.li@rock-chips.com> | ||
| 9 | * | ||
| 10 | * Bits taken from Synopsys DesignWare Host controller driver and | ||
| 11 | * ARM PCI Host generic driver. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/bitrev.h> | ||
| 15 | #include <linux/clk.h> | ||
| 16 | #include <linux/delay.h> | ||
| 17 | #include <linux/gpio/consumer.h> | ||
| 18 | #include <linux/init.h> | ||
| 19 | #include <linux/interrupt.h> | ||
| 20 | #include <linux/iopoll.h> | ||
| 21 | #include <linux/irq.h> | ||
| 22 | #include <linux/irqchip/chained_irq.h> | ||
| 23 | #include <linux/irqdomain.h> | ||
| 24 | #include <linux/kernel.h> | ||
| 25 | #include <linux/mfd/syscon.h> | ||
| 26 | #include <linux/module.h> | ||
| 27 | #include <linux/of_address.h> | ||
| 28 | #include <linux/of_device.h> | ||
| 29 | #include <linux/of_pci.h> | ||
| 30 | #include <linux/of_platform.h> | ||
| 31 | #include <linux/of_irq.h> | ||
| 32 | #include <linux/pci.h> | ||
| 33 | #include <linux/pci_ids.h> | ||
| 34 | #include <linux/phy/phy.h> | ||
| 35 | #include <linux/platform_device.h> | ||
| 36 | #include <linux/reset.h> | ||
| 37 | #include <linux/regmap.h> | ||
| 38 | |||
| 39 | #include "../pci.h" | ||
| 40 | #include "pcie-rockchip.h" | ||
| 41 | |||
| 42 | static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) | ||
| 43 | { | ||
| 44 | u32 status; | ||
| 45 | |||
| 46 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
| 47 | status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); | ||
| 48 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
| 49 | } | ||
| 50 | |||
| 51 | static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) | ||
| 52 | { | ||
| 53 | u32 status; | ||
| 54 | |||
| 55 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
| 56 | status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; | ||
| 57 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
| 58 | } | ||
| 59 | |||
| 60 | static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) | ||
| 61 | { | ||
| 62 | u32 val; | ||
| 63 | |||
| 64 | /* Update Tx credit maximum update interval */ | ||
| 65 | val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); | ||
| 66 | val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; | ||
| 67 | val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ | ||
| 68 | rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); | ||
| 69 | } | ||
| 70 | |||
| 71 | static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, | ||
| 72 | struct pci_bus *bus, int dev) | ||
| 73 | { | ||
| 74 | /* access only one slot on each root port */ | ||
| 75 | if (bus->number == rockchip->root_bus_nr && dev > 0) | ||
| 76 | return 0; | ||
| 77 | |||
| 78 | /* | ||
| 79 | * do not read more than one device on the bus directly attached | ||
| 80 | * to RC's downstream side. | ||
| 81 | */ | ||
| 82 | if (bus->primary == rockchip->root_bus_nr && dev > 0) | ||
| 83 | return 0; | ||
| 84 | |||
| 85 | return 1; | ||
| 86 | } | ||
| 87 | |||
| 88 | static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) | ||
| 89 | { | ||
| 90 | u32 val; | ||
| 91 | u8 map; | ||
| 92 | |||
| 93 | if (rockchip->legacy_phy) | ||
| 94 | return GENMASK(MAX_LANE_NUM - 1, 0); | ||
| 95 | |||
| 96 | val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); | ||
| 97 | map = val & PCIE_CORE_LANE_MAP_MASK; | ||
| 98 | |||
| 99 | /* The link may be using a reverse-indexed mapping. */ | ||
| 100 | if (val & PCIE_CORE_LANE_MAP_REVERSE) | ||
| 101 | map = bitrev8(map) >> 4; | ||
| 102 | |||
| 103 | return map; | ||
| 104 | } | ||
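A worked example of the lane-map reversal above, with an assumed readback:

/*
 * PCIE_CORE_LANE_MAP reads back map = 0x3 (lanes 0 and 1) with the
 * REVERSE flag set: bitrev8(0x3) >> 4 = 0xc, i.e. the same two lanes
 * renumbered from the other end.
 */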
| 105 | |||
| 106 | static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, | ||
| 107 | int where, int size, u32 *val) | ||
| 108 | { | ||
| 109 | void __iomem *addr; | ||
| 110 | |||
| 111 | addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; | ||
| 112 | |||
| 113 | if (!IS_ALIGNED((uintptr_t)addr, size)) { | ||
| 114 | *val = 0; | ||
| 115 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 116 | } | ||
| 117 | |||
| 118 | if (size == 4) { | ||
| 119 | *val = readl(addr); | ||
| 120 | } else if (size == 2) { | ||
| 121 | *val = readw(addr); | ||
| 122 | } else if (size == 1) { | ||
| 123 | *val = readb(addr); | ||
| 124 | } else { | ||
| 125 | *val = 0; | ||
| 126 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 127 | } | ||
| 128 | return PCIBIOS_SUCCESSFUL; | ||
| 129 | } | ||
| 130 | |||
| 131 | static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, | ||
| 132 | int where, int size, u32 val) | ||
| 133 | { | ||
| 134 | u32 mask, tmp, offset; | ||
| 135 | void __iomem *addr; | ||
| 136 | |||
| 137 | offset = where & ~0x3; | ||
| 138 | addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; | ||
| 139 | |||
| 140 | if (size == 4) { | ||
| 141 | writel(val, addr); | ||
| 142 | return PCIBIOS_SUCCESSFUL; | ||
| 143 | } | ||
| 144 | |||
| 145 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); | ||
| 146 | |||
| 147 | /* | ||
| 148 | * N.B. This read/modify/write isn't safe in general because it can | ||
| 149 | * corrupt RW1C bits in adjacent registers. But the hardware | ||
| 150 | * doesn't support smaller writes. | ||
| 151 | */ | ||
| 152 | tmp = readl(addr) & mask; | ||
| 153 | tmp |= val << ((where & 0x3) * 8); | ||
| 154 | writel(tmp, addr); | ||
| 155 | |||
| 156 | return PCIBIOS_SUCCESSFUL; | ||
| 157 | } | ||
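A worked example of the sub-word read-modify-write above, with assumed values:

/*
 * A 1-byte write of 0xab to where = 0x3e: offset = 0x3c,
 * mask = ~(0xff << 16); the aligned dword is read, bits 23:16 are
 * replaced with 0xab, and the dword is written back.
 */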
| 158 | |||
| 159 | static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, | ||
| 160 | struct pci_bus *bus, u32 devfn, | ||
| 161 | int where, int size, u32 *val) | ||
| 162 | { | ||
| 163 | u32 busdev; | ||
| 164 | |||
| 165 | busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), | ||
| 166 | PCI_FUNC(devfn), where); | ||
| 167 | |||
| 168 | if (!IS_ALIGNED(busdev, size)) { | ||
| 169 | *val = 0; | ||
| 170 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 171 | } | ||
| 172 | |||
| 173 | if (bus->parent->number == rockchip->root_bus_nr) | ||
| 174 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 175 | AXI_WRAPPER_TYPE0_CFG); | ||
| 176 | else | ||
| 177 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 178 | AXI_WRAPPER_TYPE1_CFG); | ||
| 179 | |||
| 180 | if (size == 4) { | ||
| 181 | *val = readl(rockchip->reg_base + busdev); | ||
| 182 | } else if (size == 2) { | ||
| 183 | *val = readw(rockchip->reg_base + busdev); | ||
| 184 | } else if (size == 1) { | ||
| 185 | *val = readb(rockchip->reg_base + busdev); | ||
| 186 | } else { | ||
| 187 | *val = 0; | ||
| 188 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 189 | } | ||
| 190 | return PCIBIOS_SUCCESSFUL; | ||
| 191 | } | ||
| 192 | |||
| 193 | static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, | ||
| 194 | struct pci_bus *bus, u32 devfn, | ||
| 195 | int where, int size, u32 val) | ||
| 196 | { | ||
| 197 | u32 busdev; | ||
| 198 | |||
| 199 | busdev = PCIE_ECAM_ADDR(bus->number, PCI_SLOT(devfn), | ||
| 200 | PCI_FUNC(devfn), where); | ||
| 201 | if (!IS_ALIGNED(busdev, size)) | ||
| 202 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 203 | |||
| 204 | if (bus->parent->number == rockchip->root_bus_nr) | ||
| 205 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 206 | AXI_WRAPPER_TYPE0_CFG); | ||
| 207 | else | ||
| 208 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 209 | AXI_WRAPPER_TYPE1_CFG); | ||
| 210 | |||
| 211 | if (size == 4) | ||
| 212 | writel(val, rockchip->reg_base + busdev); | ||
| 213 | else if (size == 2) | ||
| 214 | writew(val, rockchip->reg_base + busdev); | ||
| 215 | else if (size == 1) | ||
| 216 | writeb(val, rockchip->reg_base + busdev); | ||
| 217 | else | ||
| 218 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
| 219 | |||
| 220 | return PCIBIOS_SUCCESSFUL; | ||
| 221 | } | ||
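PCIE_ECAM_ADDR() is defined in pcie-rockchip.h outside this hunk; assuming it follows the standard ECAM layout (bus in bits 27:20, device in 19:15, function in 14:12, register in 11:0), the offset computed for a config access can be sketched as:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed standard ECAM encoding; the driver's actual macro is in pcie-rockchip.h. */
    static uint32_t ecam_addr(uint32_t bus, uint32_t dev, uint32_t fn, uint32_t where)
    {
        return (bus << 20) | (dev << 15) | (fn << 12) | (where & 0xfff);
    }

    int main(void)
    {
        /* bus 1, device 0, function 0, register 0x10 (BAR0) */
        printf("config offset = 0x%08x\n", ecam_addr(1, 0, 0, 0x10));
        return 0;
    }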
| 222 | |||
| 223 | static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, | ||
| 224 | int size, u32 *val) | ||
| 225 | { | ||
| 226 | struct rockchip_pcie *rockchip = bus->sysdata; | ||
| 227 | |||
| 228 | if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) { | ||
| 229 | *val = 0xffffffff; | ||
| 230 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 231 | } | ||
| 232 | |||
| 233 | if (bus->number == rockchip->root_bus_nr) | ||
| 234 | return rockchip_pcie_rd_own_conf(rockchip, where, size, val); | ||
| 235 | |||
| 236 | return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, | ||
| 237 | val); | ||
| 238 | } | ||
| 239 | |||
| 240 | static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, | ||
| 241 | int where, int size, u32 val) | ||
| 242 | { | ||
| 243 | struct rockchip_pcie *rockchip = bus->sysdata; | ||
| 244 | |||
| 245 | if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) | ||
| 246 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 247 | |||
| 248 | if (bus->number == rockchip->root_bus_nr) | ||
| 249 | return rockchip_pcie_wr_own_conf(rockchip, where, size, val); | ||
| 250 | |||
| 251 | return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, | ||
| 252 | val); | ||
| 253 | } | ||
| 254 | |||
| 255 | static struct pci_ops rockchip_pcie_ops = { | ||
| 256 | .read = rockchip_pcie_rd_conf, | ||
| 257 | .write = rockchip_pcie_wr_conf, | ||
| 258 | }; | ||
| 259 | |||
| 260 | static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) | ||
| 261 | { | ||
| 262 | int curr; | ||
| 263 | u32 status, scale, power; | ||
| 264 | |||
| 265 | if (IS_ERR(rockchip->vpcie3v3)) | ||
| 266 | return; | ||
| 267 | |||
| 268 | /* | ||
| 269 | * Set RC's captured slot power limit and scale if | ||
| 270 | * vpcie3v3 available. The default values are both zero | ||
| 271 | * which means the software should set these two according | ||
| 272 | * to the actual power supply. | ||
| 273 | */ | ||
| 274 | curr = regulator_get_current_limit(rockchip->vpcie3v3); | ||
| 275 | if (curr <= 0) | ||
| 276 | return; | ||
| 277 | |||
| 278 | scale = 3; /* 0.001x */ | ||
| 279 | curr = curr / 1000; /* convert to mA */ | ||
| 280 | power = (curr * 3300) / 1000; /* milliwatt */ | ||
| 281 | while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { | ||
| 282 | if (!scale) { | ||
| 283 | dev_warn(rockchip->dev, "invalid power supply\n"); | ||
| 284 | return; | ||
| 285 | } | ||
| 286 | scale--; | ||
| 287 | power = power / 10; | ||
| 288 | } | ||
| 289 | |||
| 290 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); | ||
| 291 | status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | | ||
| 292 | (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); | ||
| 293 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); | ||
| 294 | } | ||
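The loop above encodes the budget as a (value, scale) pair per the PCIe Slot Power Limit format: the limit in watts is value x 10^-scale, so the driver starts at scale 3 (milliwatts) and divides by ten until the value fits the 8-bit CSPL field. A standalone sketch with a hypothetical 3.3 V / 1 A supply:

    #include <stdio.h>

    #define CSPL_LIMIT 0xff /* 8-bit Captured Slot Power Limit value field */

    int main(void)
    {
        unsigned int power = 3300; /* mW: hypothetical 1 A at 3.3 V */
        unsigned int scale = 3;    /* limit = value * 10^-scale watts */

        while (power > CSPL_LIMIT && scale) {
            scale--;
            power /= 10;
        }
        if (power > CSPL_LIMIT)
            printf("supply too large to encode\n");
        else /* prints "value 33, scale 1", i.e. 33 * 0.1 W = 3.3 W */
            printf("value %u, scale %u\n", power, scale);
        return 0;
    }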
| 295 | |||
| 296 | /** | ||
| 297 | * rockchip_pcie_host_init_port - Initialize hardware | ||
| 298 | * @rockchip: PCIe port information | ||
| 299 | */ | ||
| 300 | static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) | ||
| 301 | { | ||
| 302 | struct device *dev = rockchip->dev; | ||
| 303 | int err, i = MAX_LANE_NUM; | ||
| 304 | u32 status; | ||
| 305 | |||
| 306 | gpiod_set_value_cansleep(rockchip->ep_gpio, 0); | ||
| 307 | |||
| 308 | err = rockchip_pcie_init_port(rockchip); | ||
| 309 | if (err) | ||
| 310 | return err; | ||
| 311 | |||
| 312 | /* Set the transmitted FTS (Fast Training Sequence) count required to exit from L0s. */ | ||
| 313 | status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1); | ||
| 314 | status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) | | ||
| 315 | (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT); | ||
| 316 | rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1); | ||
| 317 | |||
| 318 | rockchip_pcie_set_power_limit(rockchip); | ||
| 319 | |||
| 320 | /* Set RC's clock architecture to common clock */ | ||
| 321 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
| 322 | status |= PCI_EXP_LNKSTA_SLC << 16; | ||
| 323 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
| 324 | |||
| 325 | /* Set RC's Read Completion Boundary (RCB) to 128 bytes */ | ||
| 326 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
| 327 | status |= PCI_EXP_LNKCTL_RCB; | ||
| 328 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
| 329 | |||
| 330 | /* Enable Gen1 training */ | ||
| 331 | rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE, | ||
| 332 | PCIE_CLIENT_CONFIG); | ||
| 333 | |||
| 334 | gpiod_set_value_cansleep(rockchip->ep_gpio, 1); | ||
| 335 | |||
| 336 | /* 500ms timeout value should be enough for Gen1/2 training */ | ||
| 337 | err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1, | ||
| 338 | status, PCIE_LINK_UP(status), 20, | ||
| 339 | 500 * USEC_PER_MSEC); | ||
| 340 | if (err) { | ||
| 341 | dev_err(dev, "PCIe link training gen1 timeout!\n"); | ||
| 342 | goto err_power_off_phy; | ||
| 343 | } | ||
| 344 | |||
| 345 | if (rockchip->link_gen == 2) { | ||
| 346 | /* | ||
| 347 | * Enable retrain for gen2. This should be configured only after | ||
| 348 | * gen1 finished. | ||
| 349 | */ | ||
| 350 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); | ||
| 351 | status |= PCI_EXP_LNKCTL_RL; | ||
| 352 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); | ||
| 353 | |||
| 354 | err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL, | ||
| 355 | status, PCIE_LINK_IS_GEN2(status), 20, | ||
| 356 | 500 * USEC_PER_MSEC); | ||
| 357 | if (err) | ||
| 358 | dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n"); | ||
| 359 | } | ||
| 360 | |||
| 361 | /* Check the final link width reported by the negotiated lane counter in MGMT */ | ||
| 362 | status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL); | ||
| 363 | status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >> | ||
| 364 | PCIE_CORE_PL_CONF_LANE_SHIFT); | ||
| 365 | dev_dbg(dev, "current link width is x%d\n", status); | ||
| 366 | |||
| 367 | /* Power off unused lane(s) */ | ||
| 368 | rockchip->lanes_map = rockchip_pcie_lane_map(rockchip); | ||
| 369 | for (i = 0; i < MAX_LANE_NUM; i++) { | ||
| 370 | if (!(rockchip->lanes_map & BIT(i))) { | ||
| 371 | dev_dbg(dev, "idling lane %d\n", i); | ||
| 372 | phy_power_off(rockchip->phys[i]); | ||
| 373 | } | ||
| 374 | } | ||
| 375 | |||
| 376 | rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID, | ||
| 377 | PCIE_CORE_CONFIG_VENDOR); | ||
| 378 | rockchip_pcie_write(rockchip, | ||
| 379 | PCI_CLASS_BRIDGE_PCI << PCIE_RC_CONFIG_SCC_SHIFT, | ||
| 380 | PCIE_RC_CONFIG_RID_CCR); | ||
| 381 | |||
| 382 | /* Clear THP cap's next cap pointer to remove L1 substate cap */ | ||
| 383 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP); | ||
| 384 | status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK; | ||
| 385 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP); | ||
| 386 | |||
| 387 | /* Clear L0s from RC's link cap */ | ||
| 388 | if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) { | ||
| 389 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP); | ||
| 390 | status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S; | ||
| 391 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); | ||
| 392 | } | ||
| 393 | |||
| 394 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); | ||
| 395 | status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; | ||
| 396 | status |= PCIE_RC_CONFIG_DCSR_MPS_256; | ||
| 397 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); | ||
| 398 | |||
| 399 | return 0; | ||
| 400 | err_power_off_phy: | ||
| 401 | while (i--) | ||
| 402 | phy_power_off(rockchip->phys[i]); | ||
| 403 | i = MAX_LANE_NUM; | ||
| 404 | while (i--) | ||
| 405 | phy_exit(rockchip->phys[i]); | ||
| 406 | return err; | ||
| 407 | } | ||
| 408 | |||
| 409 | static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg) | ||
| 410 | { | ||
| 411 | struct rockchip_pcie *rockchip = arg; | ||
| 412 | struct device *dev = rockchip->dev; | ||
| 413 | u32 reg; | ||
| 414 | u32 sub_reg; | ||
| 415 | |||
| 416 | reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); | ||
| 417 | if (reg & PCIE_CLIENT_INT_LOCAL) { | ||
| 418 | dev_dbg(dev, "local interrupt received\n"); | ||
| 419 | sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS); | ||
| 420 | if (sub_reg & PCIE_CORE_INT_PRFPE) | ||
| 421 | dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n"); | ||
| 422 | |||
| 423 | if (sub_reg & PCIE_CORE_INT_CRFPE) | ||
| 424 | dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n"); | ||
| 425 | |||
| 426 | if (sub_reg & PCIE_CORE_INT_RRPE) | ||
| 427 | dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n"); | ||
| 428 | |||
| 429 | if (sub_reg & PCIE_CORE_INT_PRFO) | ||
| 430 | dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n"); | ||
| 431 | |||
| 432 | if (sub_reg & PCIE_CORE_INT_CRFO) | ||
| 433 | dev_dbg(dev, "overflow occurred in the completion receive FIFO\n"); | ||
| 434 | |||
| 435 | if (sub_reg & PCIE_CORE_INT_RT) | ||
| 436 | dev_dbg(dev, "replay timer timed out\n"); | ||
| 437 | |||
| 438 | if (sub_reg & PCIE_CORE_INT_RTR) | ||
| 439 | dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n"); | ||
| 440 | |||
| 441 | if (sub_reg & PCIE_CORE_INT_PE) | ||
| 442 | dev_dbg(dev, "phy error detected on receive side\n"); | ||
| 443 | |||
| 444 | if (sub_reg & PCIE_CORE_INT_MTR) | ||
| 445 | dev_dbg(dev, "malformed TLP received from the link\n"); | ||
| 446 | |||
| 447 | if (sub_reg & PCIE_CORE_INT_UCR) | ||
| 448 | dev_dbg(dev, "unexpected completion received from the link\n"); | ||
| 449 | |||
| 450 | if (sub_reg & PCIE_CORE_INT_FCE) | ||
| 451 | dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n"); | ||
| 452 | |||
| 453 | if (sub_reg & PCIE_CORE_INT_CT) | ||
| 454 | dev_dbg(dev, "a request timed out waiting for completion\n"); | ||
| 455 | |||
| 456 | if (sub_reg & PCIE_CORE_INT_UTC) | ||
| 457 | dev_dbg(dev, "unmapped TC error\n"); | ||
| 458 | |||
| 459 | if (sub_reg & PCIE_CORE_INT_MMVC) | ||
| 460 | dev_dbg(dev, "MSI mask register changes\n"); | ||
| 461 | |||
| 462 | rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS); | ||
| 463 | } else if (reg & PCIE_CLIENT_INT_PHY) { | ||
| 464 | dev_dbg(dev, "phy link changes\n"); | ||
| 465 | rockchip_pcie_update_txcredit_mui(rockchip); | ||
| 466 | rockchip_pcie_clr_bw_int(rockchip); | ||
| 467 | } | ||
| 468 | |||
| 469 | rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL, | ||
| 470 | PCIE_CLIENT_INT_STATUS); | ||
| 471 | |||
| 472 | return IRQ_HANDLED; | ||
| 473 | } | ||
| 474 | |||
| 475 | static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg) | ||
| 476 | { | ||
| 477 | struct rockchip_pcie *rockchip = arg; | ||
| 478 | struct device *dev = rockchip->dev; | ||
| 479 | u32 reg; | ||
| 480 | |||
| 481 | reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); | ||
| 482 | if (reg & PCIE_CLIENT_INT_LEGACY_DONE) | ||
| 483 | dev_dbg(dev, "legacy done interrupt received\n"); | ||
| 484 | |||
| 485 | if (reg & PCIE_CLIENT_INT_MSG) | ||
| 486 | dev_dbg(dev, "message done interrupt received\n"); | ||
| 487 | |||
| 488 | if (reg & PCIE_CLIENT_INT_HOT_RST) | ||
| 489 | dev_dbg(dev, "hot reset interrupt received\n"); | ||
| 490 | |||
| 491 | if (reg & PCIE_CLIENT_INT_DPA) | ||
| 492 | dev_dbg(dev, "dpa interrupt received\n"); | ||
| 493 | |||
| 494 | if (reg & PCIE_CLIENT_INT_FATAL_ERR) | ||
| 495 | dev_dbg(dev, "fatal error interrupt received\n"); | ||
| 496 | |||
| 497 | if (reg & PCIE_CLIENT_INT_NFATAL_ERR) | ||
| 498 | dev_dbg(dev, "non-fatal error interrupt received\n"); | ||
| 499 | |||
| 500 | if (reg & PCIE_CLIENT_INT_CORR_ERR) | ||
| 501 | dev_dbg(dev, "correctable error interrupt received\n"); | ||
| 502 | |||
| 503 | if (reg & PCIE_CLIENT_INT_PHY) | ||
| 504 | dev_dbg(dev, "phy interrupt received\n"); | ||
| 505 | |||
| 506 | rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE | | ||
| 507 | PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST | | ||
| 508 | PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR | | ||
| 509 | PCIE_CLIENT_INT_NFATAL_ERR | | ||
| 510 | PCIE_CLIENT_INT_CORR_ERR | | ||
| 511 | PCIE_CLIENT_INT_PHY), | ||
| 512 | PCIE_CLIENT_INT_STATUS); | ||
| 513 | |||
| 514 | return IRQ_HANDLED; | ||
| 515 | } | ||
| 516 | |||
| 517 | static void rockchip_pcie_legacy_int_handler(struct irq_desc *desc) | ||
| 518 | { | ||
| 519 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 520 | struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc); | ||
| 521 | struct device *dev = rockchip->dev; | ||
| 522 | u32 reg; | ||
| 523 | u32 hwirq; | ||
| 524 | u32 virq; | ||
| 525 | |||
| 526 | chained_irq_enter(chip, desc); | ||
| 527 | |||
| 528 | reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS); | ||
| 529 | reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT; | ||
| 530 | |||
| 531 | while (reg) { | ||
| 532 | hwirq = ffs(reg) - 1; | ||
| 533 | reg &= ~BIT(hwirq); | ||
| 534 | |||
| 535 | virq = irq_find_mapping(rockchip->irq_domain, hwirq); | ||
| 536 | if (virq) | ||
| 537 | generic_handle_irq(virq); | ||
| 538 | else | ||
| 539 | dev_err(dev, "unexpected IRQ, INT%d\n", hwirq); | ||
| 540 | } | ||
| 541 | |||
| 542 | chained_irq_exit(chip, desc); | ||
| 543 | } | ||
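The demultiplex loop above dispatches pending INTx bits lowest-first using ffs() and clears each handled bit from the local copy so the loop terminates. The same pattern in standalone form, using the C library's ffs() from <strings.h>:

    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
        unsigned int pending = 0xb; /* INTA, INTB and INTD pending */

        while (pending) {
            int hwirq = ffs(pending) - 1; /* lowest set bit, 0-based */

            pending &= ~(1u << hwirq);
            printf("dispatch INT%c (hwirq %d)\n", 'A' + hwirq, hwirq);
        }
        return 0;
    }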
| 544 | |||
| 545 | static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) | ||
| 546 | { | ||
| 547 | int irq, err; | ||
| 548 | struct device *dev = rockchip->dev; | ||
| 549 | struct platform_device *pdev = to_platform_device(dev); | ||
| 550 | |||
| 551 | irq = platform_get_irq_byname(pdev, "sys"); | ||
| 552 | if (irq < 0) { | ||
| 553 | dev_err(dev, "missing sys IRQ resource\n"); | ||
| 554 | return irq; | ||
| 555 | } | ||
| 556 | |||
| 557 | err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, | ||
| 558 | IRQF_SHARED, "pcie-sys", rockchip); | ||
| 559 | if (err) { | ||
| 560 | dev_err(dev, "failed to request PCIe subsystem IRQ\n"); | ||
| 561 | return err; | ||
| 562 | } | ||
| 563 | |||
| 564 | irq = platform_get_irq_byname(pdev, "legacy"); | ||
| 565 | if (irq < 0) { | ||
| 566 | dev_err(dev, "missing legacy IRQ resource\n"); | ||
| 567 | return irq; | ||
| 568 | } | ||
| 569 | |||
| 570 | irq_set_chained_handler_and_data(irq, | ||
| 571 | rockchip_pcie_legacy_int_handler, | ||
| 572 | rockchip); | ||
| 573 | |||
| 574 | irq = platform_get_irq_byname(pdev, "client"); | ||
| 575 | if (irq < 0) { | ||
| 576 | dev_err(dev, "missing client IRQ resource\n"); | ||
| 577 | return irq; | ||
| 578 | } | ||
| 579 | |||
| 580 | err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, | ||
| 581 | IRQF_SHARED, "pcie-client", rockchip); | ||
| 582 | if (err) { | ||
| 583 | dev_err(dev, "failed to request PCIe client IRQ\n"); | ||
| 584 | return err; | ||
| 585 | } | ||
| 586 | |||
| 587 | return 0; | ||
| 588 | } | ||
| 589 | |||
| 590 | /** | ||
| 591 | * rockchip_pcie_parse_host_dt - Parse Device Tree | ||
| 592 | * @rockchip: PCIe port information | ||
| 593 | * | ||
| 594 | * Return: '0' on success and error value on failure | ||
| 595 | */ | ||
| 596 | static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip) | ||
| 597 | { | ||
| 598 | struct device *dev = rockchip->dev; | ||
| 599 | int err; | ||
| 600 | |||
| 601 | err = rockchip_pcie_parse_dt(rockchip); | ||
| 602 | if (err) | ||
| 603 | return err; | ||
| 604 | |||
| 605 | err = rockchip_pcie_setup_irq(rockchip); | ||
| 606 | if (err) | ||
| 607 | return err; | ||
| 608 | |||
| 609 | rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v"); | ||
| 610 | if (IS_ERR(rockchip->vpcie12v)) { | ||
| 611 | if (PTR_ERR(rockchip->vpcie12v) == -EPROBE_DEFER) | ||
| 612 | return -EPROBE_DEFER; | ||
| 613 | dev_info(dev, "no vpcie12v regulator found\n"); | ||
| 614 | } | ||
| 615 | |||
| 616 | rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3"); | ||
| 617 | if (IS_ERR(rockchip->vpcie3v3)) { | ||
| 618 | if (PTR_ERR(rockchip->vpcie3v3) == -EPROBE_DEFER) | ||
| 619 | return -EPROBE_DEFER; | ||
| 620 | dev_info(dev, "no vpcie3v3 regulator found\n"); | ||
| 621 | } | ||
| 622 | |||
| 623 | rockchip->vpcie1v8 = devm_regulator_get_optional(dev, "vpcie1v8"); | ||
| 624 | if (IS_ERR(rockchip->vpcie1v8)) { | ||
| 625 | if (PTR_ERR(rockchip->vpcie1v8) == -EPROBE_DEFER) | ||
| 626 | return -EPROBE_DEFER; | ||
| 627 | dev_info(dev, "no vpcie1v8 regulator found\n"); | ||
| 628 | } | ||
| 629 | |||
| 630 | rockchip->vpcie0v9 = devm_regulator_get_optional(dev, "vpcie0v9"); | ||
| 631 | if (IS_ERR(rockchip->vpcie0v9)) { | ||
| 632 | if (PTR_ERR(rockchip->vpcie0v9) == -EPROBE_DEFER) | ||
| 633 | return -EPROBE_DEFER; | ||
| 634 | dev_info(dev, "no vpcie0v9 regulator found\n"); | ||
| 635 | } | ||
| 636 | |||
| 637 | return 0; | ||
| 638 | } | ||
| 639 | |||
| 640 | static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip) | ||
| 641 | { | ||
| 642 | struct device *dev = rockchip->dev; | ||
| 643 | int err; | ||
| 644 | |||
| 645 | if (!IS_ERR(rockchip->vpcie12v)) { | ||
| 646 | err = regulator_enable(rockchip->vpcie12v); | ||
| 647 | if (err) { | ||
| 648 | dev_err(dev, "failed to enable vpcie12v regulator\n"); | ||
| 649 | goto err_out; | ||
| 650 | } | ||
| 651 | } | ||
| 652 | |||
| 653 | if (!IS_ERR(rockchip->vpcie3v3)) { | ||
| 654 | err = regulator_enable(rockchip->vpcie3v3); | ||
| 655 | if (err) { | ||
| 656 | dev_err(dev, "failed to enable vpcie3v3 regulator\n"); | ||
| 657 | goto err_disable_12v; | ||
| 658 | } | ||
| 659 | } | ||
| 660 | |||
| 661 | if (!IS_ERR(rockchip->vpcie1v8)) { | ||
| 662 | err = regulator_enable(rockchip->vpcie1v8); | ||
| 663 | if (err) { | ||
| 664 | dev_err(dev, "failed to enable vpcie1v8 regulator\n"); | ||
| 665 | goto err_disable_3v3; | ||
| 666 | } | ||
| 667 | } | ||
| 668 | |||
| 669 | if (!IS_ERR(rockchip->vpcie0v9)) { | ||
| 670 | err = regulator_enable(rockchip->vpcie0v9); | ||
| 671 | if (err) { | ||
| 672 | dev_err(dev, "failed to enable vpcie0v9 regulator\n"); | ||
| 673 | goto err_disable_1v8; | ||
| 674 | } | ||
| 675 | } | ||
| 676 | |||
| 677 | return 0; | ||
| 678 | |||
| 679 | err_disable_1v8: | ||
| 680 | if (!IS_ERR(rockchip->vpcie1v8)) | ||
| 681 | regulator_disable(rockchip->vpcie1v8); | ||
| 682 | err_disable_3v3: | ||
| 683 | if (!IS_ERR(rockchip->vpcie3v3)) | ||
| 684 | regulator_disable(rockchip->vpcie3v3); | ||
| 685 | err_disable_12v: | ||
| 686 | if (!IS_ERR(rockchip->vpcie12v)) | ||
| 687 | regulator_disable(rockchip->vpcie12v); | ||
| 688 | err_out: | ||
| 689 | return err; | ||
| 690 | } | ||
| 691 | |||
| 692 | static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip) | ||
| 693 | { | ||
| 694 | rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) & | ||
| 695 | (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK); | ||
| 696 | rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT), | ||
| 697 | PCIE_CORE_INT_MASK); | ||
| 698 | |||
| 699 | rockchip_pcie_enable_bw_int(rockchip); | ||
| 700 | } | ||
| 701 | |||
| 702 | static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 703 | irq_hw_number_t hwirq) | ||
| 704 | { | ||
| 705 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | ||
| 706 | irq_set_chip_data(irq, domain->host_data); | ||
| 707 | |||
| 708 | return 0; | ||
| 709 | } | ||
| 710 | |||
| 711 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 712 | .map = rockchip_pcie_intx_map, | ||
| 713 | }; | ||
| 714 | |||
| 715 | static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) | ||
| 716 | { | ||
| 717 | struct device *dev = rockchip->dev; | ||
| 718 | struct device_node *intc = of_get_next_child(dev->of_node, NULL); | ||
| 719 | |||
| 720 | if (!intc) { | ||
| 721 | dev_err(dev, "missing child interrupt-controller node\n"); | ||
| 722 | return -EINVAL; | ||
| 723 | } | ||
| 724 | |||
| 725 | rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, | ||
| 726 | &intx_domain_ops, rockchip); | ||
| 727 | if (!rockchip->irq_domain) { | ||
| 728 | dev_err(dev, "failed to get an INTx IRQ domain\n"); | ||
| 729 | return -EINVAL; | ||
| 730 | } | ||
| 731 | |||
| 732 | return 0; | ||
| 733 | } | ||
| 734 | |||
| 735 | static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip, | ||
| 736 | int region_no, int type, u8 num_pass_bits, | ||
| 737 | u32 lower_addr, u32 upper_addr) | ||
| 738 | { | ||
| 739 | u32 ob_addr_0; | ||
| 740 | u32 ob_addr_1; | ||
| 741 | u32 ob_desc_0; | ||
| 742 | u32 aw_offset; | ||
| 743 | |||
| 744 | if (region_no >= MAX_AXI_WRAPPER_REGION_NUM) | ||
| 745 | return -EINVAL; | ||
| 746 | if (num_pass_bits + 1 < 8) | ||
| 747 | return -EINVAL; | ||
| 748 | if (num_pass_bits > 63) | ||
| 749 | return -EINVAL; | ||
| 750 | if (region_no == 0) { | ||
| 751 | if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits)) | ||
| 752 | return -EINVAL; | ||
| 753 | } | ||
| 754 | if (region_no != 0) { | ||
| 755 | if (AXI_REGION_SIZE < (2ULL << num_pass_bits)) | ||
| 756 | return -EINVAL; | ||
| 757 | } | ||
| 758 | |||
| 759 | aw_offset = (region_no << OB_REG_SIZE_SHIFT); | ||
| 760 | |||
| 761 | ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS; | ||
| 762 | ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR; | ||
| 763 | ob_addr_1 = upper_addr; | ||
| 764 | ob_desc_0 = (1 << 23 | type); | ||
| 765 | |||
| 766 | rockchip_pcie_write(rockchip, ob_addr_0, | ||
| 767 | PCIE_CORE_OB_REGION_ADDR0 + aw_offset); | ||
| 768 | rockchip_pcie_write(rockchip, ob_addr_1, | ||
| 769 | PCIE_CORE_OB_REGION_ADDR1 + aw_offset); | ||
| 770 | rockchip_pcie_write(rockchip, ob_desc_0, | ||
| 771 | PCIE_CORE_OB_REGION_DESC0 + aw_offset); | ||
| 772 | rockchip_pcie_write(rockchip, 0, | ||
| 773 | PCIE_CORE_OB_REGION_DESC1 + aw_offset); | ||
| 774 | |||
| 775 | return 0; | ||
| 776 | } | ||
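An outbound region passes the low (num_pass_bits + 1) address bits through untranslated, which is why the size checks compare against 2ULL << num_pass_bits. rockchip_pcie_cfg_atu() below always programs 20 - 1, i.e. 1 MiB regions, and sizes its loop accordingly:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int num_pass_bits = 20 - 1;                   /* as programmed by rockchip_pcie_cfg_atu() */
        uint64_t region_size = 2ULL << num_pass_bits; /* 2^(num_pass_bits + 1) = 1 MiB */
        uint64_t mem_size = 64ULL << 20;              /* hypothetical 64 MiB MEM window */

        printf("region size = %llu bytes\n", (unsigned long long)region_size);
        printf("regions needed = %llu\n", (unsigned long long)(mem_size / region_size));
        return 0;
    }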
| 777 | |||
| 778 | static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, | ||
| 779 | int region_no, u8 num_pass_bits, | ||
| 780 | u32 lower_addr, u32 upper_addr) | ||
| 781 | { | ||
| 782 | u32 ib_addr_0; | ||
| 783 | u32 ib_addr_1; | ||
| 784 | u32 aw_offset; | ||
| 785 | |||
| 786 | if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM) | ||
| 787 | return -EINVAL; | ||
| 788 | if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED) | ||
| 789 | return -EINVAL; | ||
| 790 | if (num_pass_bits > 63) | ||
| 791 | return -EINVAL; | ||
| 792 | |||
| 793 | aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT); | ||
| 794 | |||
| 795 | ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS; | ||
| 796 | ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR; | ||
| 797 | ib_addr_1 = upper_addr; | ||
| 798 | |||
| 799 | rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset); | ||
| 800 | rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset); | ||
| 801 | |||
| 802 | return 0; | ||
| 803 | } | ||
| 804 | |||
| 805 | static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) | ||
| 806 | { | ||
| 807 | struct device *dev = rockchip->dev; | ||
| 808 | int offset; | ||
| 809 | int err; | ||
| 810 | int reg_no; | ||
| 811 | |||
| 812 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 813 | AXI_WRAPPER_TYPE0_CFG); | ||
| 814 | |||
| 815 | for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { | ||
| 816 | err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, | ||
| 817 | AXI_WRAPPER_MEM_WRITE, | ||
| 818 | 20 - 1, | ||
| 819 | rockchip->mem_bus_addr + | ||
| 820 | (reg_no << 20), | ||
| 821 | 0); | ||
| 822 | if (err) { | ||
| 823 | dev_err(dev, "program RC mem outbound ATU failed\n"); | ||
| 824 | return err; | ||
| 825 | } | ||
| 826 | } | ||
| 827 | |||
| 828 | err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0); | ||
| 829 | if (err) { | ||
| 830 | dev_err(dev, "program RC mem inbound ATU failed\n"); | ||
| 831 | return err; | ||
| 832 | } | ||
| 833 | |||
| 834 | offset = rockchip->mem_size >> 20; | ||
| 835 | for (reg_no = 0; reg_no < (rockchip->io_size >> 20); reg_no++) { | ||
| 836 | err = rockchip_pcie_prog_ob_atu(rockchip, | ||
| 837 | reg_no + 1 + offset, | ||
| 838 | AXI_WRAPPER_IO_WRITE, | ||
| 839 | 20 - 1, | ||
| 840 | rockchip->io_bus_addr + | ||
| 841 | (reg_no << 20), | ||
| 842 | 0); | ||
| 843 | if (err) { | ||
| 844 | dev_err(dev, "program RC io outbound ATU failed\n"); | ||
| 845 | return err; | ||
| 846 | } | ||
| 847 | } | ||
| 848 | |||
| 849 | /* assign message regions */ | ||
| 850 | rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset, | ||
| 851 | AXI_WRAPPER_NOR_MSG, | ||
| 852 | 20 - 1, 0, 0); | ||
| 853 | |||
| 854 | rockchip->msg_bus_addr = rockchip->mem_bus_addr + | ||
| 855 | ((reg_no + offset) << 20); | ||
| 856 | return err; | ||
| 857 | } | ||
| 858 | |||
| 859 | static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip) | ||
| 860 | { | ||
| 861 | u32 value; | ||
| 862 | int err; | ||
| 863 | |||
| 864 | /* send PME_TURN_OFF message */ | ||
| 865 | writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF); | ||
| 866 | |||
| 867 | /* read LTSSM and wait for falling into L2 link state */ | ||
| 868 | err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0, | ||
| 869 | value, PCIE_LINK_IS_L2(value), 20, | ||
| 870 | jiffies_to_usecs(5 * HZ)); | ||
| 871 | if (err) { | ||
| 872 | dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n"); | ||
| 873 | return err; | ||
| 874 | } | ||
| 875 | |||
| 876 | return 0; | ||
| 877 | } | ||
| 878 | |||
| 879 | static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) | ||
| 880 | { | ||
| 881 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); | ||
| 882 | int ret; | ||
| 883 | |||
| 884 | /* disable core and client interrupts since we don't need to ack the PME_Ack message */ | ||
| 885 | rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) | | ||
| 886 | PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK); | ||
| 887 | rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK); | ||
| 888 | |||
| 889 | ret = rockchip_pcie_wait_l2(rockchip); | ||
| 890 | if (ret) { | ||
| 891 | rockchip_pcie_enable_interrupts(rockchip); | ||
| 892 | return ret; | ||
| 893 | } | ||
| 894 | |||
| 895 | rockchip_pcie_deinit_phys(rockchip); | ||
| 896 | |||
| 897 | rockchip_pcie_disable_clocks(rockchip); | ||
| 898 | |||
| 899 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
| 900 | regulator_disable(rockchip->vpcie0v9); | ||
| 901 | |||
| 902 | return ret; | ||
| 903 | } | ||
| 904 | |||
| 905 | static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) | ||
| 906 | { | ||
| 907 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); | ||
| 908 | int err; | ||
| 909 | |||
| 910 | if (!IS_ERR(rockchip->vpcie0v9)) { | ||
| 911 | err = regulator_enable(rockchip->vpcie0v9); | ||
| 912 | if (err) { | ||
| 913 | dev_err(dev, "failed to enable vpcie0v9 regulator\n"); | ||
| 914 | return err; | ||
| 915 | } | ||
| 916 | } | ||
| 917 | |||
| 918 | err = rockchip_pcie_enable_clocks(rockchip); | ||
| 919 | if (err) | ||
| 920 | goto err_disable_0v9; | ||
| 921 | |||
| 922 | err = rockchip_pcie_host_init_port(rockchip); | ||
| 923 | if (err) | ||
| 924 | goto err_pcie_resume; | ||
| 925 | |||
| 926 | err = rockchip_pcie_cfg_atu(rockchip); | ||
| 927 | if (err) | ||
| 928 | goto err_deinit_port; | ||
| 929 | |||
| 930 | /* Need this to enter L1 again */ | ||
| 931 | rockchip_pcie_update_txcredit_mui(rockchip); | ||
| 932 | rockchip_pcie_enable_interrupts(rockchip); | ||
| 933 | |||
| 934 | return 0; | ||
| 935 | |||
| 936 | err_deinit_port: | ||
| 937 | rockchip_pcie_deinit_phys(rockchip); | ||
| 938 | err_pcie_resume: | ||
| 939 | rockchip_pcie_disable_clocks(rockchip); | ||
| 940 | err_disable_0v9: | ||
| 941 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
| 942 | regulator_disable(rockchip->vpcie0v9); | ||
| 943 | return err; | ||
| 944 | } | ||
| 945 | |||
| 946 | static int rockchip_pcie_probe(struct platform_device *pdev) | ||
| 947 | { | ||
| 948 | struct rockchip_pcie *rockchip; | ||
| 949 | struct device *dev = &pdev->dev; | ||
| 950 | struct pci_bus *bus, *child; | ||
| 951 | struct pci_host_bridge *bridge; | ||
| 952 | struct resource_entry *win; | ||
| 953 | resource_size_t io_base; | ||
| 954 | struct resource *mem; | ||
| 955 | struct resource *io; | ||
| 956 | int err; | ||
| 957 | |||
| 958 | LIST_HEAD(res); | ||
| 959 | |||
| 960 | if (!dev->of_node) | ||
| 961 | return -ENODEV; | ||
| 962 | |||
| 963 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); | ||
| 964 | if (!bridge) | ||
| 965 | return -ENOMEM; | ||
| 966 | |||
| 967 | rockchip = pci_host_bridge_priv(bridge); | ||
| 968 | |||
| 969 | platform_set_drvdata(pdev, rockchip); | ||
| 970 | rockchip->dev = dev; | ||
| 971 | rockchip->is_rc = true; | ||
| 972 | |||
| 973 | err = rockchip_pcie_parse_host_dt(rockchip); | ||
| 974 | if (err) | ||
| 975 | return err; | ||
| 976 | |||
| 977 | err = rockchip_pcie_enable_clocks(rockchip); | ||
| 978 | if (err) | ||
| 979 | return err; | ||
| 980 | |||
| 981 | err = rockchip_pcie_set_vpcie(rockchip); | ||
| 982 | if (err) { | ||
| 983 | dev_err(dev, "failed to set vpcie regulator\n"); | ||
| 984 | goto err_set_vpcie; | ||
| 985 | } | ||
| 986 | |||
| 987 | err = rockchip_pcie_host_init_port(rockchip); | ||
| 988 | if (err) | ||
| 989 | goto err_vpcie; | ||
| 990 | |||
| 991 | rockchip_pcie_enable_interrupts(rockchip); | ||
| 992 | |||
| 993 | err = rockchip_pcie_init_irq_domain(rockchip); | ||
| 994 | if (err < 0) | ||
| 995 | goto err_deinit_port; | ||
| 996 | |||
| 997 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, | ||
| 998 | &res, &io_base); | ||
| 999 | if (err) | ||
| 1000 | goto err_remove_irq_domain; | ||
| 1001 | |||
| 1002 | err = devm_request_pci_bus_resources(dev, &res); | ||
| 1003 | if (err) | ||
| 1004 | goto err_free_res; | ||
| 1005 | |||
| 1006 | /* Get the I/O and memory ranges from DT */ | ||
| 1007 | resource_list_for_each_entry(win, &res) { | ||
| 1008 | switch (resource_type(win->res)) { | ||
| 1009 | case IORESOURCE_IO: | ||
| 1010 | io = win->res; | ||
| 1011 | io->name = "I/O"; | ||
| 1012 | rockchip->io_size = resource_size(io); | ||
| 1013 | rockchip->io_bus_addr = io->start - win->offset; | ||
| 1014 | err = pci_remap_iospace(io, io_base); | ||
| 1015 | if (err) { | ||
| 1016 | dev_warn(dev, "error %d: failed to map resource %pR\n", | ||
| 1017 | err, io); | ||
| 1018 | continue; | ||
| 1019 | } | ||
| 1020 | rockchip->io = io; | ||
| 1021 | break; | ||
| 1022 | case IORESOURCE_MEM: | ||
| 1023 | mem = win->res; | ||
| 1024 | mem->name = "MEM"; | ||
| 1025 | rockchip->mem_size = resource_size(mem); | ||
| 1026 | rockchip->mem_bus_addr = mem->start - win->offset; | ||
| 1027 | break; | ||
| 1028 | case IORESOURCE_BUS: | ||
| 1029 | rockchip->root_bus_nr = win->res->start; | ||
| 1030 | break; | ||
| 1031 | default: | ||
| 1032 | continue; | ||
| 1033 | } | ||
| 1034 | } | ||
| 1035 | |||
| 1036 | err = rockchip_pcie_cfg_atu(rockchip); | ||
| 1037 | if (err) | ||
| 1038 | goto err_unmap_iospace; | ||
| 1039 | |||
| 1040 | rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); | ||
| 1041 | if (!rockchip->msg_region) { | ||
| 1042 | err = -ENOMEM; | ||
| 1043 | goto err_unmap_iospace; | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | list_splice_init(&res, &bridge->windows); | ||
| 1047 | bridge->dev.parent = dev; | ||
| 1048 | bridge->sysdata = rockchip; | ||
| 1049 | bridge->busnr = 0; | ||
| 1050 | bridge->ops = &rockchip_pcie_ops; | ||
| 1051 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 1052 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 1053 | |||
| 1054 | err = pci_scan_root_bus_bridge(bridge); | ||
| 1055 | if (err < 0) | ||
| 1056 | goto err_unmap_iospace; | ||
| 1057 | |||
| 1058 | bus = bridge->bus; | ||
| 1059 | |||
| 1060 | rockchip->root_bus = bus; | ||
| 1061 | |||
| 1062 | pci_bus_size_bridges(bus); | ||
| 1063 | pci_bus_assign_resources(bus); | ||
| 1064 | list_for_each_entry(child, &bus->children, node) | ||
| 1065 | pcie_bus_configure_settings(child); | ||
| 1066 | |||
| 1067 | pci_bus_add_devices(bus); | ||
| 1068 | return 0; | ||
| 1069 | |||
| 1070 | err_unmap_iospace: | ||
| 1071 | pci_unmap_iospace(rockchip->io); | ||
| 1072 | err_free_res: | ||
| 1073 | pci_free_resource_list(&res); | ||
| 1074 | err_remove_irq_domain: | ||
| 1075 | irq_domain_remove(rockchip->irq_domain); | ||
| 1076 | err_deinit_port: | ||
| 1077 | rockchip_pcie_deinit_phys(rockchip); | ||
| 1078 | err_vpcie: | ||
| 1079 | if (!IS_ERR(rockchip->vpcie12v)) | ||
| 1080 | regulator_disable(rockchip->vpcie12v); | ||
| 1081 | if (!IS_ERR(rockchip->vpcie3v3)) | ||
| 1082 | regulator_disable(rockchip->vpcie3v3); | ||
| 1083 | if (!IS_ERR(rockchip->vpcie1v8)) | ||
| 1084 | regulator_disable(rockchip->vpcie1v8); | ||
| 1085 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
| 1086 | regulator_disable(rockchip->vpcie0v9); | ||
| 1087 | err_set_vpcie: | ||
| 1088 | rockchip_pcie_disable_clocks(rockchip); | ||
| 1089 | return err; | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | static int rockchip_pcie_remove(struct platform_device *pdev) | ||
| 1093 | { | ||
| 1094 | struct device *dev = &pdev->dev; | ||
| 1095 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); | ||
| 1096 | |||
| 1097 | pci_stop_root_bus(rockchip->root_bus); | ||
| 1098 | pci_remove_root_bus(rockchip->root_bus); | ||
| 1099 | pci_unmap_iospace(rockchip->io); | ||
| 1100 | irq_domain_remove(rockchip->irq_domain); | ||
| 1101 | |||
| 1102 | rockchip_pcie_deinit_phys(rockchip); | ||
| 1103 | |||
| 1104 | rockchip_pcie_disable_clocks(rockchip); | ||
| 1105 | |||
| 1106 | if (!IS_ERR(rockchip->vpcie12v)) | ||
| 1107 | regulator_disable(rockchip->vpcie12v); | ||
| 1108 | if (!IS_ERR(rockchip->vpcie3v3)) | ||
| 1109 | regulator_disable(rockchip->vpcie3v3); | ||
| 1110 | if (!IS_ERR(rockchip->vpcie1v8)) | ||
| 1111 | regulator_disable(rockchip->vpcie1v8); | ||
| 1112 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
| 1113 | regulator_disable(rockchip->vpcie0v9); | ||
| 1114 | |||
| 1115 | return 0; | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | static const struct dev_pm_ops rockchip_pcie_pm_ops = { | ||
| 1119 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq, | ||
| 1120 | rockchip_pcie_resume_noirq) | ||
| 1121 | }; | ||
| 1122 | |||
| 1123 | static const struct of_device_id rockchip_pcie_of_match[] = { | ||
| 1124 | { .compatible = "rockchip,rk3399-pcie", }, | ||
| 1125 | {} | ||
| 1126 | }; | ||
| 1127 | MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match); | ||
| 1128 | |||
| 1129 | static struct platform_driver rockchip_pcie_driver = { | ||
| 1130 | .driver = { | ||
| 1131 | .name = "rockchip-pcie", | ||
| 1132 | .of_match_table = rockchip_pcie_of_match, | ||
| 1133 | .pm = &rockchip_pcie_pm_ops, | ||
| 1134 | }, | ||
| 1135 | .probe = rockchip_pcie_probe, | ||
| 1136 | .remove = rockchip_pcie_remove, | ||
| 1137 | }; | ||
| 1138 | module_platform_driver(rockchip_pcie_driver); | ||
| 1139 | |||
| 1140 | MODULE_AUTHOR("Rockchip Inc"); | ||
| 1141 | MODULE_DESCRIPTION("Rockchip AXI PCIe driver"); | ||
| 1142 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c new file mode 100644 index 000000000000..c53d1322a3d6 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip.c | |||
| @@ -0,0 +1,424 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * Rockchip AXI PCIe host controller driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2016 Rockchip, Inc. | ||
| 6 | * | ||
| 7 | * Author: Shawn Lin <shawn.lin@rock-chips.com> | ||
| 8 | * Wenrui Li <wenrui.li@rock-chips.com> | ||
| 9 | * | ||
| 10 | * Bits taken from Synopsys DesignWare Host controller driver and | ||
| 11 | * ARM PCI Host generic driver. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/clk.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/gpio/consumer.h> | ||
| 17 | #include <linux/of_pci.h> | ||
| 18 | #include <linux/phy/phy.h> | ||
| 19 | #include <linux/platform_device.h> | ||
| 20 | #include <linux/reset.h> | ||
| 21 | |||
| 22 | #include "../pci.h" | ||
| 23 | #include "pcie-rockchip.h" | ||
| 24 | |||
| 25 | int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) | ||
| 26 | { | ||
| 27 | struct device *dev = rockchip->dev; | ||
| 28 | struct platform_device *pdev = to_platform_device(dev); | ||
| 29 | struct device_node *node = dev->of_node; | ||
| 30 | struct resource *regs; | ||
| 31 | int err; | ||
| 32 | |||
| 33 | if (rockchip->is_rc) { | ||
| 34 | regs = platform_get_resource_byname(pdev, | ||
| 35 | IORESOURCE_MEM, | ||
| 36 | "axi-base"); | ||
| 37 | rockchip->reg_base = devm_pci_remap_cfg_resource(dev, regs); | ||
| 38 | if (IS_ERR(rockchip->reg_base)) | ||
| 39 | return PTR_ERR(rockchip->reg_base); | ||
| 40 | } else { | ||
| 41 | rockchip->mem_res = | ||
| 42 | platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
| 43 | "mem-base"); | ||
| 44 | if (!rockchip->mem_res) | ||
| 45 | return -EINVAL; | ||
| 46 | } | ||
| 47 | |||
| 48 | regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, | ||
| 49 | "apb-base"); | ||
| 50 | rockchip->apb_base = devm_ioremap_resource(dev, regs); | ||
| 51 | if (IS_ERR(rockchip->apb_base)) | ||
| 52 | return PTR_ERR(rockchip->apb_base); | ||
| 53 | |||
| 54 | err = rockchip_pcie_get_phys(rockchip); | ||
| 55 | if (err) | ||
| 56 | return err; | ||
| 57 | |||
| 58 | rockchip->lanes = 1; | ||
| 59 | err = of_property_read_u32(node, "num-lanes", &rockchip->lanes); | ||
| 60 | if (!err && (rockchip->lanes == 0 || | ||
| 61 | rockchip->lanes == 3 || | ||
| 62 | rockchip->lanes > 4)) { | ||
| 63 | dev_warn(dev, "invalid num-lanes, defaulting to one lane\n"); | ||
| 64 | rockchip->lanes = 1; | ||
| 65 | } | ||
| 66 | |||
| 67 | rockchip->link_gen = of_pci_get_max_link_speed(node); | ||
| 68 | if (rockchip->link_gen < 0 || rockchip->link_gen > 2) | ||
| 69 | rockchip->link_gen = 2; | ||
| 70 | |||
| 71 | rockchip->core_rst = devm_reset_control_get_exclusive(dev, "core"); | ||
| 72 | if (IS_ERR(rockchip->core_rst)) { | ||
| 73 | if (PTR_ERR(rockchip->core_rst) != -EPROBE_DEFER) | ||
| 74 | dev_err(dev, "missing core reset property in node\n"); | ||
| 75 | return PTR_ERR(rockchip->core_rst); | ||
| 76 | } | ||
| 77 | |||
| 78 | rockchip->mgmt_rst = devm_reset_control_get_exclusive(dev, "mgmt"); | ||
| 79 | if (IS_ERR(rockchip->mgmt_rst)) { | ||
| 80 | if (PTR_ERR(rockchip->mgmt_rst) != -EPROBE_DEFER) | ||
| 81 | dev_err(dev, "missing mgmt reset property in node\n"); | ||
| 82 | return PTR_ERR(rockchip->mgmt_rst); | ||
| 83 | } | ||
| 84 | |||
| 85 | rockchip->mgmt_sticky_rst = devm_reset_control_get_exclusive(dev, | ||
| 86 | "mgmt-sticky"); | ||
| 87 | if (IS_ERR(rockchip->mgmt_sticky_rst)) { | ||
| 88 | if (PTR_ERR(rockchip->mgmt_sticky_rst) != -EPROBE_DEFER) | ||
| 89 | dev_err(dev, "missing mgmt-sticky reset property in node\n"); | ||
| 90 | return PTR_ERR(rockchip->mgmt_sticky_rst); | ||
| 91 | } | ||
| 92 | |||
| 93 | rockchip->pipe_rst = devm_reset_control_get_exclusive(dev, "pipe"); | ||
| 94 | if (IS_ERR(rockchip->pipe_rst)) { | ||
| 95 | if (PTR_ERR(rockchip->pipe_rst) != -EPROBE_DEFER) | ||
| 96 | dev_err(dev, "missing pipe reset property in node\n"); | ||
| 97 | return PTR_ERR(rockchip->pipe_rst); | ||
| 98 | } | ||
| 99 | |||
| 100 | rockchip->pm_rst = devm_reset_control_get_exclusive(dev, "pm"); | ||
| 101 | if (IS_ERR(rockchip->pm_rst)) { | ||
| 102 | if (PTR_ERR(rockchip->pm_rst) != -EPROBE_DEFER) | ||
| 103 | dev_err(dev, "missing pm reset property in node\n"); | ||
| 104 | return PTR_ERR(rockchip->pm_rst); | ||
| 105 | } | ||
| 106 | |||
| 107 | rockchip->pclk_rst = devm_reset_control_get_exclusive(dev, "pclk"); | ||
| 108 | if (IS_ERR(rockchip->pclk_rst)) { | ||
| 109 | if (PTR_ERR(rockchip->pclk_rst) != -EPROBE_DEFER) | ||
| 110 | dev_err(dev, "missing pclk reset property in node\n"); | ||
| 111 | return PTR_ERR(rockchip->pclk_rst); | ||
| 112 | } | ||
| 113 | |||
| 114 | rockchip->aclk_rst = devm_reset_control_get_exclusive(dev, "aclk"); | ||
| 115 | if (IS_ERR(rockchip->aclk_rst)) { | ||
| 116 | if (PTR_ERR(rockchip->aclk_rst) != -EPROBE_DEFER) | ||
| 117 | dev_err(dev, "missing aclk reset property in node\n"); | ||
| 118 | return PTR_ERR(rockchip->aclk_rst); | ||
| 119 | } | ||
| 120 | |||
| 121 | if (rockchip->is_rc) { | ||
| 122 | rockchip->ep_gpio = devm_gpiod_get(dev, "ep", GPIOD_OUT_HIGH); | ||
| 123 | if (IS_ERR(rockchip->ep_gpio)) { | ||
| 124 | dev_err(dev, "missing ep-gpios property in node\n"); | ||
| 125 | return PTR_ERR(rockchip->ep_gpio); | ||
| 126 | } | ||
| 127 | } | ||
| 128 | |||
| 129 | rockchip->aclk_pcie = devm_clk_get(dev, "aclk"); | ||
| 130 | if (IS_ERR(rockchip->aclk_pcie)) { | ||
| 131 | dev_err(dev, "aclk clock not found\n"); | ||
| 132 | return PTR_ERR(rockchip->aclk_pcie); | ||
| 133 | } | ||
| 134 | |||
| 135 | rockchip->aclk_perf_pcie = devm_clk_get(dev, "aclk-perf"); | ||
| 136 | if (IS_ERR(rockchip->aclk_perf_pcie)) { | ||
| 137 | dev_err(dev, "aclk_perf clock not found\n"); | ||
| 138 | return PTR_ERR(rockchip->aclk_perf_pcie); | ||
| 139 | } | ||
| 140 | |||
| 141 | rockchip->hclk_pcie = devm_clk_get(dev, "hclk"); | ||
| 142 | if (IS_ERR(rockchip->hclk_pcie)) { | ||
| 143 | dev_err(dev, "hclk clock not found\n"); | ||
| 144 | return PTR_ERR(rockchip->hclk_pcie); | ||
| 145 | } | ||
| 146 | |||
| 147 | rockchip->clk_pcie_pm = devm_clk_get(dev, "pm"); | ||
| 148 | if (IS_ERR(rockchip->clk_pcie_pm)) { | ||
| 149 | dev_err(dev, "pm clock not found\n"); | ||
| 150 | return PTR_ERR(rockchip->clk_pcie_pm); | ||
| 151 | } | ||
| 152 | |||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | EXPORT_SYMBOL_GPL(rockchip_pcie_parse_dt); | ||
| 156 | |||
| 157 | int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) | ||
| 158 | { | ||
| 159 | struct device *dev = rockchip->dev; | ||
| 160 | int err, i; | ||
| 161 | u32 regs; | ||
| 162 | |||
| 163 | err = reset_control_assert(rockchip->aclk_rst); | ||
| 164 | if (err) { | ||
| 165 | dev_err(dev, "assert aclk_rst err %d\n", err); | ||
| 166 | return err; | ||
| 167 | } | ||
| 168 | |||
| 169 | err = reset_control_assert(rockchip->pclk_rst); | ||
| 170 | if (err) { | ||
| 171 | dev_err(dev, "assert pclk_rst err %d\n", err); | ||
| 172 | return err; | ||
| 173 | } | ||
| 174 | |||
| 175 | err = reset_control_assert(rockchip->pm_rst); | ||
| 176 | if (err) { | ||
| 177 | dev_err(dev, "assert pm_rst err %d\n", err); | ||
| 178 | return err; | ||
| 179 | } | ||
| 180 | |||
| 181 | for (i = 0; i < MAX_LANE_NUM; i++) { | ||
| 182 | err = phy_init(rockchip->phys[i]); | ||
| 183 | if (err) { | ||
| 184 | dev_err(dev, "init phy%d err %d\n", i, err); | ||
| 185 | goto err_exit_phy; | ||
| 186 | } | ||
| 187 | } | ||
| 188 | |||
| 189 | err = reset_control_assert(rockchip->core_rst); | ||
| 190 | if (err) { | ||
| 191 | dev_err(dev, "assert core_rst err %d\n", err); | ||
| 192 | goto err_exit_phy; | ||
| 193 | } | ||
| 194 | |||
| 195 | err = reset_control_assert(rockchip->mgmt_rst); | ||
| 196 | if (err) { | ||
| 197 | dev_err(dev, "assert mgmt_rst err %d\n", err); | ||
| 198 | goto err_exit_phy; | ||
| 199 | } | ||
| 200 | |||
| 201 | err = reset_control_assert(rockchip->mgmt_sticky_rst); | ||
| 202 | if (err) { | ||
| 203 | dev_err(dev, "assert mgmt_sticky_rst err %d\n", err); | ||
| 204 | goto err_exit_phy; | ||
| 205 | } | ||
| 206 | |||
| 207 | err = reset_control_assert(rockchip->pipe_rst); | ||
| 208 | if (err) { | ||
| 209 | dev_err(dev, "assert pipe_rst err %d\n", err); | ||
| 210 | goto err_exit_phy; | ||
| 211 | } | ||
| 212 | |||
| 213 | udelay(10); | ||
| 214 | |||
| 215 | err = reset_control_deassert(rockchip->pm_rst); | ||
| 216 | if (err) { | ||
| 217 | dev_err(dev, "deassert pm_rst err %d\n", err); | ||
| 218 | goto err_exit_phy; | ||
| 219 | } | ||
| 220 | |||
| 221 | err = reset_control_deassert(rockchip->aclk_rst); | ||
| 222 | if (err) { | ||
| 223 | dev_err(dev, "deassert aclk_rst err %d\n", err); | ||
| 224 | goto err_exit_phy; | ||
| 225 | } | ||
| 226 | |||
| 227 | err = reset_control_deassert(rockchip->pclk_rst); | ||
| 228 | if (err) { | ||
| 229 | dev_err(dev, "deassert pclk_rst err %d\n", err); | ||
| 230 | goto err_exit_phy; | ||
| 231 | } | ||
| 232 | |||
| 233 | if (rockchip->link_gen == 2) | ||
| 234 | rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_2, | ||
| 235 | PCIE_CLIENT_CONFIG); | ||
| 236 | else | ||
| 237 | rockchip_pcie_write(rockchip, PCIE_CLIENT_GEN_SEL_1, | ||
| 238 | PCIE_CLIENT_CONFIG); | ||
| 239 | |||
| 240 | regs = PCIE_CLIENT_LINK_TRAIN_ENABLE | PCIE_CLIENT_ARI_ENABLE | | ||
| 241 | PCIE_CLIENT_CONF_LANE_NUM(rockchip->lanes); | ||
| 242 | |||
| 243 | if (rockchip->is_rc) | ||
| 244 | regs |= PCIE_CLIENT_CONF_ENABLE | PCIE_CLIENT_MODE_RC; | ||
| 245 | else | ||
| 246 | regs |= PCIE_CLIENT_CONF_DISABLE | PCIE_CLIENT_MODE_EP; | ||
| 247 | |||
| 248 | rockchip_pcie_write(rockchip, regs, PCIE_CLIENT_CONFIG); | ||
| 249 | |||
| 250 | for (i = 0; i < MAX_LANE_NUM; i++) { | ||
| 251 | err = phy_power_on(rockchip->phys[i]); | ||
| 252 | if (err) { | ||
| 253 | dev_err(dev, "power on phy%d err %d\n", i, err); | ||
| 254 | goto err_power_off_phy; | ||
| 255 | } | ||
| 256 | } | ||
| 257 | |||
| 258 | /* | ||
| 259 | * Please don't reorder the deassert sequence of the following | ||
| 260 | * four reset pins. | ||
| 261 | */ | ||
| 262 | err = reset_control_deassert(rockchip->mgmt_sticky_rst); | ||
| 263 | if (err) { | ||
| 264 | dev_err(dev, "deassert mgmt_sticky_rst err %d\n", err); | ||
| 265 | goto err_power_off_phy; | ||
| 266 | } | ||
| 267 | |||
| 268 | err = reset_control_deassert(rockchip->core_rst); | ||
| 269 | if (err) { | ||
| 270 | dev_err(dev, "deassert core_rst err %d\n", err); | ||
| 271 | goto err_power_off_phy; | ||
| 272 | } | ||
| 273 | |||
| 274 | err = reset_control_deassert(rockchip->mgmt_rst); | ||
| 275 | if (err) { | ||
| 276 | dev_err(dev, "deassert mgmt_rst err %d\n", err); | ||
| 277 | goto err_power_off_phy; | ||
| 278 | } | ||
| 279 | |||
| 280 | err = reset_control_deassert(rockchip->pipe_rst); | ||
| 281 | if (err) { | ||
| 282 | dev_err(dev, "deassert pipe_rst err %d\n", err); | ||
| 283 | goto err_power_off_phy; | ||
| 284 | } | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | err_power_off_phy: | ||
| 288 | while (i--) | ||
| 289 | phy_power_off(rockchip->phys[i]); | ||
| 290 | i = MAX_LANE_NUM; | ||
| 291 | err_exit_phy: | ||
| 292 | while (i--) | ||
| 293 | phy_exit(rockchip->phys[i]); | ||
| 294 | return err; | ||
| 295 | } | ||
| 296 | EXPORT_SYMBOL_GPL(rockchip_pcie_init_port); | ||
| 297 | |||
| 298 | int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip) | ||
| 299 | { | ||
| 300 | struct device *dev = rockchip->dev; | ||
| 301 | struct phy *phy; | ||
| 302 | char *name; | ||
| 303 | u32 i; | ||
| 304 | |||
| 305 | phy = devm_phy_get(dev, "pcie-phy"); | ||
| 306 | if (!IS_ERR(phy)) { | ||
| 307 | rockchip->legacy_phy = true; | ||
| 308 | rockchip->phys[0] = phy; | ||
| 309 | dev_warn(dev, "legacy phy model is deprecated!\n"); | ||
| 310 | return 0; | ||
| 311 | } | ||
| 312 | |||
| 313 | if (PTR_ERR(phy) == -EPROBE_DEFER) | ||
| 314 | return PTR_ERR(phy); | ||
| 315 | |||
| 316 | dev_dbg(dev, "missing legacy PHY; searching for per-lane PHYs\n"); | ||
| 317 | |||
| 318 | for (i = 0; i < MAX_LANE_NUM; i++) { | ||
| 319 | name = kasprintf(GFP_KERNEL, "pcie-phy-%u", i); | ||
| 320 | if (!name) | ||
| 321 | return -ENOMEM; | ||
| 322 | |||
| 323 | phy = devm_of_phy_get(dev, dev->of_node, name); | ||
| 324 | kfree(name); | ||
| 325 | |||
| 326 | if (IS_ERR(phy)) { | ||
| 327 | if (PTR_ERR(phy) != -EPROBE_DEFER) | ||
| 328 | dev_err(dev, "missing phy for lane %d: %ld\n", | ||
| 329 | i, PTR_ERR(phy)); | ||
| 330 | return PTR_ERR(phy); | ||
| 331 | } | ||
| 332 | |||
| 333 | rockchip->phys[i] = phy; | ||
| 334 | } | ||
| 335 | |||
| 336 | return 0; | ||
| 337 | } | ||
| 338 | EXPORT_SYMBOL_GPL(rockchip_pcie_get_phys); | ||
| 339 | |||
| 340 | void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip) | ||
| 341 | { | ||
| 342 | int i; | ||
| 343 | |||
| 344 | for (i = 0; i < MAX_LANE_NUM; i++) { | ||
| 345 | /* inactive lanes are already powered off */ | ||
| 346 | if (rockchip->lanes_map & BIT(i)) | ||
| 347 | phy_power_off(rockchip->phys[i]); | ||
| 348 | phy_exit(rockchip->phys[i]); | ||
| 349 | } | ||
| 350 | } | ||
| 351 | EXPORT_SYMBOL_GPL(rockchip_pcie_deinit_phys); | ||
| 352 | |||
| 353 | int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip) | ||
| 354 | { | ||
| 355 | struct device *dev = rockchip->dev; | ||
| 356 | int err; | ||
| 357 | |||
| 358 | err = clk_prepare_enable(rockchip->aclk_pcie); | ||
| 359 | if (err) { | ||
| 360 | dev_err(dev, "unable to enable aclk_pcie clock\n"); | ||
| 361 | return err; | ||
| 362 | } | ||
| 363 | |||
| 364 | err = clk_prepare_enable(rockchip->aclk_perf_pcie); | ||
| 365 | if (err) { | ||
| 366 | dev_err(dev, "unable to enable aclk_perf_pcie clock\n"); | ||
| 367 | goto err_aclk_perf_pcie; | ||
| 368 | } | ||
| 369 | |||
| 370 | err = clk_prepare_enable(rockchip->hclk_pcie); | ||
| 371 | if (err) { | ||
| 372 | dev_err(dev, "unable to enable hclk_pcie clock\n"); | ||
| 373 | goto err_hclk_pcie; | ||
| 374 | } | ||
| 375 | |||
| 376 | err = clk_prepare_enable(rockchip->clk_pcie_pm); | ||
| 377 | if (err) { | ||
| 378 | dev_err(dev, "unable to enable clk_pcie_pm clock\n"); | ||
| 379 | goto err_clk_pcie_pm; | ||
| 380 | } | ||
| 381 | |||
| 382 | return 0; | ||
| 383 | |||
| 384 | err_clk_pcie_pm: | ||
| 385 | clk_disable_unprepare(rockchip->hclk_pcie); | ||
| 386 | err_hclk_pcie: | ||
| 387 | clk_disable_unprepare(rockchip->aclk_perf_pcie); | ||
| 388 | err_aclk_perf_pcie: | ||
| 389 | clk_disable_unprepare(rockchip->aclk_pcie); | ||
| 390 | return err; | ||
| 391 | } | ||
| 392 | EXPORT_SYMBOL_GPL(rockchip_pcie_enable_clocks); | ||
| 393 | |||
| 394 | void rockchip_pcie_disable_clocks(void *data) | ||
| 395 | { | ||
| 396 | struct rockchip_pcie *rockchip = data; | ||
| 397 | |||
| 398 | clk_disable_unprepare(rockchip->clk_pcie_pm); | ||
| 399 | clk_disable_unprepare(rockchip->hclk_pcie); | ||
| 400 | clk_disable_unprepare(rockchip->aclk_perf_pcie); | ||
| 401 | clk_disable_unprepare(rockchip->aclk_pcie); | ||
| 402 | } | ||
| 403 | EXPORT_SYMBOL_GPL(rockchip_pcie_disable_clocks); | ||
| 404 | |||
| 405 | void rockchip_pcie_cfg_configuration_accesses( | ||
| 406 | struct rockchip_pcie *rockchip, u32 type) | ||
| 407 | { | ||
| 408 | u32 ob_desc_0; | ||
| 409 | |||
| 410 | /* Configuration Accesses for region 0 */ | ||
| 411 | rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); | ||
| 412 | |||
| 413 | rockchip_pcie_write(rockchip, | ||
| 414 | (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), | ||
| 415 | PCIE_CORE_OB_REGION_ADDR0); | ||
| 416 | rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, | ||
| 417 | PCIE_CORE_OB_REGION_ADDR1); | ||
| 418 | ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); | ||
| 419 | ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); | ||
| 420 | ob_desc_0 |= (type | (0x1 << 23)); | ||
| 421 | rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); | ||
| 422 | rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); | ||
| 423 | } | ||
| 424 | EXPORT_SYMBOL_GPL(rockchip_pcie_cfg_configuration_accesses); | ||
diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h new file mode 100644 index 000000000000..8e87a059ce73 --- /dev/null +++ b/drivers/pci/controller/pcie-rockchip.h | |||
| @@ -0,0 +1,338 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 2 | /* | ||
| 3 | * Rockchip AXI PCIe controller driver | ||
| 4 | * | ||
| 5 | * Copyright (c) 2018 Rockchip, Inc. | ||
| 6 | * | ||
| 7 | * Author: Shawn Lin <shawn.lin@rock-chips.com> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _PCIE_ROCKCHIP_H | ||
| 12 | #define _PCIE_ROCKCHIP_H | ||
| 13 | |||
| 14 | #include <linux/kernel.h> | ||
| 15 | #include <linux/pci.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * The upper 16 bits of PCIE_CLIENT_CONFIG are a write mask for the lower 16 | ||
| 19 | * bits. This allows atomic updates of the register without locking. | ||
| 20 | */ | ||
| 21 | #define HIWORD_UPDATE(mask, val) (((mask) << 16) | (val)) | ||
| 22 | #define HIWORD_UPDATE_BIT(val) HIWORD_UPDATE(val, val) | ||
| 23 | |||
| 24 | #define ENCODE_LANES(x) ((((x) >> 1) & 3) << 4) | ||
| 25 | #define MAX_LANE_NUM 4 | ||
| 26 | #define MAX_REGION_LIMIT 32 | ||
| 27 | #define MIN_EP_APERTURE 28 | ||
| 28 | |||
| 29 | #define PCIE_CLIENT_BASE 0x0 | ||
| 30 | #define PCIE_CLIENT_CONFIG (PCIE_CLIENT_BASE + 0x00) | ||
| 31 | #define PCIE_CLIENT_CONF_ENABLE HIWORD_UPDATE_BIT(0x0001) | ||
| 32 | #define PCIE_CLIENT_CONF_DISABLE HIWORD_UPDATE(0x0001, 0) | ||
| 33 | #define PCIE_CLIENT_LINK_TRAIN_ENABLE HIWORD_UPDATE_BIT(0x0002) | ||
| 34 | #define PCIE_CLIENT_ARI_ENABLE HIWORD_UPDATE_BIT(0x0008) | ||
| 35 | #define PCIE_CLIENT_CONF_LANE_NUM(x) HIWORD_UPDATE(0x0030, ENCODE_LANES(x)) | ||
| 36 | #define PCIE_CLIENT_MODE_RC HIWORD_UPDATE_BIT(0x0040) | ||
| 37 | #define PCIE_CLIENT_MODE_EP HIWORD_UPDATE(0x0040, 0) | ||
| 38 | #define PCIE_CLIENT_GEN_SEL_1 HIWORD_UPDATE(0x0080, 0) | ||
| 39 | #define PCIE_CLIENT_GEN_SEL_2 HIWORD_UPDATE_BIT(0x0080) | ||
| 40 | #define PCIE_CLIENT_DEBUG_OUT_0 (PCIE_CLIENT_BASE + 0x3c) | ||
| 41 | #define PCIE_CLIENT_DEBUG_LTSSM_MASK GENMASK(5, 0) | ||
| 42 | #define PCIE_CLIENT_DEBUG_LTSSM_L1 0x18 | ||
| 43 | #define PCIE_CLIENT_DEBUG_LTSSM_L2 0x19 | ||
| 44 | #define PCIE_CLIENT_BASIC_STATUS1 (PCIE_CLIENT_BASE + 0x48) | ||
| 45 | #define PCIE_CLIENT_LINK_STATUS_UP 0x00300000 | ||
| 46 | #define PCIE_CLIENT_LINK_STATUS_MASK 0x00300000 | ||
| 47 | #define PCIE_CLIENT_INT_MASK (PCIE_CLIENT_BASE + 0x4c) | ||
| 48 | #define PCIE_CLIENT_INT_STATUS (PCIE_CLIENT_BASE + 0x50) | ||
| 49 | #define PCIE_CLIENT_INTR_MASK GENMASK(8, 5) | ||
| 50 | #define PCIE_CLIENT_INTR_SHIFT 5 | ||
| 51 | #define PCIE_CLIENT_INT_LEGACY_DONE BIT(15) | ||
| 52 | #define PCIE_CLIENT_INT_MSG BIT(14) | ||
| 53 | #define PCIE_CLIENT_INT_HOT_RST BIT(13) | ||
| 54 | #define PCIE_CLIENT_INT_DPA BIT(12) | ||
| 55 | #define PCIE_CLIENT_INT_FATAL_ERR BIT(11) | ||
| 56 | #define PCIE_CLIENT_INT_NFATAL_ERR BIT(10) | ||
| 57 | #define PCIE_CLIENT_INT_CORR_ERR BIT(9) | ||
| 58 | #define PCIE_CLIENT_INT_INTD BIT(8) | ||
| 59 | #define PCIE_CLIENT_INT_INTC BIT(7) | ||
| 60 | #define PCIE_CLIENT_INT_INTB BIT(6) | ||
| 61 | #define PCIE_CLIENT_INT_INTA BIT(5) | ||
| 62 | #define PCIE_CLIENT_INT_LOCAL BIT(4) | ||
| 63 | #define PCIE_CLIENT_INT_UDMA BIT(3) | ||
| 64 | #define PCIE_CLIENT_INT_PHY BIT(2) | ||
| 65 | #define PCIE_CLIENT_INT_HOT_PLUG BIT(1) | ||
| 66 | #define PCIE_CLIENT_INT_PWR_STCG BIT(0) | ||
| 67 | |||
| 68 | #define PCIE_CLIENT_INT_LEGACY \ | ||
| 69 | (PCIE_CLIENT_INT_INTA | PCIE_CLIENT_INT_INTB | \ | ||
| 70 | PCIE_CLIENT_INT_INTC | PCIE_CLIENT_INT_INTD) | ||
| 71 | |||
| 72 | #define PCIE_CLIENT_INT_CLI \ | ||
| 73 | (PCIE_CLIENT_INT_CORR_ERR | PCIE_CLIENT_INT_NFATAL_ERR | \ | ||
| 74 | PCIE_CLIENT_INT_FATAL_ERR | PCIE_CLIENT_INT_DPA | \ | ||
| 75 | PCIE_CLIENT_INT_HOT_RST | PCIE_CLIENT_INT_MSG | \ | ||
| 76 | PCIE_CLIENT_INT_LEGACY_DONE | PCIE_CLIENT_INT_LEGACY | \ | ||
| 77 | PCIE_CLIENT_INT_PHY) | ||
| 78 | |||
| 79 | #define PCIE_CORE_CTRL_MGMT_BASE 0x900000 | ||
| 80 | #define PCIE_CORE_CTRL (PCIE_CORE_CTRL_MGMT_BASE + 0x000) | ||
| 81 | #define PCIE_CORE_PL_CONF_SPEED_5G 0x00000008 | ||
| 82 | #define PCIE_CORE_PL_CONF_SPEED_MASK 0x00000018 | ||
| 83 | #define PCIE_CORE_PL_CONF_LANE_MASK 0x00000006 | ||
| 84 | #define PCIE_CORE_PL_CONF_LANE_SHIFT 1 | ||
| 85 | #define PCIE_CORE_CTRL_PLC1 (PCIE_CORE_CTRL_MGMT_BASE + 0x004) | ||
| 86 | #define PCIE_CORE_CTRL_PLC1_FTS_MASK GENMASK(23, 8) | ||
| 87 | #define PCIE_CORE_CTRL_PLC1_FTS_SHIFT 8 | ||
| 88 | #define PCIE_CORE_CTRL_PLC1_FTS_CNT 0xffff | ||
| 89 | #define PCIE_CORE_TXCREDIT_CFG1 (PCIE_CORE_CTRL_MGMT_BASE + 0x020) | ||
| 90 | #define PCIE_CORE_TXCREDIT_CFG1_MUI_MASK 0xFFFF0000 | ||
| 91 | #define PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT 16 | ||
| 92 | #define PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(x) \ | ||
| 93 | (((x) >> 3) << PCIE_CORE_TXCREDIT_CFG1_MUI_SHIFT) | ||
| 94 | #define PCIE_CORE_LANE_MAP (PCIE_CORE_CTRL_MGMT_BASE + 0x200) | ||
| 95 | #define PCIE_CORE_LANE_MAP_MASK 0x0000000f | ||
| 96 | #define PCIE_CORE_LANE_MAP_REVERSE BIT(16) | ||
| 97 | #define PCIE_CORE_INT_STATUS (PCIE_CORE_CTRL_MGMT_BASE + 0x20c) | ||
| 98 | #define PCIE_CORE_INT_PRFPE BIT(0) | ||
| 99 | #define PCIE_CORE_INT_CRFPE BIT(1) | ||
| 100 | #define PCIE_CORE_INT_RRPE BIT(2) | ||
| 101 | #define PCIE_CORE_INT_PRFO BIT(3) | ||
| 102 | #define PCIE_CORE_INT_CRFO BIT(4) | ||
| 103 | #define PCIE_CORE_INT_RT BIT(5) | ||
| 104 | #define PCIE_CORE_INT_RTR BIT(6) | ||
| 105 | #define PCIE_CORE_INT_PE BIT(7) | ||
| 106 | #define PCIE_CORE_INT_MTR BIT(8) | ||
| 107 | #define PCIE_CORE_INT_UCR BIT(9) | ||
| 108 | #define PCIE_CORE_INT_FCE BIT(10) | ||
| 109 | #define PCIE_CORE_INT_CT BIT(11) | ||
| 110 | #define PCIE_CORE_INT_UTC BIT(18) | ||
| 111 | #define PCIE_CORE_INT_MMVC BIT(19) | ||
| 112 | #define PCIE_CORE_CONFIG_VENDOR (PCIE_CORE_CTRL_MGMT_BASE + 0x44) | ||
| 113 | #define PCIE_CORE_INT_MASK (PCIE_CORE_CTRL_MGMT_BASE + 0x210) | ||
| 114 | #define PCIE_CORE_PHY_FUNC_CFG (PCIE_CORE_CTRL_MGMT_BASE + 0x2c0) | ||
| 115 | #define PCIE_RC_BAR_CONF (PCIE_CORE_CTRL_MGMT_BASE + 0x300) | ||
| 116 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_DISABLED 0x0 | ||
| 117 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_IO_32BITS 0x1 | ||
| 118 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_32BITS 0x4 | ||
| 119 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_32BITS 0x5 | ||
| 120 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_MEM_64BITS 0x6 | ||
| 121 | #define ROCKCHIP_PCIE_CORE_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 | ||
| 122 | |||
| 123 | #define PCIE_CORE_INT \ | ||
| 124 | (PCIE_CORE_INT_PRFPE | PCIE_CORE_INT_CRFPE | \ | ||
| 125 | PCIE_CORE_INT_RRPE | PCIE_CORE_INT_CRFO | \ | ||
| 126 | PCIE_CORE_INT_RT | PCIE_CORE_INT_RTR | \ | ||
| 127 | PCIE_CORE_INT_PE | PCIE_CORE_INT_MTR | \ | ||
| 128 | PCIE_CORE_INT_UCR | PCIE_CORE_INT_FCE | \ | ||
| 129 | PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ | ||
| 130 | PCIE_CORE_INT_MMVC) | ||
| 131 | |||
| 132 | #define PCIE_RC_RP_ATS_BASE 0x400000 | ||
| 133 | #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 | ||
| 134 | #define PCIE_RC_CONFIG_BASE 0xa00000 | ||
| 135 | #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) | ||
| 136 | #define PCIE_RC_CONFIG_SCC_SHIFT 16 | ||
| 137 | #define PCIE_RC_CONFIG_DCR (PCIE_RC_CONFIG_BASE + 0xc4) | ||
| 138 | #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 | ||
| 139 | #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff | ||
| 140 | #define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 | ||
| 141 | #define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) | ||
| 142 | #define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) | ||
| 143 | #define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) | ||
| 144 | #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) | ||
| 145 | #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) | ||
| 146 | #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) | ||
| 147 | #define PCIE_RC_CONFIG_L1_SUBSTATE_CTRL2 (PCIE_RC_CONFIG_BASE + 0x90c) | ||
| 148 | #define PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) | ||
| 149 | #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) | ||
| 150 | |||
| 151 | #define PCIE_CORE_AXI_CONF_BASE 0xc00000 | ||
| 152 | #define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) | ||
| 153 | #define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f | ||
| 154 | #define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 | ||
| 155 | #define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) | ||
| 156 | #define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) | ||
| 157 | #define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) | ||
| 158 | |||
| 159 | #define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 | ||
| 160 | #define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) | ||
| 161 | #define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f | ||
| 162 | #define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 | ||
| 163 | #define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) | ||
| 164 | |||
| 165 | /* Size of one AXI Region (not Region 0) */ | ||
| 166 | #define AXI_REGION_SIZE BIT(20) | ||
| 167 | /* Size of Region 0, equal to sum of sizes of other regions */ | ||
| 168 | #define AXI_REGION_0_SIZE (32 * (0x1 << 20)) | ||
| 169 | #define OB_REG_SIZE_SHIFT 5 | ||
| 170 | #define IB_ROOT_PORT_REG_SIZE_SHIFT 3 | ||
| 171 | #define AXI_WRAPPER_IO_WRITE 0x6 | ||
| 172 | #define AXI_WRAPPER_MEM_WRITE 0x2 | ||
| 173 | #define AXI_WRAPPER_TYPE0_CFG 0xa | ||
| 174 | #define AXI_WRAPPER_TYPE1_CFG 0xb | ||
| 175 | #define AXI_WRAPPER_NOR_MSG 0xc | ||
| 176 | |||
| 177 | #define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 | ||
| 178 | #define MIN_AXI_ADDR_BITS_PASSED 8 | ||
| 179 | #define PCIE_RC_SEND_PME_OFF 0x11960 | ||
| 180 | #define ROCKCHIP_VENDOR_ID 0x1d87 | ||
| 181 | #define PCIE_ECAM_BUS(x) (((x) & 0xff) << 20) | ||
| 182 | #define PCIE_ECAM_DEV(x) (((x) & 0x1f) << 15) | ||
| 183 | #define PCIE_ECAM_FUNC(x) (((x) & 0x7) << 12) | ||
| 184 | #define PCIE_ECAM_REG(x) (((x) & 0xfff) << 0) | ||
| 185 | #define PCIE_ECAM_ADDR(bus, dev, func, reg) \ | ||
| 186 | (PCIE_ECAM_BUS(bus) | PCIE_ECAM_DEV(dev) | \ | ||
| 187 | PCIE_ECAM_FUNC(func) | PCIE_ECAM_REG(reg)) | ||
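These macros pack bus/device/function/register into the standard ECAM layout (bus in bits 27:20, device 19:15, function 14:12, register 11:0). A quick worked example of the resulting offset:

```c
/* Worked example: Vendor ID register (offset 0x00) of bus 1, dev 0, fn 0. */
u32 off = PCIE_ECAM_ADDR(1, 0, 0, 0x00);	/* == 1 << 20 == 0x100000 */
```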
| 188 | #define PCIE_LINK_IS_L2(x) \ | ||
| 189 | (((x) & PCIE_CLIENT_DEBUG_LTSSM_MASK) == PCIE_CLIENT_DEBUG_LTSSM_L2) | ||
| 190 | #define PCIE_LINK_UP(x) \ | ||
| 191 | (((x) & PCIE_CLIENT_LINK_STATUS_MASK) == PCIE_CLIENT_LINK_STATUS_UP) | ||
| 192 | #define PCIE_LINK_IS_GEN2(x) \ | ||
| 193 | (((x) & PCIE_CORE_PL_CONF_SPEED_MASK) == PCIE_CORE_PL_CONF_SPEED_5G) | ||
| 194 | |||
| 195 | #define RC_REGION_0_ADDR_TRANS_H 0x00000000 | ||
| 196 | #define RC_REGION_0_ADDR_TRANS_L 0x00000000 | ||
| 197 | #define RC_REGION_0_PASS_BITS (25 - 1) | ||
| 198 | #define RC_REGION_0_TYPE_MASK GENMASK(3, 0) | ||
| 199 | #define MAX_AXI_WRAPPER_REGION_NUM 33 | ||
| 200 | |||
| 201 | #define ROCKCHIP_PCIE_MSG_ROUTING_TO_RC 0x0 | ||
| 202 | #define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ADDR 0x1 | ||
| 203 | #define ROCKCHIP_PCIE_MSG_ROUTING_VIA_ID 0x2 | ||
| 204 | #define ROCKCHIP_PCIE_MSG_ROUTING_BROADCAST 0x3 | ||
| 205 | #define ROCKCHIP_PCIE_MSG_ROUTING_LOCAL_INTX 0x4 | ||
| 206 | #define ROCKCHIP_PCIE_MSG_ROUTING_PME_ACK 0x5 | ||
| 207 | #define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTA 0x20 | ||
| 208 | #define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTB 0x21 | ||
| 209 | #define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTC 0x22 | ||
| 210 | #define ROCKCHIP_PCIE_MSG_CODE_ASSERT_INTD 0x23 | ||
| 211 | #define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTA 0x24 | ||
| 212 | #define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTB 0x25 | ||
| 213 | #define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTC 0x26 | ||
| 214 | #define ROCKCHIP_PCIE_MSG_CODE_DEASSERT_INTD 0x27 | ||
| 215 | #define ROCKCHIP_PCIE_MSG_ROUTING_MASK GENMASK(7, 5) | ||
| 216 | #define ROCKCHIP_PCIE_MSG_ROUTING(route) \ | ||
| 217 | (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) | ||
| 218 | #define ROCKCHIP_PCIE_MSG_CODE_MASK GENMASK(15, 8) | ||
| 219 | #define ROCKCHIP_PCIE_MSG_CODE(code) \ | ||
| 220 | (((code) << 8) & ROCKCHIP_PCIE_MSG_CODE_MASK) | ||
| 221 | #define ROCKCHIP_PCIE_MSG_NO_DATA BIT(16) | ||
| 222 | |||
| 223 | #define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4 | ||
| 224 | #define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19) | ||
| 225 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90 | ||
| 226 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 | ||
| 227 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) | ||
| 228 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_OFFSET 20 | ||
| 229 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) | ||
| 230 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) | ||
| 231 | #define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) | ||
| 232 | #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 | ||
| 233 | #define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 | ||
| 234 | #define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) | ||
| 235 | #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ | ||
| 236 | (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) | ||
| 237 | #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ | ||
| 238 | (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) | ||
| 239 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ | ||
| 240 | (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) | ||
| 241 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) | ||
| 242 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ | ||
| 243 | (((devfn) << 12) & \ | ||
| 244 | ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK) | ||
| 245 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) | ||
| 246 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ | ||
| 247 | (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) | ||
| 248 | #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ | ||
| 249 | (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) | ||
| 250 | #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) | ||
| 251 | #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) | ||
| 252 | #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ | ||
| 253 | (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) | ||
| 254 | #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \ | ||
| 255 | (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) | ||
| 256 | #define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \ | ||
| 257 | (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020) | ||
| 258 | #define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ | ||
| 259 | (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) | ||
| 260 | #define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ | ||
| 261 | (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020) | ||
| 262 | |||
| 263 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \ | ||
| 264 | (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008) | ||
| 265 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG1(fn) \ | ||
| 266 | (PCIE_CORE_CTRL_MGMT_BASE + 0x0244 + (fn) * 0x0008) | ||
| 267 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b) \ | ||
| 268 | (GENMASK(4, 0) << ((b) * 8)) | ||
| 269 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE(b, a) \ | ||
| 270 | (((a) << ((b) * 8)) & \ | ||
| 271 | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_APERTURE_MASK(b)) | ||
| 272 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b) \ | ||
| 273 | (GENMASK(7, 5) << ((b) * 8)) | ||
| 274 | #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL(b, c) \ | ||
| 275 | (((c) << ((b) * 8 + 5)) & \ | ||
| 276 | ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG_BAR_CTRL_MASK(b)) | ||
| 277 | |||
| 278 | struct rockchip_pcie { | ||
| 279 | void __iomem *reg_base; /* DT axi-base */ | ||
| 280 | void __iomem *apb_base; /* DT apb-base */ | ||
| 281 | bool legacy_phy; | ||
| 282 | struct phy *phys[MAX_LANE_NUM]; | ||
| 283 | struct reset_control *core_rst; | ||
| 284 | struct reset_control *mgmt_rst; | ||
| 285 | struct reset_control *mgmt_sticky_rst; | ||
| 286 | struct reset_control *pipe_rst; | ||
| 287 | struct reset_control *pm_rst; | ||
| 288 | struct reset_control *aclk_rst; | ||
| 289 | struct reset_control *pclk_rst; | ||
| 290 | struct clk *aclk_pcie; | ||
| 291 | struct clk *aclk_perf_pcie; | ||
| 292 | struct clk *hclk_pcie; | ||
| 293 | struct clk *clk_pcie_pm; | ||
| 294 | struct regulator *vpcie12v; /* 12V power supply */ | ||
| 295 | struct regulator *vpcie3v3; /* 3.3V power supply */ | ||
| 296 | struct regulator *vpcie1v8; /* 1.8V power supply */ | ||
| 297 | struct regulator *vpcie0v9; /* 0.9V power supply */ | ||
| 298 | struct gpio_desc *ep_gpio; | ||
| 299 | u32 lanes; | ||
| 300 | u8 lanes_map; | ||
| 301 | u8 root_bus_nr; | ||
| 302 | int link_gen; | ||
| 303 | struct device *dev; | ||
| 304 | struct irq_domain *irq_domain; | ||
| 305 | int offset; | ||
| 306 | struct pci_bus *root_bus; | ||
| 307 | struct resource *io; | ||
| 308 | phys_addr_t io_bus_addr; | ||
| 309 | u32 io_size; | ||
| 310 | void __iomem *msg_region; | ||
| 311 | u32 mem_size; | ||
| 312 | phys_addr_t msg_bus_addr; | ||
| 313 | phys_addr_t mem_bus_addr; | ||
| 314 | bool is_rc; | ||
| 315 | struct resource *mem_res; | ||
| 316 | }; | ||
| 317 | |||
| 318 | static u32 rockchip_pcie_read(struct rockchip_pcie *rockchip, u32 reg) | ||
| 319 | { | ||
| 320 | return readl(rockchip->apb_base + reg); | ||
| 321 | } | ||
| 322 | |||
| 323 | static void rockchip_pcie_write(struct rockchip_pcie *rockchip, u32 val, | ||
| 324 | u32 reg) | ||
| 325 | { | ||
| 326 | writel(val, rockchip->apb_base + reg); | ||
| 327 | } | ||
| 328 | |||
| 329 | int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip); | ||
| 330 | int rockchip_pcie_init_port(struct rockchip_pcie *rockchip); | ||
| 331 | int rockchip_pcie_get_phys(struct rockchip_pcie *rockchip); | ||
| 332 | void rockchip_pcie_deinit_phys(struct rockchip_pcie *rockchip); | ||
| 333 | int rockchip_pcie_enable_clocks(struct rockchip_pcie *rockchip); | ||
| 334 | void rockchip_pcie_disable_clocks(void *data); | ||
| 335 | void rockchip_pcie_cfg_configuration_accesses( | ||
| 336 | struct rockchip_pcie *rockchip, u32 type); | ||
| 337 | |||
| 338 | #endif /* _PCIE_ROCKCHIP_H */ | ||
diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c new file mode 100644 index 000000000000..21a208da3f59 --- /dev/null +++ b/drivers/pci/controller/pcie-tango.c | |||
| @@ -0,0 +1,341 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | #include <linux/irqchip/chained_irq.h> | ||
| 3 | #include <linux/irqdomain.h> | ||
| 4 | #include <linux/pci-ecam.h> | ||
| 5 | #include <linux/delay.h> | ||
| 6 | #include <linux/msi.h> | ||
| 7 | #include <linux/of_address.h> | ||
| 8 | |||
| 9 | #define MSI_MAX 256 | ||
| 10 | |||
| 11 | #define SMP8759_MUX 0x48 | ||
| 12 | #define SMP8759_TEST_OUT 0x74 | ||
| 13 | #define SMP8759_DOORBELL 0x7c | ||
| 14 | #define SMP8759_STATUS 0x80 | ||
| 15 | #define SMP8759_ENABLE 0xa0 | ||
| 16 | |||
| 17 | struct tango_pcie { | ||
| 18 | DECLARE_BITMAP(used_msi, MSI_MAX); | ||
| 19 | u64 msi_doorbell; | ||
| 20 | spinlock_t used_msi_lock; | ||
| 21 | void __iomem *base; | ||
| 22 | struct irq_domain *dom; | ||
| 23 | }; | ||
| 24 | |||
| 25 | static void tango_msi_isr(struct irq_desc *desc) | ||
| 26 | { | ||
| 27 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 28 | struct tango_pcie *pcie = irq_desc_get_handler_data(desc); | ||
| 29 | unsigned long status, base, virq, idx, pos = 0; | ||
| 30 | |||
| 31 | chained_irq_enter(chip, desc); | ||
| 32 | spin_lock(&pcie->used_msi_lock); | ||
| 33 | |||
| 34 | while ((pos = find_next_bit(pcie->used_msi, MSI_MAX, pos)) < MSI_MAX) { | ||
| 35 | base = round_down(pos, 32); | ||
| 36 | status = readl_relaxed(pcie->base + SMP8759_STATUS + base / 8); | ||
| 37 | for_each_set_bit(idx, &status, 32) { | ||
| 38 | virq = irq_find_mapping(pcie->dom, base + idx); | ||
| 39 | generic_handle_irq(virq); | ||
| 40 | } | ||
| 41 | pos = base + 32; | ||
| 42 | } | ||
| 43 | |||
| 44 | spin_unlock(&pcie->used_msi_lock); | ||
| 45 | chained_irq_exit(chip, desc); | ||
| 46 | } | ||
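The scan above handles MSIs 32 at a time: `base` is a bit index rounded down to a 32-bit boundary, so `base / 8` is the byte offset of the matching status word. The same arithmetic as a helper, for clarity (the 32-per-word grouping is inferred from the ISR, not documented in the source):

```c
/* Equivalent offset computation: MSI n's pending bit lives at
 * byte offset SMP8759_STATUS + (n / 32) * 4, bit position n % 32. */
static inline u32 smp8759_status_offset(unsigned int hwirq)
{
	return SMP8759_STATUS + (hwirq / 32) * 4;
}
```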
| 47 | |||
| 48 | static void tango_ack(struct irq_data *d) | ||
| 49 | { | ||
| 50 | struct tango_pcie *pcie = d->chip_data; | ||
| 51 | u32 offset = (d->hwirq / 32) * 4; | ||
| 52 | u32 bit = BIT(d->hwirq % 32); | ||
| 53 | |||
| 54 | writel_relaxed(bit, pcie->base + SMP8759_STATUS + offset); | ||
| 55 | } | ||
| 56 | |||
| 57 | static void update_msi_enable(struct irq_data *d, bool unmask) | ||
| 58 | { | ||
| 59 | unsigned long flags; | ||
| 60 | struct tango_pcie *pcie = d->chip_data; | ||
| 61 | u32 offset = (d->hwirq / 32) * 4; | ||
| 62 | u32 bit = BIT(d->hwirq % 32); | ||
| 63 | u32 val; | ||
| 64 | |||
| 65 | spin_lock_irqsave(&pcie->used_msi_lock, flags); | ||
| 66 | val = readl_relaxed(pcie->base + SMP8759_ENABLE + offset); | ||
| 67 | val = unmask ? val | bit : val & ~bit; | ||
| 68 | writel_relaxed(val, pcie->base + SMP8759_ENABLE + offset); | ||
| 69 | spin_unlock_irqrestore(&pcie->used_msi_lock, flags); | ||
| 70 | } | ||
| 71 | |||
| 72 | static void tango_mask(struct irq_data *d) | ||
| 73 | { | ||
| 74 | update_msi_enable(d, false); | ||
| 75 | } | ||
| 76 | |||
| 77 | static void tango_unmask(struct irq_data *d) | ||
| 78 | { | ||
| 79 | update_msi_enable(d, true); | ||
| 80 | } | ||
| 81 | |||
| 82 | static int tango_set_affinity(struct irq_data *d, const struct cpumask *mask, | ||
| 83 | bool force) | ||
| 84 | { | ||
| 85 | return -EINVAL; | ||
| 86 | } | ||
| 87 | |||
| 88 | static void tango_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) | ||
| 89 | { | ||
| 90 | struct tango_pcie *pcie = d->chip_data; | ||
| 91 | msg->address_lo = lower_32_bits(pcie->msi_doorbell); | ||
| 92 | msg->address_hi = upper_32_bits(pcie->msi_doorbell); | ||
| 93 | msg->data = d->hwirq; | ||
| 94 | } | ||
| 95 | |||
| 96 | static struct irq_chip tango_chip = { | ||
| 97 | .irq_ack = tango_ack, | ||
| 98 | .irq_mask = tango_mask, | ||
| 99 | .irq_unmask = tango_unmask, | ||
| 100 | .irq_set_affinity = tango_set_affinity, | ||
| 101 | .irq_compose_msi_msg = tango_compose_msi_msg, | ||
| 102 | }; | ||
| 103 | |||
| 104 | static void msi_ack(struct irq_data *d) | ||
| 105 | { | ||
| 106 | irq_chip_ack_parent(d); | ||
| 107 | } | ||
| 108 | |||
| 109 | static void msi_mask(struct irq_data *d) | ||
| 110 | { | ||
| 111 | pci_msi_mask_irq(d); | ||
| 112 | irq_chip_mask_parent(d); | ||
| 113 | } | ||
| 114 | |||
| 115 | static void msi_unmask(struct irq_data *d) | ||
| 116 | { | ||
| 117 | pci_msi_unmask_irq(d); | ||
| 118 | irq_chip_unmask_parent(d); | ||
| 119 | } | ||
| 120 | |||
| 121 | static struct irq_chip msi_chip = { | ||
| 122 | .name = "MSI", | ||
| 123 | .irq_ack = msi_ack, | ||
| 124 | .irq_mask = msi_mask, | ||
| 125 | .irq_unmask = msi_unmask, | ||
| 126 | }; | ||
| 127 | |||
| 128 | static struct msi_domain_info msi_dom_info = { | ||
| 129 | .flags = MSI_FLAG_PCI_MSIX | ||
| 130 | | MSI_FLAG_USE_DEF_DOM_OPS | ||
| 131 | | MSI_FLAG_USE_DEF_CHIP_OPS, | ||
| 132 | .chip = &msi_chip, | ||
| 133 | }; | ||
| 134 | |||
| 135 | static int tango_irq_domain_alloc(struct irq_domain *dom, unsigned int virq, | ||
| 136 | unsigned int nr_irqs, void *args) | ||
| 137 | { | ||
| 138 | struct tango_pcie *pcie = dom->host_data; | ||
| 139 | unsigned long flags; | ||
| 140 | int pos; | ||
| 141 | |||
| 142 | spin_lock_irqsave(&pcie->used_msi_lock, flags); | ||
| 143 | pos = find_first_zero_bit(pcie->used_msi, MSI_MAX); | ||
| 144 | if (pos >= MSI_MAX) { | ||
| 145 | spin_unlock_irqrestore(&pcie->used_msi_lock, flags); | ||
| 146 | return -ENOSPC; | ||
| 147 | } | ||
| 148 | __set_bit(pos, pcie->used_msi); | ||
| 149 | spin_unlock_irqrestore(&pcie->used_msi_lock, flags); | ||
| 150 | irq_domain_set_info(dom, virq, pos, &tango_chip, | ||
| 151 | pcie, handle_edge_irq, NULL, NULL); | ||
| 152 | |||
| 153 | return 0; | ||
| 154 | } | ||
| 155 | |||
| 156 | static void tango_irq_domain_free(struct irq_domain *dom, unsigned int virq, | ||
| 157 | unsigned int nr_irqs) | ||
| 158 | { | ||
| 159 | unsigned long flags; | ||
| 160 | struct irq_data *d = irq_domain_get_irq_data(dom, virq); | ||
| 161 | struct tango_pcie *pcie = d->chip_data; | ||
| 162 | |||
| 163 | spin_lock_irqsave(&pcie->used_msi_lock, flags); | ||
| 164 | __clear_bit(d->hwirq, pcie->used_msi); | ||
| 165 | spin_unlock_irqrestore(&pcie->used_msi_lock, flags); | ||
| 166 | } | ||
| 167 | |||
| 168 | static const struct irq_domain_ops dom_ops = { | ||
| 169 | .alloc = tango_irq_domain_alloc, | ||
| 170 | .free = tango_irq_domain_free, | ||
| 171 | }; | ||
| 172 | |||
| 173 | static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 174 | int where, int size, u32 *val) | ||
| 175 | { | ||
| 176 | struct pci_config_window *cfg = bus->sysdata; | ||
| 177 | struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); | ||
| 178 | int ret; | ||
| 179 | |||
| 180 | /* Reads in configuration space outside devfn 0 return garbage */ | ||
| 181 | if (devfn != 0) | ||
| 182 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
| 183 | |||
| 184 | /* | ||
| 185 | * PCI config and MMIO accesses are muxed. Linux doesn't have a | ||
| 186 | * mutual exclusion mechanism for config vs. MMIO accesses, so | ||
| 187 | * concurrent accesses may cause corruption. | ||
| 188 | */ | ||
| 189 | writel_relaxed(1, pcie->base + SMP8759_MUX); | ||
| 190 | ret = pci_generic_config_read(bus, devfn, where, size, val); | ||
| 191 | writel_relaxed(0, pcie->base + SMP8759_MUX); | ||
| 192 | |||
| 193 | return ret; | ||
| 194 | } | ||
| 195 | |||
| 196 | static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 197 | int where, int size, u32 val) | ||
| 198 | { | ||
| 199 | struct pci_config_window *cfg = bus->sysdata; | ||
| 200 | struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); | ||
| 201 | int ret; | ||
| 202 | |||
| 203 | writel_relaxed(1, pcie->base + SMP8759_MUX); | ||
| 204 | ret = pci_generic_config_write(bus, devfn, where, size, val); | ||
| 205 | writel_relaxed(0, pcie->base + SMP8759_MUX); | ||
| 206 | |||
| 207 | return ret; | ||
| 208 | } | ||
| 209 | |||
| 210 | static struct pci_ecam_ops smp8759_ecam_ops = { | ||
| 211 | .bus_shift = 20, | ||
| 212 | .pci_ops = { | ||
| 213 | .map_bus = pci_ecam_map_bus, | ||
| 214 | .read = smp8759_config_read, | ||
| 215 | .write = smp8759_config_write, | ||
| 216 | } | ||
| 217 | }; | ||
| 218 | |||
| 219 | static int tango_pcie_link_up(struct tango_pcie *pcie) | ||
| 220 | { | ||
| 221 | void __iomem *test_out = pcie->base + SMP8759_TEST_OUT; | ||
| 222 | int i; | ||
| 223 | |||
| 224 | writel_relaxed(16, test_out); | ||
| 225 | for (i = 0; i < 10; ++i) { | ||
| 226 | u32 ltssm_state = readl_relaxed(test_out) >> 8; | ||
| 227 | if ((ltssm_state & 0x1f) == 0xf) /* L0 */ | ||
| 228 | return 1; | ||
| 229 | usleep_range(3000, 4000); | ||
| 230 | } | ||
| 231 | |||
| 232 | return 0; | ||
| 233 | } | ||
| 234 | |||
| 235 | static int tango_pcie_probe(struct platform_device *pdev) | ||
| 236 | { | ||
| 237 | struct device *dev = &pdev->dev; | ||
| 238 | struct tango_pcie *pcie; | ||
| 239 | struct resource *res; | ||
| 240 | struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); | ||
| 241 | struct irq_domain *msi_dom, *irq_dom; | ||
| 242 | struct of_pci_range_parser parser; | ||
| 243 | struct of_pci_range range; | ||
| 244 | int virq, offset; | ||
| 245 | |||
| 246 | dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n"); | ||
| 247 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | ||
| 248 | |||
| 249 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 250 | if (!pcie) | ||
| 251 | return -ENOMEM; | ||
| 252 | |||
| 253 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 254 | pcie->base = devm_ioremap_resource(dev, res); | ||
| 255 | if (IS_ERR(pcie->base)) | ||
| 256 | return PTR_ERR(pcie->base); | ||
| 257 | |||
| 258 | platform_set_drvdata(pdev, pcie); | ||
| 259 | |||
| 260 | if (!tango_pcie_link_up(pcie)) | ||
| 261 | return -ENODEV; | ||
| 262 | |||
| 263 | if (of_pci_dma_range_parser_init(&parser, dev->of_node) < 0) | ||
| 264 | return -ENOENT; | ||
| 265 | |||
| 266 | if (of_pci_range_parser_one(&parser, &range) == NULL) | ||
| 267 | return -ENOENT; | ||
| 268 | |||
| 269 | range.pci_addr += range.size; | ||
| 270 | pcie->msi_doorbell = range.pci_addr + res->start + SMP8759_DOORBELL; | ||
| 271 | |||
| 272 | for (offset = 0; offset < MSI_MAX / 8; offset += 4) | ||
| 273 | writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset); | ||
| 274 | |||
| 275 | virq = platform_get_irq(pdev, 1); | ||
| 276 | if (virq <= 0) { | ||
| 277 | dev_err(dev, "Failed to map IRQ\n"); | ||
| 278 | return -ENXIO; | ||
| 279 | } | ||
| 280 | |||
| 281 | irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie); | ||
| 282 | if (!irq_dom) { | ||
| 283 | dev_err(dev, "Failed to create IRQ domain\n"); | ||
| 284 | return -ENOMEM; | ||
| 285 | } | ||
| 286 | |||
| 287 | msi_dom = pci_msi_create_irq_domain(fwnode, &msi_dom_info, irq_dom); | ||
| 288 | if (!msi_dom) { | ||
| 289 | dev_err(dev, "Failed to create MSI domain\n"); | ||
| 290 | irq_domain_remove(irq_dom); | ||
| 291 | return -ENOMEM; | ||
| 292 | } | ||
| 293 | |||
| 294 | pcie->dom = irq_dom; | ||
| 295 | spin_lock_init(&pcie->used_msi_lock); | ||
| 296 | irq_set_chained_handler_and_data(virq, tango_msi_isr, pcie); | ||
| 297 | |||
| 298 | return pci_host_common_probe(pdev, &smp8759_ecam_ops); | ||
| 299 | } | ||
| 300 | |||
| 301 | static const struct of_device_id tango_pcie_ids[] = { | ||
| 302 | { .compatible = "sigma,smp8759-pcie" }, | ||
| 303 | { }, | ||
| 304 | }; | ||
| 305 | |||
| 306 | static struct platform_driver tango_pcie_driver = { | ||
| 307 | .probe = tango_pcie_probe, | ||
| 308 | .driver = { | ||
| 309 | .name = KBUILD_MODNAME, | ||
| 310 | .of_match_table = tango_pcie_ids, | ||
| 311 | .suppress_bind_attrs = true, | ||
| 312 | }, | ||
| 313 | }; | ||
| 314 | builtin_platform_driver(tango_pcie_driver); | ||
| 315 | |||
| 316 | /* | ||
| 317 | * The root complex advertises the wrong device class. | ||
| 318 | * Header Type 1 is for PCI-to-PCI bridges. | ||
| 319 | */ | ||
| 320 | static void tango_fixup_class(struct pci_dev *dev) | ||
| 321 | { | ||
| 322 | dev->class = PCI_CLASS_BRIDGE_PCI << 8; | ||
| 323 | } | ||
| 324 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class); | ||
| 325 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class); | ||
| 326 | |||
| 327 | /* | ||
| 328 | * The root complex exposes a "fake" BAR, which is used to filter | ||
| 329 | * bus-to-system accesses. Only accesses within the range defined by this | ||
| 330 | * BAR are forwarded to the host, others are ignored. | ||
| 331 | * | ||
| 332 | * By default, the DMA framework expects an identity mapping, and DRAM0 is | ||
| 333 | * mapped at 0x80000000. | ||
| 334 | */ | ||
| 335 | static void tango_fixup_bar(struct pci_dev *dev) | ||
| 336 | { | ||
| 337 | dev->non_compliant_bars = true; | ||
| 338 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000); | ||
| 339 | } | ||
| 340 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar); | ||
| 341 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar); | ||
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c new file mode 100644 index 000000000000..6a4bbb5b3de0 --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx-nwl.c | |||
| @@ -0,0 +1,917 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for NWL PCIe Bridge | ||
| 4 | * Based on pcie-xilinx.c, pci-tegra.c | ||
| 5 | * | ||
| 6 | * (C) Copyright 2014 - 2015, Xilinx, Inc. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/delay.h> | ||
| 10 | #include <linux/interrupt.h> | ||
| 11 | #include <linux/irq.h> | ||
| 12 | #include <linux/irqdomain.h> | ||
| 13 | #include <linux/kernel.h> | ||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/msi.h> | ||
| 16 | #include <linux/of_address.h> | ||
| 17 | #include <linux/of_pci.h> | ||
| 18 | #include <linux/of_platform.h> | ||
| 19 | #include <linux/of_irq.h> | ||
| 20 | #include <linux/pci.h> | ||
| 21 | #include <linux/platform_device.h> | ||
| 22 | #include <linux/irqchip/chained_irq.h> | ||
| 23 | |||
| 24 | #include "../pci.h" | ||
| 25 | |||
| 26 | /* Bridge core config registers */ | ||
| 27 | #define BRCFG_PCIE_RX0 0x00000000 | ||
| 28 | #define BRCFG_INTERRUPT 0x00000010 | ||
| 29 | #define BRCFG_PCIE_RX_MSG_FILTER 0x00000020 | ||
| 30 | |||
| 31 | /* Egress - Bridge translation registers */ | ||
| 32 | #define E_BREG_CAPABILITIES 0x00000200 | ||
| 33 | #define E_BREG_CONTROL 0x00000208 | ||
| 34 | #define E_BREG_BASE_LO 0x00000210 | ||
| 35 | #define E_BREG_BASE_HI 0x00000214 | ||
| 36 | #define E_ECAM_CAPABILITIES 0x00000220 | ||
| 37 | #define E_ECAM_CONTROL 0x00000228 | ||
| 38 | #define E_ECAM_BASE_LO 0x00000230 | ||
| 39 | #define E_ECAM_BASE_HI 0x00000234 | ||
| 40 | |||
| 41 | /* Ingress - address translations */ | ||
| 42 | #define I_MSII_CAPABILITIES 0x00000300 | ||
| 43 | #define I_MSII_CONTROL 0x00000308 | ||
| 44 | #define I_MSII_BASE_LO 0x00000310 | ||
| 45 | #define I_MSII_BASE_HI 0x00000314 | ||
| 46 | |||
| 47 | #define I_ISUB_CONTROL 0x000003E8 | ||
| 48 | #define SET_ISUB_CONTROL BIT(0) | ||
| 49 | /* Received message FIFO - interrupt status registers */ | ||
| 50 | #define MSGF_MISC_STATUS 0x00000400 | ||
| 51 | #define MSGF_MISC_MASK 0x00000404 | ||
| 52 | #define MSGF_LEG_STATUS 0x00000420 | ||
| 53 | #define MSGF_LEG_MASK 0x00000424 | ||
| 54 | #define MSGF_MSI_STATUS_LO 0x00000440 | ||
| 55 | #define MSGF_MSI_STATUS_HI 0x00000444 | ||
| 56 | #define MSGF_MSI_MASK_LO 0x00000448 | ||
| 57 | #define MSGF_MSI_MASK_HI 0x0000044C | ||
| 58 | |||
| 59 | /* Msg filter mask bits */ | ||
| 60 | #define CFG_ENABLE_PM_MSG_FWD BIT(1) | ||
| 61 | #define CFG_ENABLE_INT_MSG_FWD BIT(2) | ||
| 62 | #define CFG_ENABLE_ERR_MSG_FWD BIT(3) | ||
| 63 | #define CFG_ENABLE_MSG_FILTER_MASK (CFG_ENABLE_PM_MSG_FWD | \ | ||
| 64 | CFG_ENABLE_INT_MSG_FWD | \ | ||
| 65 | CFG_ENABLE_ERR_MSG_FWD) | ||
| 66 | |||
| 67 | /* Misc interrupt status mask bits */ | ||
| 68 | #define MSGF_MISC_SR_RXMSG_AVAIL BIT(0) | ||
| 69 | #define MSGF_MISC_SR_RXMSG_OVER BIT(1) | ||
| 70 | #define MSGF_MISC_SR_SLAVE_ERR BIT(4) | ||
| 71 | #define MSGF_MISC_SR_MASTER_ERR BIT(5) | ||
| 72 | #define MSGF_MISC_SR_I_ADDR_ERR BIT(6) | ||
| 73 | #define MSGF_MISC_SR_E_ADDR_ERR BIT(7) | ||
| 74 | #define MSGF_MISC_SR_FATAL_AER BIT(16) | ||
| 75 | #define MSGF_MISC_SR_NON_FATAL_AER BIT(17) | ||
| 76 | #define MSGF_MISC_SR_CORR_AER BIT(18) | ||
| 77 | #define MSGF_MISC_SR_UR_DETECT BIT(20) | ||
| 78 | #define MSGF_MISC_SR_NON_FATAL_DEV BIT(22) | ||
| 79 | #define MSGF_MISC_SR_FATAL_DEV BIT(23) | ||
| 80 | #define MSGF_MISC_SR_LINK_DOWN BIT(24) | ||
| 81 | #define MSGF_MSIC_SR_LINK_AUTO_BWIDTH BIT(25) | ||
| 82 | #define MSGF_MSIC_SR_LINK_BWIDTH BIT(26) | ||
| 83 | |||
| 84 | #define MSGF_MISC_SR_MASKALL (MSGF_MISC_SR_RXMSG_AVAIL | \ | ||
| 85 | MSGF_MISC_SR_RXMSG_OVER | \ | ||
| 86 | MSGF_MISC_SR_SLAVE_ERR | \ | ||
| 87 | MSGF_MISC_SR_MASTER_ERR | \ | ||
| 88 | MSGF_MISC_SR_I_ADDR_ERR | \ | ||
| 89 | MSGF_MISC_SR_E_ADDR_ERR | \ | ||
| 90 | MSGF_MISC_SR_FATAL_AER | \ | ||
| 91 | MSGF_MISC_SR_NON_FATAL_AER | \ | ||
| 92 | MSGF_MISC_SR_CORR_AER | \ | ||
| 93 | MSGF_MISC_SR_UR_DETECT | \ | ||
| 94 | MSGF_MISC_SR_NON_FATAL_DEV | \ | ||
| 95 | MSGF_MISC_SR_FATAL_DEV | \ | ||
| 96 | MSGF_MISC_SR_LINK_DOWN | \ | ||
| 97 | MSGF_MSIC_SR_LINK_AUTO_BWIDTH | \ | ||
| 98 | MSGF_MSIC_SR_LINK_BWIDTH) | ||
| 99 | |||
| 100 | /* Legacy interrupt status mask bits */ | ||
| 101 | #define MSGF_LEG_SR_INTA BIT(0) | ||
| 102 | #define MSGF_LEG_SR_INTB BIT(1) | ||
| 103 | #define MSGF_LEG_SR_INTC BIT(2) | ||
| 104 | #define MSGF_LEG_SR_INTD BIT(3) | ||
| 105 | #define MSGF_LEG_SR_MASKALL (MSGF_LEG_SR_INTA | MSGF_LEG_SR_INTB | \ | ||
| 106 | MSGF_LEG_SR_INTC | MSGF_LEG_SR_INTD) | ||
| 107 | |||
| 108 | /* MSI interrupt status mask bits */ | ||
| 109 | #define MSGF_MSI_SR_LO_MASK GENMASK(31, 0) | ||
| 110 | #define MSGF_MSI_SR_HI_MASK GENMASK(31, 0) | ||
| 111 | |||
| 112 | #define MSII_PRESENT BIT(0) | ||
| 113 | #define MSII_ENABLE BIT(0) | ||
| 114 | #define MSII_STATUS_ENABLE BIT(15) | ||
| 115 | |||
| 116 | /* Bridge config interrupt mask */ | ||
| 117 | #define BRCFG_INTERRUPT_MASK BIT(0) | ||
| 118 | #define BREG_PRESENT BIT(0) | ||
| 119 | #define BREG_ENABLE BIT(0) | ||
| 120 | #define BREG_ENABLE_FORCE BIT(1) | ||
| 121 | |||
| 122 | /* E_ECAM status mask bits */ | ||
| 123 | #define E_ECAM_PRESENT BIT(0) | ||
| 124 | #define E_ECAM_CR_ENABLE BIT(0) | ||
| 125 | #define E_ECAM_SIZE_LOC GENMASK(20, 16) | ||
| 126 | #define E_ECAM_SIZE_SHIFT 16 | ||
| 127 | #define ECAM_BUS_LOC_SHIFT 20 | ||
| 128 | #define ECAM_DEV_LOC_SHIFT 12 | ||
| 129 | #define NWL_ECAM_VALUE_DEFAULT 12 | ||
| 130 | |||
| 131 | #define CFG_DMA_REG_BAR GENMASK(2, 0) | ||
| 132 | |||
| 133 | #define INT_PCI_MSI_NR (2 * 32) | ||
| 134 | |||
| 135 | /* Reading the PS_LINKUP register */ | ||
| 136 | #define PS_LINKUP_OFFSET 0x00000238 | ||
| 137 | #define PCIE_PHY_LINKUP_BIT BIT(0) | ||
| 138 | #define PHY_RDY_LINKUP_BIT BIT(1) | ||
| 139 | |||
| 140 | /* Parameters for the link-up wait loop */ | ||
| 141 | #define LINK_WAIT_MAX_RETRIES 10 | ||
| 142 | #define LINK_WAIT_USLEEP_MIN 90000 | ||
| 143 | #define LINK_WAIT_USLEEP_MAX 100000 | ||
| 144 | |||
| 145 | struct nwl_msi { /* MSI information */ | ||
| 146 | struct irq_domain *msi_domain; | ||
| 147 | unsigned long *bitmap; | ||
| 148 | struct irq_domain *dev_domain; | ||
| 149 | struct mutex lock; /* protect bitmap variable */ | ||
| 150 | int irq_msi0; | ||
| 151 | int irq_msi1; | ||
| 152 | }; | ||
| 153 | |||
| 154 | struct nwl_pcie { | ||
| 155 | struct device *dev; | ||
| 156 | void __iomem *breg_base; | ||
| 157 | void __iomem *pcireg_base; | ||
| 158 | void __iomem *ecam_base; | ||
| 159 | phys_addr_t phys_breg_base; /* Physical Bridge Register Base */ | ||
| 160 | phys_addr_t phys_pcie_reg_base; /* Physical PCIe Controller Base */ | ||
| 161 | phys_addr_t phys_ecam_base; /* Physical Configuration Base */ | ||
| 162 | u32 breg_size; | ||
| 163 | u32 pcie_reg_size; | ||
| 164 | u32 ecam_size; | ||
| 165 | int irq_intx; | ||
| 166 | int irq_misc; | ||
| 167 | u32 ecam_value; | ||
| 168 | u8 last_busno; | ||
| 169 | u8 root_busno; | ||
| 170 | struct nwl_msi msi; | ||
| 171 | struct irq_domain *legacy_irq_domain; | ||
| 172 | raw_spinlock_t leg_mask_lock; | ||
| 173 | }; | ||
| 174 | |||
| 175 | static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) | ||
| 176 | { | ||
| 177 | return readl(pcie->breg_base + off); | ||
| 178 | } | ||
| 179 | |||
| 180 | static inline void nwl_bridge_writel(struct nwl_pcie *pcie, u32 val, u32 off) | ||
| 181 | { | ||
| 182 | writel(val, pcie->breg_base + off); | ||
| 183 | } | ||
| 184 | |||
| 185 | static bool nwl_pcie_link_up(struct nwl_pcie *pcie) | ||
| 186 | { | ||
| 187 | if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PCIE_PHY_LINKUP_BIT) | ||
| 188 | return true; | ||
| 189 | return false; | ||
| 190 | } | ||
| 191 | |||
| 192 | static bool nwl_phy_link_up(struct nwl_pcie *pcie) | ||
| 193 | { | ||
| 194 | if (readl(pcie->pcireg_base + PS_LINKUP_OFFSET) & PHY_RDY_LINKUP_BIT) | ||
| 195 | return true; | ||
| 196 | return false; | ||
| 197 | } | ||
| 198 | |||
| 199 | static int nwl_wait_for_link(struct nwl_pcie *pcie) | ||
| 200 | { | ||
| 201 | struct device *dev = pcie->dev; | ||
| 202 | int retries; | ||
| 203 | |||
| 204 | /* Check whether the PHY link is up */ | ||
| 205 | for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { | ||
| 206 | if (nwl_phy_link_up(pcie)) | ||
| 207 | return 0; | ||
| 208 | usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); | ||
| 209 | } | ||
| 210 | |||
| 211 | dev_err(dev, "PHY link never came up\n"); | ||
| 212 | return -ETIMEDOUT; | ||
| 213 | } | ||
| 214 | |||
| 215 | static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | ||
| 216 | { | ||
| 217 | struct nwl_pcie *pcie = bus->sysdata; | ||
| 218 | |||
| 219 | /* Check link before accessing downstream ports */ | ||
| 220 | if (bus->number != pcie->root_busno) { | ||
| 221 | if (!nwl_pcie_link_up(pcie)) | ||
| 222 | return false; | ||
| 223 | } | ||
| 224 | |||
| 225 | /* Only one device is allowed below each Root Port */ | ||
| 226 | if (bus->number == pcie->root_busno && devfn > 0) | ||
| 227 | return false; | ||
| 228 | |||
| 229 | return true; | ||
| 230 | } | ||
| 231 | |||
| 232 | /** | ||
| 233 | * nwl_pcie_map_bus - Get configuration base | ||
| 234 | * | ||
| 235 | * @bus: Bus structure of current bus | ||
| 236 | * @devfn: Device/function | ||
| 237 | * @where: Offset from base | ||
| 238 | * | ||
| 239 | * Return: Base address of the configuration space needed to be | ||
| 240 | * accessed. | ||
| 241 | */ | ||
| 242 | static void __iomem *nwl_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
| 243 | int where) | ||
| 244 | { | ||
| 245 | struct nwl_pcie *pcie = bus->sysdata; | ||
| 246 | int relbus; | ||
| 247 | |||
| 248 | if (!nwl_pcie_valid_device(bus, devfn)) | ||
| 249 | return NULL; | ||
| 250 | |||
| 251 | relbus = (bus->number << ECAM_BUS_LOC_SHIFT) | | ||
| 252 | (devfn << ECAM_DEV_LOC_SHIFT); | ||
| 253 | |||
| 254 | return pcie->ecam_base + relbus + where; | ||
| 255 | } | ||
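The offset arithmetic follows the ECAM convention, with devfn occupying bits 19:12 as a unit. For instance (a worked example, not driver code):

```c
/* Bus 1, devfn 0: relbus == 1 << 20 == 0x100000, i.e. every bus
 * occupies a 1 MiB slice of the ECAM window. */
u32 relbus = (1 << ECAM_BUS_LOC_SHIFT) | (0 << ECAM_DEV_LOC_SHIFT);
```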
| 256 | |||
| 257 | /* PCIe operations */ | ||
| 258 | static struct pci_ops nwl_pcie_ops = { | ||
| 259 | .map_bus = nwl_pcie_map_bus, | ||
| 260 | .read = pci_generic_config_read, | ||
| 261 | .write = pci_generic_config_write, | ||
| 262 | }; | ||
| 263 | |||
| 264 | static irqreturn_t nwl_pcie_misc_handler(int irq, void *data) | ||
| 265 | { | ||
| 266 | struct nwl_pcie *pcie = data; | ||
| 267 | struct device *dev = pcie->dev; | ||
| 268 | u32 misc_stat; | ||
| 269 | |||
| 270 | /* Check for pending misc interrupts */ | ||
| 271 | misc_stat = nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & | ||
| 272 | MSGF_MISC_SR_MASKALL; | ||
| 273 | if (!misc_stat) | ||
| 274 | return IRQ_NONE; | ||
| 275 | |||
| 276 | if (misc_stat & MSGF_MISC_SR_RXMSG_OVER) | ||
| 277 | dev_err(dev, "Received Message FIFO Overflow\n"); | ||
| 278 | |||
| 279 | if (misc_stat & MSGF_MISC_SR_SLAVE_ERR) | ||
| 280 | dev_err(dev, "Slave error\n"); | ||
| 281 | |||
| 282 | if (misc_stat & MSGF_MISC_SR_MASTER_ERR) | ||
| 283 | dev_err(dev, "Master error\n"); | ||
| 284 | |||
| 285 | if (misc_stat & MSGF_MISC_SR_I_ADDR_ERR) | ||
| 286 | dev_err(dev, "In Misc Ingress address translation error\n"); | ||
| 287 | |||
| 288 | if (misc_stat & MSGF_MISC_SR_E_ADDR_ERR) | ||
| 289 | dev_err(dev, "In Misc Egress address translation error\n"); | ||
| 290 | |||
| 291 | if (misc_stat & MSGF_MISC_SR_FATAL_AER) | ||
| 292 | dev_err(dev, "Fatal Error in AER Capability\n"); | ||
| 293 | |||
| 294 | if (misc_stat & MSGF_MISC_SR_NON_FATAL_AER) | ||
| 295 | dev_err(dev, "Non-Fatal Error in AER Capability\n"); | ||
| 296 | |||
| 297 | if (misc_stat & MSGF_MISC_SR_CORR_AER) | ||
| 298 | dev_err(dev, "Correctable Error in AER Capability\n"); | ||
| 299 | |||
| 300 | if (misc_stat & MSGF_MISC_SR_UR_DETECT) | ||
| 301 | dev_err(dev, "Unsupported request Detected\n"); | ||
| 302 | |||
| 303 | if (misc_stat & MSGF_MISC_SR_NON_FATAL_DEV) | ||
| 304 | dev_err(dev, "Non-Fatal Error Detected\n"); | ||
| 305 | |||
| 306 | if (misc_stat & MSGF_MISC_SR_FATAL_DEV) | ||
| 307 | dev_err(dev, "Fatal Error Detected\n"); | ||
| 308 | |||
| 309 | if (misc_stat & MSGF_MSIC_SR_LINK_AUTO_BWIDTH) | ||
| 310 | dev_info(dev, "Link Autonomous Bandwidth Management Status bit set\n"); | ||
| 311 | |||
| 312 | if (misc_stat & MSGF_MSIC_SR_LINK_BWIDTH) | ||
| 313 | dev_info(dev, "Link Bandwidth Management Status bit set\n"); | ||
| 314 | |||
| 315 | /* Clear misc interrupt status */ | ||
| 316 | nwl_bridge_writel(pcie, misc_stat, MSGF_MISC_STATUS); | ||
| 317 | |||
| 318 | return IRQ_HANDLED; | ||
| 319 | } | ||
| 320 | |||
| 321 | static void nwl_pcie_leg_handler(struct irq_desc *desc) | ||
| 322 | { | ||
| 323 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 324 | struct nwl_pcie *pcie; | ||
| 325 | unsigned long status; | ||
| 326 | u32 bit; | ||
| 327 | u32 virq; | ||
| 328 | |||
| 329 | chained_irq_enter(chip, desc); | ||
| 330 | pcie = irq_desc_get_handler_data(desc); | ||
| 331 | |||
| 332 | while ((status = nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & | ||
| 333 | MSGF_LEG_SR_MASKALL) != 0) { | ||
| 334 | for_each_set_bit(bit, &status, PCI_NUM_INTX) { | ||
| 335 | virq = irq_find_mapping(pcie->legacy_irq_domain, bit); | ||
| 336 | if (virq) | ||
| 337 | generic_handle_irq(virq); | ||
| 338 | } | ||
| 339 | } | ||
| 340 | |||
| 341 | chained_irq_exit(chip, desc); | ||
| 342 | } | ||
| 343 | |||
| 344 | static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg) | ||
| 345 | { | ||
| 346 | struct nwl_msi *msi; | ||
| 347 | unsigned long status; | ||
| 348 | u32 bit; | ||
| 349 | u32 virq; | ||
| 350 | |||
| 351 | msi = &pcie->msi; | ||
| 352 | |||
| 353 | while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) { | ||
| 354 | for_each_set_bit(bit, &status, 32) { | ||
| 355 | nwl_bridge_writel(pcie, 1 << bit, status_reg); | ||
| 356 | virq = irq_find_mapping(msi->dev_domain, bit); | ||
| 357 | if (virq) | ||
| 358 | generic_handle_irq(virq); | ||
| 359 | } | ||
| 360 | } | ||
| 361 | } | ||
| 362 | |||
| 363 | static void nwl_pcie_msi_handler_high(struct irq_desc *desc) | ||
| 364 | { | ||
| 365 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 366 | struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); | ||
| 367 | |||
| 368 | chained_irq_enter(chip, desc); | ||
| 369 | nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_HI); | ||
| 370 | chained_irq_exit(chip, desc); | ||
| 371 | } | ||
| 372 | |||
| 373 | static void nwl_pcie_msi_handler_low(struct irq_desc *desc) | ||
| 374 | { | ||
| 375 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 376 | struct nwl_pcie *pcie = irq_desc_get_handler_data(desc); | ||
| 377 | |||
| 378 | chained_irq_enter(chip, desc); | ||
| 379 | nwl_pcie_handle_msi_irq(pcie, MSGF_MSI_STATUS_LO); | ||
| 380 | chained_irq_exit(chip, desc); | ||
| 381 | } | ||
| 382 | |||
| 383 | static void nwl_mask_leg_irq(struct irq_data *data) | ||
| 384 | { | ||
| 385 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 386 | struct nwl_pcie *pcie; | ||
| 387 | unsigned long flags; | ||
| 388 | u32 mask; | ||
| 389 | u32 val; | ||
| 390 | |||
| 391 | pcie = irq_desc_get_chip_data(desc); | ||
| 392 | mask = BIT(data->hwirq); | ||
| 393 | raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); | ||
| 394 | val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); | ||
| 395 | nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); | ||
| 396 | raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); | ||
| 397 | } | ||
| 398 | |||
| 399 | static void nwl_unmask_leg_irq(struct irq_data *data) | ||
| 400 | { | ||
| 401 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 402 | struct nwl_pcie *pcie; | ||
| 403 | unsigned long flags; | ||
| 404 | u32 mask; | ||
| 405 | u32 val; | ||
| 406 | |||
| 407 | pcie = irq_desc_get_chip_data(desc); | ||
| 408 | mask = BIT(data->hwirq); | ||
| 409 | raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); | ||
| 410 | val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); | ||
| 411 | nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); | ||
| 412 | raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); | ||
| 413 | } | ||
| 414 | |||
| 415 | static struct irq_chip nwl_leg_irq_chip = { | ||
| 416 | .name = "nwl_pcie:legacy", | ||
| 417 | .irq_enable = nwl_unmask_leg_irq, | ||
| 418 | .irq_disable = nwl_mask_leg_irq, | ||
| 419 | .irq_mask = nwl_mask_leg_irq, | ||
| 420 | .irq_unmask = nwl_unmask_leg_irq, | ||
| 421 | }; | ||
| 422 | |||
| 423 | static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, | ||
| 424 | irq_hw_number_t hwirq) | ||
| 425 | { | ||
| 426 | irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); | ||
| 427 | irq_set_chip_data(irq, domain->host_data); | ||
| 428 | irq_set_status_flags(irq, IRQ_LEVEL); | ||
| 429 | |||
| 430 | return 0; | ||
| 431 | } | ||
| 432 | |||
| 433 | static const struct irq_domain_ops legacy_domain_ops = { | ||
| 434 | .map = nwl_legacy_map, | ||
| 435 | .xlate = pci_irqd_intx_xlate, | ||
| 436 | }; | ||
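Note that pci_irqd_intx_xlate translates the 1-based INTA..INTD interrupt cells found in device trees into 0-based hwirqs, so hwirq n corresponds directly to status/mask bit n:

```c
/* DT interrupt cell 1..4 (INTA..INTD) -> hwirq 0..3, matching
 * MSGF_LEG_SR_INTA..MSGF_LEG_SR_INTD (bits 0..3); hence BIT(hwirq)
 * in the mask helpers and irq_find_mapping(domain, bit) in the
 * status handler. */
```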
| 437 | |||
| 438 | #ifdef CONFIG_PCI_MSI | ||
| 439 | static struct irq_chip nwl_msi_irq_chip = { | ||
| 440 | .name = "nwl_pcie:msi", | ||
| 441 | .irq_enable = unmask_msi_irq, | ||
| 442 | .irq_disable = mask_msi_irq, | ||
| 443 | .irq_mask = mask_msi_irq, | ||
| 444 | .irq_unmask = unmask_msi_irq, | ||
| 445 | |||
| 446 | }; | ||
| 447 | |||
| 448 | static struct msi_domain_info nwl_msi_domain_info = { | ||
| 449 | .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 450 | MSI_FLAG_MULTI_PCI_MSI), | ||
| 451 | .chip = &nwl_msi_irq_chip, | ||
| 452 | }; | ||
| 453 | #endif | ||
| 454 | |||
| 455 | static void nwl_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 456 | { | ||
| 457 | struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); | ||
| 458 | phys_addr_t msi_addr = pcie->phys_pcie_reg_base; | ||
| 459 | |||
| 460 | msg->address_lo = lower_32_bits(msi_addr); | ||
| 461 | msg->address_hi = upper_32_bits(msi_addr); | ||
| 462 | msg->data = data->hwirq; | ||
| 463 | } | ||
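The controller uses its own register window as the MSI doorbell: an endpoint raises MSI n by writing n to that physical address. An illustration with an assumed base address (0xfd0e0000 is purely hypothetical):

```c
/* With phys_pcie_reg_base == 0xfd0e0000, an MSI allocated hwirq 5
 * is composed as address_hi = 0x0, address_lo = 0xfd0e0000 and
 * data = 5; the write then surfaces as bit 5 of MSGF_MSI_STATUS_LO. */
```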
| 464 | |||
| 465 | static int nwl_msi_set_affinity(struct irq_data *irq_data, | ||
| 466 | const struct cpumask *mask, bool force) | ||
| 467 | { | ||
| 468 | return -EINVAL; | ||
| 469 | } | ||
| 470 | |||
| 471 | static struct irq_chip nwl_irq_chip = { | ||
| 472 | .name = "Xilinx MSI", | ||
| 473 | .irq_compose_msi_msg = nwl_compose_msi_msg, | ||
| 474 | .irq_set_affinity = nwl_msi_set_affinity, | ||
| 475 | }; | ||
| 476 | |||
| 477 | static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, | ||
| 478 | unsigned int nr_irqs, void *args) | ||
| 479 | { | ||
| 480 | struct nwl_pcie *pcie = domain->host_data; | ||
| 481 | struct nwl_msi *msi = &pcie->msi; | ||
| 482 | int bit; | ||
| 483 | int i; | ||
| 484 | |||
| 485 | mutex_lock(&msi->lock); | ||
| 486 | bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0, | ||
| 487 | nr_irqs, 0); | ||
| 488 | if (bit >= INT_PCI_MSI_NR) { | ||
| 489 | mutex_unlock(&msi->lock); | ||
| 490 | return -ENOSPC; | ||
| 491 | } | ||
| 492 | |||
| 493 | bitmap_set(msi->bitmap, bit, nr_irqs); | ||
| 494 | |||
| 495 | for (i = 0; i < nr_irqs; i++) { | ||
| 496 | irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip, | ||
| 497 | domain->host_data, handle_simple_irq, | ||
| 498 | NULL, NULL); | ||
| 499 | } | ||
| 500 | mutex_unlock(&msi->lock); | ||
| 501 | return 0; | ||
| 502 | } | ||
| 503 | |||
| 504 | static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq, | ||
| 505 | unsigned int nr_irqs) | ||
| 506 | { | ||
| 507 | struct irq_data *data = irq_domain_get_irq_data(domain, virq); | ||
| 508 | struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data); | ||
| 509 | struct nwl_msi *msi = &pcie->msi; | ||
| 510 | |||
| 511 | mutex_lock(&msi->lock); | ||
| 512 | bitmap_clear(msi->bitmap, data->hwirq, nr_irqs); | ||
| 513 | mutex_unlock(&msi->lock); | ||
| 514 | } | ||
| 515 | |||
| 516 | static const struct irq_domain_ops dev_msi_domain_ops = { | ||
| 517 | .alloc = nwl_irq_domain_alloc, | ||
| 518 | .free = nwl_irq_domain_free, | ||
| 519 | }; | ||
| 520 | |||
| 521 | static int nwl_pcie_init_msi_irq_domain(struct nwl_pcie *pcie) | ||
| 522 | { | ||
| 523 | #ifdef CONFIG_PCI_MSI | ||
| 524 | struct device *dev = pcie->dev; | ||
| 525 | struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node); | ||
| 526 | struct nwl_msi *msi = &pcie->msi; | ||
| 527 | |||
| 528 | msi->dev_domain = irq_domain_add_linear(NULL, INT_PCI_MSI_NR, | ||
| 529 | &dev_msi_domain_ops, pcie); | ||
| 530 | if (!msi->dev_domain) { | ||
| 531 | dev_err(dev, "failed to create dev IRQ domain\n"); | ||
| 532 | return -ENOMEM; | ||
| 533 | } | ||
| 534 | msi->msi_domain = pci_msi_create_irq_domain(fwnode, | ||
| 535 | &nwl_msi_domain_info, | ||
| 536 | msi->dev_domain); | ||
| 537 | if (!msi->msi_domain) { | ||
| 538 | dev_err(dev, "failed to create msi IRQ domain\n"); | ||
| 539 | irq_domain_remove(msi->dev_domain); | ||
| 540 | return -ENOMEM; | ||
| 541 | } | ||
| 542 | #endif | ||
| 543 | return 0; | ||
| 544 | } | ||
| 545 | |||
| 546 | static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) | ||
| 547 | { | ||
| 548 | struct device *dev = pcie->dev; | ||
| 549 | struct device_node *node = dev->of_node; | ||
| 550 | struct device_node *legacy_intc_node; | ||
| 551 | |||
| 552 | legacy_intc_node = of_get_next_child(node, NULL); | ||
| 553 | if (!legacy_intc_node) { | ||
| 554 | dev_err(dev, "No legacy intc node found\n"); | ||
| 555 | return -EINVAL; | ||
| 556 | } | ||
| 557 | |||
| 558 | pcie->legacy_irq_domain = irq_domain_add_linear(legacy_intc_node, | ||
| 559 | PCI_NUM_INTX, | ||
| 560 | &legacy_domain_ops, | ||
| 561 | pcie); | ||
| 562 | |||
| 563 | if (!pcie->legacy_irq_domain) { | ||
| 564 | dev_err(dev, "failed to create IRQ domain\n"); | ||
| 565 | return -ENOMEM; | ||
| 566 | } | ||
| 567 | |||
| 568 | raw_spin_lock_init(&pcie->leg_mask_lock); | ||
| 569 | nwl_pcie_init_msi_irq_domain(pcie); | ||
| 570 | return 0; | ||
| 571 | } | ||
| 572 | |||
| 573 | static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) | ||
| 574 | { | ||
| 575 | struct device *dev = pcie->dev; | ||
| 576 | struct platform_device *pdev = to_platform_device(dev); | ||
| 577 | struct nwl_msi *msi = &pcie->msi; | ||
| 578 | unsigned long base; | ||
| 579 | int ret; | ||
| 580 | int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long); | ||
| 581 | |||
| 582 | mutex_init(&msi->lock); | ||
| 583 | |||
| 584 | msi->bitmap = kzalloc(size, GFP_KERNEL); | ||
| 585 | if (!msi->bitmap) | ||
| 586 | return -ENOMEM; | ||
| 587 | |||
| 588 | /* Get msi_1 IRQ number */ | ||
| 589 | msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); | ||
| 590 | if (msi->irq_msi1 < 0) { | ||
| 591 | dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); | ||
| 592 | ret = -EINVAL; | ||
| 593 | goto err; | ||
| 594 | } | ||
| 595 | |||
| 596 | irq_set_chained_handler_and_data(msi->irq_msi1, | ||
| 597 | nwl_pcie_msi_handler_high, pcie); | ||
| 598 | |||
| 599 | /* Get msi_0 IRQ number */ | ||
| 600 | msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); | ||
| 601 | if (msi->irq_msi0 < 0) { | ||
| 602 | dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); | ||
| 603 | ret = -EINVAL; | ||
| 604 | goto err; | ||
| 605 | } | ||
| 606 | |||
| 607 | irq_set_chained_handler_and_data(msi->irq_msi0, | ||
| 608 | nwl_pcie_msi_handler_low, pcie); | ||
| 609 | |||
| 610 | /* Check for msii_present bit */ | ||
| 611 | ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT; | ||
| 612 | if (!ret) { | ||
| 613 | dev_err(dev, "MSI not present\n"); | ||
| 614 | ret = -EIO; | ||
| 615 | goto err; | ||
| 616 | } | ||
| 617 | |||
| 618 | /* Enable MSII */ | ||
| 619 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | | ||
| 620 | MSII_ENABLE, I_MSII_CONTROL); | ||
| 621 | |||
| 622 | /* Enable MSII status */ | ||
| 623 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, I_MSII_CONTROL) | | ||
| 624 | MSII_STATUS_ENABLE, I_MSII_CONTROL); | ||
| 625 | |||
| 626 | /* Program the MSI doorbell (MSII) base address */ | ||
| 627 | base = pcie->phys_pcie_reg_base; | ||
| 628 | nwl_bridge_writel(pcie, lower_32_bits(base), I_MSII_BASE_LO); | ||
| 629 | nwl_bridge_writel(pcie, upper_32_bits(base), I_MSII_BASE_HI); | ||
| 630 | |||
| 631 | /* | ||
| 632 | * For high range MSI interrupts: disable, clear any pending, | ||
| 633 | * and enable | ||
| 634 | */ | ||
| 635 | nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_HI); | ||
| 636 | |||
| 637 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_HI) & | ||
| 638 | MSGF_MSI_SR_HI_MASK, MSGF_MSI_STATUS_HI); | ||
| 639 | |||
| 640 | nwl_bridge_writel(pcie, MSGF_MSI_SR_HI_MASK, MSGF_MSI_MASK_HI); | ||
| 641 | |||
| 642 | /* | ||
| 643 | * For low range MSI interrupts: disable, clear any pending, | ||
| 644 | * and enable | ||
| 645 | */ | ||
| 646 | nwl_bridge_writel(pcie, 0, MSGF_MSI_MASK_LO); | ||
| 647 | |||
| 648 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MSI_STATUS_LO) & | ||
| 649 | MSGF_MSI_SR_LO_MASK, MSGF_MSI_STATUS_LO); | ||
| 650 | |||
| 651 | nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO); | ||
| 652 | |||
| 653 | return 0; | ||
| 654 | err: | ||
| 655 | kfree(msi->bitmap); | ||
| 656 | msi->bitmap = NULL; | ||
| 657 | return ret; | ||
| 658 | } | ||
| 659 | |||
| 660 | static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) | ||
| 661 | { | ||
| 662 | struct device *dev = pcie->dev; | ||
| 663 | struct platform_device *pdev = to_platform_device(dev); | ||
| 664 | u32 breg_val, ecam_val, first_busno = 0; | ||
| 665 | int err; | ||
| 666 | |||
| 667 | breg_val = nwl_bridge_readl(pcie, E_BREG_CAPABILITIES) & BREG_PRESENT; | ||
| 668 | if (!breg_val) { | ||
| 669 | dev_err(dev, "BREG is not present\n"); | ||
| 670 | return -ENODEV; | ||
| 671 | } | ||
| 672 | |||
| 673 | /* Program the physical bridge register base address */ | ||
| 674 | nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_breg_base), | ||
| 675 | E_BREG_BASE_LO); | ||
| 676 | nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_breg_base), | ||
| 677 | E_BREG_BASE_HI); | ||
| 678 | |||
| 679 | /* Enable BREG */ | ||
| 680 | nwl_bridge_writel(pcie, ~BREG_ENABLE_FORCE & BREG_ENABLE, | ||
| 681 | E_BREG_CONTROL); | ||
| 682 | |||
| 683 | /* Disable DMA channel registers */ | ||
| 684 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_PCIE_RX0) | | ||
| 685 | CFG_DMA_REG_BAR, BRCFG_PCIE_RX0); | ||
| 686 | |||
| 687 | /* Enable Ingress subtractive decode translation */ | ||
| 688 | nwl_bridge_writel(pcie, SET_ISUB_CONTROL, I_ISUB_CONTROL); | ||
| 689 | |||
| 690 | /* Enable forwarding of PM, INTx and error messages */ | ||
| 691 | nwl_bridge_writel(pcie, CFG_ENABLE_MSG_FILTER_MASK, | ||
| 692 | BRCFG_PCIE_RX_MSG_FILTER); | ||
| 693 | |||
| 694 | err = nwl_wait_for_link(pcie); | ||
| 695 | if (err) | ||
| 696 | return err; | ||
| 697 | |||
| 698 | ecam_val = nwl_bridge_readl(pcie, E_ECAM_CAPABILITIES) & E_ECAM_PRESENT; | ||
| 699 | if (!ecam_val) { | ||
| 700 | dev_err(dev, "ECAM is not present\n"); | ||
| 701 | return -ENODEV; | ||
| 702 | } | ||
| 703 | |||
| 704 | /* Enable ECAM */ | ||
| 705 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | | ||
| 706 | E_ECAM_CR_ENABLE, E_ECAM_CONTROL); | ||
| 707 | |||
| 708 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, E_ECAM_CONTROL) | | ||
| 709 | (pcie->ecam_value << E_ECAM_SIZE_SHIFT), | ||
| 710 | E_ECAM_CONTROL); | ||
| 711 | |||
| 712 | nwl_bridge_writel(pcie, lower_32_bits(pcie->phys_ecam_base), | ||
| 713 | E_ECAM_BASE_LO); | ||
| 714 | nwl_bridge_writel(pcie, upper_32_bits(pcie->phys_ecam_base), | ||
| 715 | E_ECAM_BASE_HI); | ||
| 716 | |||
| 717 | /* Get bus range */ | ||
| 718 | ecam_val = nwl_bridge_readl(pcie, E_ECAM_CONTROL); | ||
| 719 | pcie->last_busno = (ecam_val & E_ECAM_SIZE_LOC) >> E_ECAM_SIZE_SHIFT; | ||
| 720 | /* Write primary, secondary and subordinate bus numbers */ | ||
| 721 | ecam_val = first_busno; | ||
| 722 | ecam_val |= (first_busno + 1) << 8; | ||
| 723 | ecam_val |= (pcie->last_busno << E_ECAM_SIZE_SHIFT); | ||
| 724 | writel(ecam_val, (pcie->ecam_base + PCI_PRIMARY_BUS)); | ||
| 725 | |||
| 726 | if (nwl_pcie_link_up(pcie)) | ||
| 727 | dev_info(dev, "Link is UP\n"); | ||
| 728 | else | ||
| 729 | dev_info(dev, "Link is DOWN\n"); | ||
| 730 | |||
| 731 | /* Get misc IRQ number */ | ||
| 732 | pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); | ||
| 733 | if (pcie->irq_misc < 0) { | ||
| 734 | dev_err(dev, "failed to get misc IRQ %d\n", | ||
| 735 | pcie->irq_misc); | ||
| 736 | return -EINVAL; | ||
| 737 | } | ||
| 738 | |||
| 739 | err = devm_request_irq(dev, pcie->irq_misc, | ||
| 740 | nwl_pcie_misc_handler, IRQF_SHARED, | ||
| 741 | "nwl_pcie:misc", pcie); | ||
| 742 | if (err) { | ||
| 743 | dev_err(dev, "fail to register misc IRQ#%d\n", | ||
| 744 | pcie->irq_misc); | ||
| 745 | return err; | ||
| 746 | } | ||
| 747 | |||
| 748 | /* Disable all misc interrupts */ | ||
| 749 | nwl_bridge_writel(pcie, (u32)~MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); | ||
| 750 | |||
| 751 | /* Clear pending misc interrupts */ | ||
| 752 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_MISC_STATUS) & | ||
| 753 | MSGF_MISC_SR_MASKALL, MSGF_MISC_STATUS); | ||
| 754 | |||
| 755 | /* Enable all misc interrupts */ | ||
| 756 | nwl_bridge_writel(pcie, MSGF_MISC_SR_MASKALL, MSGF_MISC_MASK); | ||
| 757 | |||
| 758 | |||
| 759 | /* Disable all legacy interrupts */ | ||
| 760 | nwl_bridge_writel(pcie, (u32)~MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); | ||
| 761 | |||
| 762 | /* Clear pending legacy interrupts */ | ||
| 763 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, MSGF_LEG_STATUS) & | ||
| 764 | MSGF_LEG_SR_MASKALL, MSGF_LEG_STATUS); | ||
| 765 | |||
| 766 | /* Enable all legacy interrupts */ | ||
| 767 | nwl_bridge_writel(pcie, MSGF_LEG_SR_MASKALL, MSGF_LEG_MASK); | ||
| 768 | |||
| 769 | /* Enable the bridge config interrupt */ | ||
| 770 | nwl_bridge_writel(pcie, nwl_bridge_readl(pcie, BRCFG_INTERRUPT) | | ||
| 771 | BRCFG_INTERRUPT_MASK, BRCFG_INTERRUPT); | ||
| 772 | |||
| 773 | return 0; | ||
| 774 | } | ||
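
The single `writel()` of `ecam_val` near the end of `nwl_pcie_bridge_init()` programs the Root Port's primary, secondary, and subordinate bus numbers with one 32-bit access at `PCI_PRIMARY_BUS` (offset 0x18 of a type-1 header). A minimal standalone sketch of the packing, assuming `E_ECAM_SIZE_SHIFT` is 16 (its value in this driver's register definitions, which are outside this hunk) so the subordinate bus lands in byte 2:

```c
#include <stdint.h>

/* Pack the three type-1 header bus-number fields into the dword at
 * config offset 0x18 (PCI_PRIMARY_BUS). Illustration only, not driver code. */
static uint32_t pack_bus_numbers(uint8_t primary, uint8_t secondary,
				 uint8_t subordinate)
{
	return (uint32_t)primary |		/* bits  7:0  primary bus */
	       ((uint32_t)secondary << 8) |	/* bits 15:8  secondary bus */
	       ((uint32_t)subordinate << 16);	/* bits 23:16 subordinate bus */
}

/* e.g. pack_bus_numbers(0, 1, 0x1f) == 0x001f0100 */
```
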
| 775 | |||
| 776 | static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, | ||
| 777 | struct platform_device *pdev) | ||
| 778 | { | ||
| 779 | struct device *dev = pcie->dev; | ||
| 780 | struct device_node *node = dev->of_node; | ||
| 781 | struct resource *res; | ||
| 782 | const char *type; | ||
| 783 | |||
| 784 | /* Check for device type */ | ||
| 785 | type = of_get_property(node, "device_type", NULL); | ||
| 786 | if (!type || strcmp(type, "pci")) { | ||
| 787 | dev_err(dev, "invalid \"device_type\" %s\n", type); | ||
| 788 | return -EINVAL; | ||
| 789 | } | ||
| 790 | |||
| 791 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "breg"); | ||
| 792 | pcie->breg_base = devm_ioremap_resource(dev, res); | ||
| 793 | if (IS_ERR(pcie->breg_base)) | ||
| 794 | return PTR_ERR(pcie->breg_base); | ||
| 795 | pcie->phys_breg_base = res->start; | ||
| 796 | |||
| 797 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcireg"); | ||
| 798 | pcie->pcireg_base = devm_ioremap_resource(dev, res); | ||
| 799 | if (IS_ERR(pcie->pcireg_base)) | ||
| 800 | return PTR_ERR(pcie->pcireg_base); | ||
| 801 | pcie->phys_pcie_reg_base = res->start; | ||
| 802 | |||
| 803 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); | ||
| 804 | pcie->ecam_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 805 | if (IS_ERR(pcie->ecam_base)) | ||
| 806 | return PTR_ERR(pcie->ecam_base); | ||
| 807 | pcie->phys_ecam_base = res->start; | ||
| 808 | |||
| 809 | /* Get intx IRQ number */ | ||
| 810 | pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); | ||
| 811 | if (pcie->irq_intx < 0) { | ||
| 812 | dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); | ||
| 813 | return pcie->irq_intx; | ||
| 814 | } | ||
| 815 | |||
| 816 | irq_set_chained_handler_and_data(pcie->irq_intx, | ||
| 817 | nwl_pcie_leg_handler, pcie); | ||
| 818 | |||
| 819 | return 0; | ||
| 820 | } | ||
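
Note that the "intx" interrupt is wired up with `irq_set_chained_handler_and_data()` rather than `devm_request_irq()`: a chained handler runs in the parent interrupt's hard-IRQ context and demultiplexes into the INTx IRQ domain. `nwl_pcie_leg_handler()` itself falls outside this hunk; the sketch below shows the generic shape such a demux handler takes. `read_intx_status()` and the `legacy_irq_domain` field are placeholders, not necessarily this driver's names:

```c
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

/* Hypothetical chained-handler skeleton for INTx demultiplexing. */
static void intx_demux_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct nwl_pcie *pcie = irq_desc_get_handler_data(desc);
	unsigned long status, bit;

	chained_irq_enter(chip, desc);		/* ack/mask the parent IRQ */

	status = read_intx_status(pcie);	/* placeholder register read */
	for_each_set_bit(bit, &status, PCI_NUM_INTX)
		generic_handle_irq(irq_find_mapping(pcie->legacy_irq_domain,
						    bit));

	chained_irq_exit(chip, desc);		/* unmask/eoi the parent IRQ */
}
```
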
| 821 | |||
| 822 | static const struct of_device_id nwl_pcie_of_match[] = { | ||
| 823 | { .compatible = "xlnx,nwl-pcie-2.11", }, | ||
| 824 | {} | ||
| 825 | }; | ||
| 826 | |||
| 827 | static int nwl_pcie_probe(struct platform_device *pdev) | ||
| 828 | { | ||
| 829 | struct device *dev = &pdev->dev; | ||
| 830 | struct nwl_pcie *pcie; | ||
| 831 | struct pci_bus *bus; | ||
| 832 | struct pci_bus *child; | ||
| 833 | struct pci_host_bridge *bridge; | ||
| 834 | int err; | ||
| 835 | resource_size_t iobase = 0; | ||
| 836 | LIST_HEAD(res); | ||
| 837 | |||
| 838 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 839 | if (!bridge) | ||
| 840 | return -ENODEV; | ||
| 841 | |||
| 842 | pcie = pci_host_bridge_priv(bridge); | ||
| 843 | |||
| 844 | pcie->dev = dev; | ||
| 845 | pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; | ||
| 846 | |||
| 847 | err = nwl_pcie_parse_dt(pcie, pdev); | ||
| 848 | if (err) { | ||
| 849 | dev_err(dev, "Parsing DT failed\n"); | ||
| 850 | return err; | ||
| 851 | } | ||
| 852 | |||
| 853 | err = nwl_pcie_bridge_init(pcie); | ||
| 854 | if (err) { | ||
| 855 | dev_err(dev, "HW Initialization failed\n"); | ||
| 856 | return err; | ||
| 857 | } | ||
| 858 | |||
| 859 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, | ||
| 860 | &iobase); | ||
| 861 | if (err) { | ||
| 862 | dev_err(dev, "Getting bridge resources failed\n"); | ||
| 863 | return err; | ||
| 864 | } | ||
| 865 | |||
| 866 | err = devm_request_pci_bus_resources(dev, &res); | ||
| 867 | if (err) | ||
| 868 | goto error; | ||
| 869 | |||
| 870 | err = nwl_pcie_init_irq_domain(pcie); | ||
| 871 | if (err) { | ||
| 872 | dev_err(dev, "Failed creating IRQ Domain\n"); | ||
| 873 | goto error; | ||
| 874 | } | ||
| 875 | |||
| 876 | list_splice_init(&res, &bridge->windows); | ||
| 877 | bridge->dev.parent = dev; | ||
| 878 | bridge->sysdata = pcie; | ||
| 879 | bridge->busnr = pcie->root_busno; | ||
| 880 | bridge->ops = &nwl_pcie_ops; | ||
| 881 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 882 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 883 | |||
| 884 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 885 | err = nwl_pcie_enable_msi(pcie); | ||
| 886 | if (err < 0) { | ||
| 887 | dev_err(dev, "failed to enable MSI support: %d\n", err); | ||
| 888 | goto error; | ||
| 889 | } | ||
| 890 | } | ||
| 891 | |||
| 892 | err = pci_scan_root_bus_bridge(bridge); | ||
| 893 | if (err) | ||
| 894 | goto error; | ||
| 895 | |||
| 896 | bus = bridge->bus; | ||
| 897 | |||
| 898 | pci_assign_unassigned_bus_resources(bus); | ||
| 899 | list_for_each_entry(child, &bus->children, node) | ||
| 900 | pcie_bus_configure_settings(child); | ||
| 901 | pci_bus_add_devices(bus); | ||
| 902 | return 0; | ||
| 903 | |||
| 904 | error: | ||
| 905 | pci_free_resource_list(&res); | ||
| 906 | return err; | ||
| 907 | } | ||
| 908 | |||
| 909 | static struct platform_driver nwl_pcie_driver = { | ||
| 910 | .driver = { | ||
| 911 | .name = "nwl-pcie", | ||
| 912 | .suppress_bind_attrs = true, | ||
| 913 | .of_match_table = nwl_pcie_of_match, | ||
| 914 | }, | ||
| 915 | .probe = nwl_pcie_probe, | ||
| 916 | }; | ||
| 917 | builtin_platform_driver(nwl_pcie_driver); | ||
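
A note on `builtin_platform_driver()`: the drivers in this hunk are `bool` Kconfig symbols and can only be built in, so they register via a plain initcall and omit any module-exit path. The macro is roughly equivalent to the following (see include/linux/platform_device.h for the real definition):

```c
/* Approximate expansion of builtin_platform_driver(nwl_pcie_driver); */
static int __init nwl_pcie_driver_init(void)
{
	return platform_driver_register(&nwl_pcie_driver);
}
device_initcall(nwl_pcie_driver_init);
```
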
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c new file mode 100644 index 000000000000..b110a3a814e3 --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx.c | |||
| @@ -0,0 +1,702 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0+ | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Xilinx AXI PCIe Bridge | ||
| 4 | * | ||
| 5 | * Copyright (c) 2012 - 2014 Xilinx, Inc. | ||
| 6 | * | ||
| 7 | * Based on the Tegra PCIe driver | ||
| 8 | * | ||
| 9 | * Bits taken from Synopsys DesignWare Host controller driver and | ||
| 10 | * ARM PCI Host generic driver. | ||
| 11 | */ | ||
| 12 | |||
| 13 | #include <linux/interrupt.h> | ||
| 14 | #include <linux/irq.h> | ||
| 15 | #include <linux/irqdomain.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/init.h> | ||
| 18 | #include <linux/msi.h> | ||
| 19 | #include <linux/of_address.h> | ||
| 20 | #include <linux/of_pci.h> | ||
| 21 | #include <linux/of_platform.h> | ||
| 22 | #include <linux/of_irq.h> | ||
| 23 | #include <linux/pci.h> | ||
| 24 | #include <linux/platform_device.h> | ||
| 25 | |||
| 26 | #include "../pci.h" | ||
| 27 | |||
| 28 | /* Register definitions */ | ||
| 29 | #define XILINX_PCIE_REG_BIR 0x00000130 | ||
| 30 | #define XILINX_PCIE_REG_IDR 0x00000138 | ||
| 31 | #define XILINX_PCIE_REG_IMR 0x0000013c | ||
| 32 | #define XILINX_PCIE_REG_PSCR 0x00000144 | ||
| 33 | #define XILINX_PCIE_REG_RPSC 0x00000148 | ||
| 34 | #define XILINX_PCIE_REG_MSIBASE1 0x0000014c | ||
| 35 | #define XILINX_PCIE_REG_MSIBASE2 0x00000150 | ||
| 36 | #define XILINX_PCIE_REG_RPEFR 0x00000154 | ||
| 37 | #define XILINX_PCIE_REG_RPIFR1 0x00000158 | ||
| 38 | #define XILINX_PCIE_REG_RPIFR2 0x0000015c | ||
| 39 | |||
| 40 | /* Interrupt registers definitions */ | ||
| 41 | #define XILINX_PCIE_INTR_LINK_DOWN BIT(0) | ||
| 42 | #define XILINX_PCIE_INTR_ECRC_ERR BIT(1) | ||
| 43 | #define XILINX_PCIE_INTR_STR_ERR BIT(2) | ||
| 44 | #define XILINX_PCIE_INTR_HOT_RESET BIT(3) | ||
| 45 | #define XILINX_PCIE_INTR_CFG_TIMEOUT BIT(8) | ||
| 46 | #define XILINX_PCIE_INTR_CORRECTABLE BIT(9) | ||
| 47 | #define XILINX_PCIE_INTR_NONFATAL BIT(10) | ||
| 48 | #define XILINX_PCIE_INTR_FATAL BIT(11) | ||
| 49 | #define XILINX_PCIE_INTR_INTX BIT(16) | ||
| 50 | #define XILINX_PCIE_INTR_MSI BIT(17) | ||
| 51 | #define XILINX_PCIE_INTR_SLV_UNSUPP BIT(20) | ||
| 52 | #define XILINX_PCIE_INTR_SLV_UNEXP BIT(21) | ||
| 53 | #define XILINX_PCIE_INTR_SLV_COMPL BIT(22) | ||
| 54 | #define XILINX_PCIE_INTR_SLV_ERRP BIT(23) | ||
| 55 | #define XILINX_PCIE_INTR_SLV_CMPABT BIT(24) | ||
| 56 | #define XILINX_PCIE_INTR_SLV_ILLBUR BIT(25) | ||
| 57 | #define XILINX_PCIE_INTR_MST_DECERR BIT(26) | ||
| 58 | #define XILINX_PCIE_INTR_MST_SLVERR BIT(27) | ||
| 59 | #define XILINX_PCIE_INTR_MST_ERRP BIT(28) | ||
| 60 | #define XILINX_PCIE_IMR_ALL_MASK 0x1FF30FED | ||
| 61 | #define XILINX_PCIE_IMR_ENABLE_MASK 0x1FF30F0D | ||
| 62 | #define XILINX_PCIE_IDR_ALL_MASK 0xFFFFFFFF | ||
| 63 | |||
| 64 | /* Root Port Error FIFO Read Register definitions */ | ||
| 65 | #define XILINX_PCIE_RPEFR_ERR_VALID BIT(18) | ||
| 66 | #define XILINX_PCIE_RPEFR_REQ_ID GENMASK(15, 0) | ||
| 67 | #define XILINX_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF | ||
| 68 | |||
| 69 | /* Root Port Interrupt FIFO Read Register 1 definitions */ | ||
| 70 | #define XILINX_PCIE_RPIFR1_INTR_VALID BIT(31) | ||
| 71 | #define XILINX_PCIE_RPIFR1_MSI_INTR BIT(30) | ||
| 72 | #define XILINX_PCIE_RPIFR1_INTR_MASK GENMASK(28, 27) | ||
| 73 | #define XILINX_PCIE_RPIFR1_ALL_MASK 0xFFFFFFFF | ||
| 74 | #define XILINX_PCIE_RPIFR1_INTR_SHIFT 27 | ||
| 75 | |||
| 76 | /* Bridge Info Register definitions */ | ||
| 77 | #define XILINX_PCIE_BIR_ECAM_SZ_MASK GENMASK(18, 16) | ||
| 78 | #define XILINX_PCIE_BIR_ECAM_SZ_SHIFT 16 | ||
| 79 | |||
| 80 | /* Root Port Interrupt FIFO Read Register 2 definitions */ | ||
| 81 | #define XILINX_PCIE_RPIFR2_MSG_DATA GENMASK(15, 0) | ||
| 82 | |||
| 83 | /* Root Port Status/control Register definitions */ | ||
| 84 | #define XILINX_PCIE_REG_RPSC_BEN BIT(0) | ||
| 85 | |||
| 86 | /* Phy Status/Control Register definitions */ | ||
| 87 | #define XILINX_PCIE_REG_PSCR_LNKUP BIT(11) | ||
| 88 | |||
| 89 | /* ECAM definitions */ | ||
| 90 | #define ECAM_BUS_NUM_SHIFT 20 | ||
| 91 | #define ECAM_DEV_NUM_SHIFT 12 | ||
| 92 | |||
| 93 | /* Number of MSI IRQs */ | ||
| 94 | #define XILINX_NUM_MSI_IRQS 128 | ||
| 95 | |||
| 96 | /** | ||
| 97 | * struct xilinx_pcie_port - PCIe port information | ||
| 98 | * @reg_base: IO Mapped Register Base | ||
| 99 | * @irq: Interrupt number | ||
| 100 | * @msi_pages: MSI pages | ||
| 101 | * @root_busno: Root Bus number | ||
| 102 | * @dev: Device pointer | ||
| 103 | * @msi_domain: MSI IRQ domain pointer | ||
| 104 | * @leg_domain: Legacy IRQ domain pointer | ||
| 105 | * @resources: Bus Resources | ||
| 106 | */ | ||
| 107 | struct xilinx_pcie_port { | ||
| 108 | void __iomem *reg_base; | ||
| 109 | u32 irq; | ||
| 110 | unsigned long msi_pages; | ||
| 111 | u8 root_busno; | ||
| 112 | struct device *dev; | ||
| 113 | struct irq_domain *msi_domain; | ||
| 114 | struct irq_domain *leg_domain; | ||
| 115 | struct list_head resources; | ||
| 116 | }; | ||
| 117 | |||
| 118 | static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS); | ||
| 119 | |||
| 120 | static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg) | ||
| 121 | { | ||
| 122 | return readl(port->reg_base + reg); | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg) | ||
| 126 | { | ||
| 127 | writel(val, port->reg_base + reg); | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port) | ||
| 131 | { | ||
| 132 | return (pcie_read(port, XILINX_PCIE_REG_PSCR) & | ||
| 133 | XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0; | ||
| 134 | } | ||
| 135 | |||
| 136 | /** | ||
| 137 | * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts | ||
| 138 | * @port: PCIe port information | ||
| 139 | */ | ||
| 140 | static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port) | ||
| 141 | { | ||
| 142 | struct device *dev = port->dev; | ||
| 143 | unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR); | ||
| 144 | |||
| 145 | if (val & XILINX_PCIE_RPEFR_ERR_VALID) { | ||
| 146 | dev_dbg(dev, "Requester ID %lu\n", | ||
| 147 | val & XILINX_PCIE_RPEFR_REQ_ID); | ||
| 148 | pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK, | ||
| 149 | XILINX_PCIE_REG_RPEFR); | ||
| 150 | } | ||
| 151 | } | ||
| 152 | |||
| 153 | /** | ||
| 154 | * xilinx_pcie_valid_device - Check if a valid device is present on bus | ||
| 155 | * @bus: PCI Bus structure | ||
| 156 | * @devfn: device/function | ||
| 157 | * | ||
| 158 | * Return: 'true' if a valid device may be present, 'false' otherwise | ||
| 159 | */ | ||
| 160 | static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) | ||
| 161 | { | ||
| 162 | struct xilinx_pcie_port *port = bus->sysdata; | ||
| 163 | |||
| 164 | /* Check if link is up when trying to access downstream ports */ | ||
| 165 | if (bus->number != port->root_busno) | ||
| 166 | if (!xilinx_pcie_link_up(port)) | ||
| 167 | return false; | ||
| 168 | |||
| 169 | /* Only one device down on each root port */ | ||
| 170 | if (bus->number == port->root_busno && devfn > 0) | ||
| 171 | return false; | ||
| 172 | |||
| 173 | return true; | ||
| 174 | } | ||
| 175 | |||
| 176 | /** | ||
| 177 | * xilinx_pcie_map_bus - Get configuration base | ||
| 178 | * @bus: PCI Bus structure | ||
| 179 | * @devfn: Device/function | ||
| 180 | * @where: Offset from base | ||
| 181 | * | ||
| 182 | * Return: Base address of the configuration space location to be | ||
| 183 | * accessed, or NULL if the device is not valid | ||
| 184 | */ | ||
| 185 | static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus, | ||
| 186 | unsigned int devfn, int where) | ||
| 187 | { | ||
| 188 | struct xilinx_pcie_port *port = bus->sysdata; | ||
| 189 | int relbus; | ||
| 190 | |||
| 191 | if (!xilinx_pcie_valid_device(bus, devfn)) | ||
| 192 | return NULL; | ||
| 193 | |||
| 194 | relbus = (bus->number << ECAM_BUS_NUM_SHIFT) | | ||
| 195 | (devfn << ECAM_DEV_NUM_SHIFT); | ||
| 196 | |||
| 197 | return port->reg_base + relbus + where; | ||
| 198 | } | ||
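
The `relbus` arithmetic is the standard ECAM layout: bus number in bits 27:20 and the 8-bit devfn (device in bits 7:3, function in 2:0) in bits 19:12, leaving 12 bits for the register offset. A worked example using the shifts defined earlier in this file:

```c
/* ECAM offset for bus 1, device 2, function 1, config register 0x10 (BAR0).
 * devfn packs as (device << 3) | function. */
unsigned int bus = 1, devfn = (2 << 3) | 1;	/* devfn == 0x11 */
unsigned int where = 0x10;

unsigned int off = (bus << ECAM_BUS_NUM_SHIFT) |	/* 0x100000 */
		   (devfn << ECAM_DEV_NUM_SHIFT) |	/* 0x011000 */
		   where;				/* 0x000010 */
/* off == 0x111010 */
```
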
| 199 | |||
| 200 | /* PCIe operations */ | ||
| 201 | static struct pci_ops xilinx_pcie_ops = { | ||
| 202 | .map_bus = xilinx_pcie_map_bus, | ||
| 203 | .read = pci_generic_config_read, | ||
| 204 | .write = pci_generic_config_write, | ||
| 205 | }; | ||
| 206 | |||
| 207 | /* MSI functions */ | ||
| 208 | |||
| 209 | /** | ||
| 210 | * xilinx_pcie_destroy_msi - Free MSI number | ||
| 211 | * @irq: IRQ to be freed | ||
| 212 | */ | ||
| 213 | static void xilinx_pcie_destroy_msi(unsigned int irq) | ||
| 214 | { | ||
| 215 | struct msi_desc *msi; | ||
| 216 | struct xilinx_pcie_port *port; | ||
| 217 | struct irq_data *d = irq_get_irq_data(irq); | ||
| 218 | irq_hw_number_t hwirq = irqd_to_hwirq(d); | ||
| 219 | |||
| 220 | if (!test_bit(hwirq, msi_irq_in_use)) { | ||
| 221 | msi = irq_get_msi_desc(irq); | ||
| 222 | port = msi_desc_to_pci_sysdata(msi); | ||
| 223 | dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); | ||
| 224 | } else { | ||
| 225 | clear_bit(hwirq, msi_irq_in_use); | ||
| 226 | } | ||
| 227 | } | ||
| 228 | |||
| 229 | /** | ||
| 230 | * xilinx_pcie_assign_msi - Allocate MSI number | ||
| 231 | * | ||
| 232 | * Return: A valid IRQ on success and error value on failure. | ||
| 233 | */ | ||
| 234 | static int xilinx_pcie_assign_msi(void) | ||
| 235 | { | ||
| 236 | int pos; | ||
| 237 | |||
| 238 | pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS); | ||
| 239 | if (pos < XILINX_NUM_MSI_IRQS) | ||
| 240 | set_bit(pos, msi_irq_in_use); | ||
| 241 | else | ||
| 242 | return -ENOSPC; | ||
| 243 | |||
| 244 | return pos; | ||
| 245 | } | ||
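
Note that `find_first_zero_bit()` followed by `set_bit()` is not atomic as a pair, so two concurrent allocations could race to the same bit. A lock-free variant would retry with `test_and_set_bit()`; a sketch under that assumption, not the driver's actual code:

```c
/* Hypothetical race-free MSI allocator: loop until we win the bit. */
static int xilinx_pcie_assign_msi_atomic(void)
{
	int pos;

	do {
		pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
		if (pos >= XILINX_NUM_MSI_IRQS)
			return -ENOSPC;
	} while (test_and_set_bit(pos, msi_irq_in_use));

	return pos;
}
```
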
| 246 | |||
| 247 | /** | ||
| 248 | * xilinx_msi_teardown_irq - Destroy the MSI | ||
| 249 | * @chip: MSI Chip descriptor | ||
| 250 | * @irq: MSI IRQ to destroy | ||
| 251 | */ | ||
| 252 | static void xilinx_msi_teardown_irq(struct msi_controller *chip, | ||
| 253 | unsigned int irq) | ||
| 254 | { | ||
| 255 | xilinx_pcie_destroy_msi(irq); | ||
| 256 | irq_dispose_mapping(irq); | ||
| 257 | } | ||
| 258 | |||
| 259 | /** | ||
| 260 | * xilinx_pcie_msi_setup_irq - Setup MSI request | ||
| 261 | * @chip: MSI chip pointer | ||
| 262 | * @pdev: PCIe device pointer | ||
| 263 | * @desc: MSI descriptor pointer | ||
| 264 | * | ||
| 265 | * Return: '0' on success and error value on failure | ||
| 266 | */ | ||
| 267 | static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, | ||
| 268 | struct pci_dev *pdev, | ||
| 269 | struct msi_desc *desc) | ||
| 270 | { | ||
| 271 | struct xilinx_pcie_port *port = pdev->bus->sysdata; | ||
| 272 | unsigned int irq; | ||
| 273 | int hwirq; | ||
| 274 | struct msi_msg msg; | ||
| 275 | phys_addr_t msg_addr; | ||
| 276 | |||
| 277 | hwirq = xilinx_pcie_assign_msi(); | ||
| 278 | if (hwirq < 0) | ||
| 279 | return hwirq; | ||
| 280 | |||
| 281 | irq = irq_create_mapping(port->msi_domain, hwirq); | ||
| 282 | if (!irq) | ||
| 283 | return -EINVAL; | ||
| 284 | |||
| 285 | irq_set_msi_desc(irq, desc); | ||
| 286 | |||
| 287 | msg_addr = virt_to_phys((void *)port->msi_pages); | ||
| 288 | |||
| 289 | msg.address_hi = upper_32_bits(msg_addr); | ||
| 290 | msg.address_lo = lower_32_bits(msg_addr); | ||
| 291 | msg.data = irq; | ||
| 292 | |||
| 293 | pci_write_msi_msg(irq, &msg); | ||
| 294 | |||
| 295 | return 0; | ||
| 296 | } | ||
| 297 | |||
| 298 | /* MSI Chip Descriptor */ | ||
| 299 | static struct msi_controller xilinx_pcie_msi_chip = { | ||
| 300 | .setup_irq = xilinx_pcie_msi_setup_irq, | ||
| 301 | .teardown_irq = xilinx_msi_teardown_irq, | ||
| 302 | }; | ||
| 303 | |||
| 304 | /* HW Interrupt Chip Descriptor */ | ||
| 305 | static struct irq_chip xilinx_msi_irq_chip = { | ||
| 306 | .name = "Xilinx PCIe MSI", | ||
| 307 | .irq_enable = pci_msi_unmask_irq, | ||
| 308 | .irq_disable = pci_msi_mask_irq, | ||
| 309 | .irq_mask = pci_msi_mask_irq, | ||
| 310 | .irq_unmask = pci_msi_unmask_irq, | ||
| 311 | }; | ||
| 312 | |||
| 313 | /** | ||
| 314 | * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid | ||
| 315 | * @domain: IRQ domain | ||
| 316 | * @irq: Virtual IRQ number | ||
| 317 | * @hwirq: HW interrupt number | ||
| 318 | * | ||
| 319 | * Return: Always returns 0. | ||
| 320 | */ | ||
| 321 | static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, | ||
| 322 | irq_hw_number_t hwirq) | ||
| 323 | { | ||
| 324 | irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); | ||
| 325 | irq_set_chip_data(irq, domain->host_data); | ||
| 326 | |||
| 327 | return 0; | ||
| 328 | } | ||
| 329 | |||
| 330 | /* IRQ Domain operations */ | ||
| 331 | static const struct irq_domain_ops msi_domain_ops = { | ||
| 332 | .map = xilinx_pcie_msi_map, | ||
| 333 | }; | ||
| 334 | |||
| 335 | /** | ||
| 336 | * xilinx_pcie_enable_msi - Enable MSI support | ||
| 337 | * @port: PCIe port information | ||
| | * | ||
| | * Return: '0' on success and error value on failure | ||
| 338 | */ | ||
| 339 | static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) | ||
| 340 | { | ||
| 341 | phys_addr_t msg_addr; | ||
| 342 | |||
| 343 | port->msi_pages = __get_free_pages(GFP_KERNEL, 0); | ||
| | if (!port->msi_pages) | ||
| | return -ENOMEM; | ||
| | |||
| 344 | msg_addr = virt_to_phys((void *)port->msi_pages); | ||
| 345 | pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); | ||
| 346 | pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); | ||
| | |||
| | return 0; | ||
| 347 | } | ||
| 348 | |||
| 349 | /* INTx Functions */ | ||
| 350 | |||
| 351 | /** | ||
| 352 | * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid | ||
| 353 | * @domain: IRQ domain | ||
| 354 | * @irq: Virtual IRQ number | ||
| 355 | * @hwirq: HW interrupt number | ||
| 356 | * | ||
| 357 | * Return: Always returns 0. | ||
| 358 | */ | ||
| 359 | static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, | ||
| 360 | irq_hw_number_t hwirq) | ||
| 361 | { | ||
| 362 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | ||
| 363 | irq_set_chip_data(irq, domain->host_data); | ||
| 364 | |||
| 365 | return 0; | ||
| 366 | } | ||
| 367 | |||
| 368 | /* INTx IRQ Domain operations */ | ||
| 369 | static const struct irq_domain_ops intx_domain_ops = { | ||
| 370 | .map = xilinx_pcie_intx_map, | ||
| 371 | .xlate = pci_irqd_intx_xlate, | ||
| 372 | }; | ||
| 373 | |||
| 374 | /* PCIe HW Functions */ | ||
| 375 | |||
| 376 | /** | ||
| 377 | * xilinx_pcie_intr_handler - Interrupt Service Handler | ||
| 378 | * @irq: IRQ number | ||
| 379 | * @data: PCIe port information | ||
| 380 | * | ||
| 381 | * Return: IRQ_HANDLED on success and IRQ_NONE on failure | ||
| 382 | */ | ||
| 383 | static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data) | ||
| 384 | { | ||
| 385 | struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data; | ||
| 386 | struct device *dev = port->dev; | ||
| 387 | u32 val, mask, status; | ||
| 388 | |||
| 389 | /* Read interrupt decode and mask registers */ | ||
| 390 | val = pcie_read(port, XILINX_PCIE_REG_IDR); | ||
| 391 | mask = pcie_read(port, XILINX_PCIE_REG_IMR); | ||
| 392 | |||
| 393 | status = val & mask; | ||
| 394 | if (!status) | ||
| 395 | return IRQ_NONE; | ||
| 396 | |||
| 397 | if (status & XILINX_PCIE_INTR_LINK_DOWN) | ||
| 398 | dev_warn(dev, "Link Down\n"); | ||
| 399 | |||
| 400 | if (status & XILINX_PCIE_INTR_ECRC_ERR) | ||
| 401 | dev_warn(dev, "ECRC failed\n"); | ||
| 402 | |||
| 403 | if (status & XILINX_PCIE_INTR_STR_ERR) | ||
| 404 | dev_warn(dev, "Streaming error\n"); | ||
| 405 | |||
| 406 | if (status & XILINX_PCIE_INTR_HOT_RESET) | ||
| 407 | dev_info(dev, "Hot reset\n"); | ||
| 408 | |||
| 409 | if (status & XILINX_PCIE_INTR_CFG_TIMEOUT) | ||
| 410 | dev_warn(dev, "ECAM access timeout\n"); | ||
| 411 | |||
| 412 | if (status & XILINX_PCIE_INTR_CORRECTABLE) { | ||
| 413 | dev_warn(dev, "Correctable error message\n"); | ||
| 414 | xilinx_pcie_clear_err_interrupts(port); | ||
| 415 | } | ||
| 416 | |||
| 417 | if (status & XILINX_PCIE_INTR_NONFATAL) { | ||
| 418 | dev_warn(dev, "Non fatal error message\n"); | ||
| 419 | xilinx_pcie_clear_err_interrupts(port); | ||
| 420 | } | ||
| 421 | |||
| 422 | if (status & XILINX_PCIE_INTR_FATAL) { | ||
| 423 | dev_warn(dev, "Fatal error message\n"); | ||
| 424 | xilinx_pcie_clear_err_interrupts(port); | ||
| 425 | } | ||
| 426 | |||
| 427 | if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) { | ||
| 428 | val = pcie_read(port, XILINX_PCIE_REG_RPIFR1); | ||
| 429 | |||
| 430 | /* Check whether interrupt valid */ | ||
| 431 | if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) { | ||
| 432 | dev_warn(dev, "RP Intr FIFO1 read error\n"); | ||
| 433 | goto error; | ||
| 434 | } | ||
| 435 | |||
| 436 | /* Decode the IRQ number */ | ||
| 437 | if (val & XILINX_PCIE_RPIFR1_MSI_INTR) { | ||
| 438 | val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) & | ||
| 439 | XILINX_PCIE_RPIFR2_MSG_DATA; | ||
| | /* MSIs cannot be serviced without CONFIG_PCI_MSI */ | ||
| | if (!IS_ENABLED(CONFIG_PCI_MSI)) | ||
| | val = 0; | ||
| 440 | } else { | ||
| 441 | val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >> | ||
| 442 | XILINX_PCIE_RPIFR1_INTR_SHIFT; | ||
| 443 | val = irq_find_mapping(port->leg_domain, val); | ||
| 444 | } | ||
| 445 | |||
| 446 | /* Clear interrupt FIFO register 1 */ | ||
| 447 | pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK, | ||
| 448 | XILINX_PCIE_REG_RPIFR1); | ||
| 449 | |||
| 450 | /* Handle the interrupt; val is 0 if no handler was found */ | ||
| 451 | if (val) | ||
| 452 | generic_handle_irq(val); | ||
| 454 | } | ||
| 455 | |||
| 456 | if (status & XILINX_PCIE_INTR_SLV_UNSUPP) | ||
| 457 | dev_warn(dev, "Slave unsupported request\n"); | ||
| 458 | |||
| 459 | if (status & XILINX_PCIE_INTR_SLV_UNEXP) | ||
| 460 | dev_warn(dev, "Slave unexpected completion\n"); | ||
| 461 | |||
| 462 | if (status & XILINX_PCIE_INTR_SLV_COMPL) | ||
| 463 | dev_warn(dev, "Slave completion timeout\n"); | ||
| 464 | |||
| 465 | if (status & XILINX_PCIE_INTR_SLV_ERRP) | ||
| 466 | dev_warn(dev, "Slave Error Poison\n"); | ||
| 467 | |||
| 468 | if (status & XILINX_PCIE_INTR_SLV_CMPABT) | ||
| 469 | dev_warn(dev, "Slave Completer Abort\n"); | ||
| 470 | |||
| 471 | if (status & XILINX_PCIE_INTR_SLV_ILLBUR) | ||
| 472 | dev_warn(dev, "Slave Illegal Burst\n"); | ||
| 473 | |||
| 474 | if (status & XILINX_PCIE_INTR_MST_DECERR) | ||
| 475 | dev_warn(dev, "Master decode error\n"); | ||
| 476 | |||
| 477 | if (status & XILINX_PCIE_INTR_MST_SLVERR) | ||
| 478 | dev_warn(dev, "Master slave error\n"); | ||
| 479 | |||
| 480 | if (status & XILINX_PCIE_INTR_MST_ERRP) | ||
| 481 | dev_warn(dev, "Master error poison\n"); | ||
| 482 | |||
| 483 | error: | ||
| 484 | /* Clear the Interrupt Decode register */ | ||
| 485 | pcie_write(port, status, XILINX_PCIE_REG_IDR); | ||
| 486 | |||
| 487 | return IRQ_HANDLED; | ||
| 488 | } | ||
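
The handler above follows the usual shape for a shared level-triggered interrupt: compute `status = IDR & IMR`, return `IRQ_NONE` when nothing pending is ours (the line is requested with `IRQF_SHARED`), and write-1-to-clear only the handled bits at the end. Condensed to its skeleton, with the per-cause bodies elided and reusing this file's `pcie_read()`/`pcie_write()` helpers:

```c
static irqreturn_t isr_skeleton(struct xilinx_pcie_port *port)
{
	u32 status = pcie_read(port, XILINX_PCIE_REG_IDR) &
		     pcie_read(port, XILINX_PCIE_REG_IMR);

	if (!status)
		return IRQ_NONE;	/* another device on the shared line */

	/* ... decode and service each status bit ... */

	pcie_write(port, status, XILINX_PCIE_REG_IDR);	/* W1C handled bits */
	return IRQ_HANDLED;
}
```
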
| 489 | |||
| 490 | /** | ||
| 491 | * xilinx_pcie_init_irq_domain - Initialize IRQ domain | ||
| 492 | * @port: PCIe port information | ||
| 493 | * | ||
| 494 | * Return: '0' on success and error value on failure | ||
| 495 | */ | ||
| 496 | static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | ||
| 497 | { | ||
| 498 | struct device *dev = port->dev; | ||
| 499 | struct device_node *node = dev->of_node; | ||
| 500 | struct device_node *pcie_intc_node; | ||
| | int ret; | ||
| 501 | |||
| 502 | /* Setup INTx */ | ||
| 503 | pcie_intc_node = of_get_next_child(node, NULL); | ||
| 504 | if (!pcie_intc_node) { | ||
| 505 | dev_err(dev, "No PCIe Intc node found\n"); | ||
| 506 | return -ENODEV; | ||
| 507 | } | ||
| 508 | |||
| 509 | port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | ||
| 510 | &intx_domain_ops, | ||
| 511 | port); | ||
| 512 | if (!port->leg_domain) { | ||
| 513 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | ||
| 514 | return -ENODEV; | ||
| 515 | } | ||
| 516 | |||
| 517 | /* Setup MSI */ | ||
| 518 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 519 | port->msi_domain = irq_domain_add_linear(node, | ||
| 520 | XILINX_NUM_MSI_IRQS, | ||
| 521 | &msi_domain_ops, | ||
| 522 | &xilinx_pcie_msi_chip); | ||
| 523 | if (!port->msi_domain) { | ||
| 524 | dev_err(dev, "Failed to get a MSI IRQ domain\n"); | ||
| 525 | return -ENODEV; | ||
| 526 | } | ||
| 527 | |||
| 528 | ret = xilinx_pcie_enable_msi(port); | ||
| | if (ret) | ||
| | return ret; | ||
| 529 | } | ||
| 530 | |||
| 531 | return 0; | ||
| 532 | } | ||
| 533 | |||
| 534 | /** | ||
| 535 | * xilinx_pcie_init_port - Initialize hardware | ||
| 536 | * @port: PCIe port information | ||
| 537 | */ | ||
| 538 | static void xilinx_pcie_init_port(struct xilinx_pcie_port *port) | ||
| 539 | { | ||
| 540 | struct device *dev = port->dev; | ||
| 541 | |||
| 542 | if (xilinx_pcie_link_up(port)) | ||
| 543 | dev_info(dev, "PCIe Link is UP\n"); | ||
| 544 | else | ||
| 545 | dev_info(dev, "PCIe Link is DOWN\n"); | ||
| 546 | |||
| 547 | /* Disable all interrupts */ | ||
| 548 | pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK, | ||
| 549 | XILINX_PCIE_REG_IMR); | ||
| 550 | |||
| 551 | /* Clear pending interrupts */ | ||
| 552 | pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) & | ||
| 553 | XILINX_PCIE_IMR_ALL_MASK, | ||
| 554 | XILINX_PCIE_REG_IDR); | ||
| 555 | |||
| 556 | /* Enable all interrupts we handle */ | ||
| 557 | pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR); | ||
| 558 | |||
| 559 | /* Enable the Bridge enable bit */ | ||
| 560 | pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) | | ||
| 561 | XILINX_PCIE_REG_RPSC_BEN, | ||
| 562 | XILINX_PCIE_REG_RPSC); | ||
| 563 | } | ||
| 564 | |||
| 565 | /** | ||
| 566 | * xilinx_pcie_parse_dt - Parse Device tree | ||
| 567 | * @port: PCIe port information | ||
| 568 | * | ||
| 569 | * Return: '0' on success and error value on failure | ||
| 570 | */ | ||
| 571 | static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port) | ||
| 572 | { | ||
| 573 | struct device *dev = port->dev; | ||
| 574 | struct device_node *node = dev->of_node; | ||
| 575 | struct resource regs; | ||
| 576 | const char *type; | ||
| 577 | int err; | ||
| 578 | |||
| 579 | type = of_get_property(node, "device_type", NULL); | ||
| 580 | if (!type || strcmp(type, "pci")) { | ||
| 581 | dev_err(dev, "invalid \"device_type\" %s\n", type); | ||
| 582 | return -EINVAL; | ||
| 583 | } | ||
| 584 | |||
| 585 | err = of_address_to_resource(node, 0, ®s); | ||
| 586 | if (err) { | ||
| 587 | dev_err(dev, "missing \"reg\" property\n"); | ||
| 588 | return err; | ||
| 589 | } | ||
| 590 | |||
| 591 | port->reg_base = devm_pci_remap_cfg_resource(dev, ®s); | ||
| 592 | if (IS_ERR(port->reg_base)) | ||
| 593 | return PTR_ERR(port->reg_base); | ||
| 594 | |||
| 595 | port->irq = irq_of_parse_and_map(node, 0); | ||
| | if (!port->irq) { | ||
| | dev_err(dev, "failed to map IRQ\n"); | ||
| | return -ENXIO; | ||
| | } | ||
| 596 | err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler, | ||
| 597 | IRQF_SHARED | IRQF_NO_THREAD, | ||
| 598 | "xilinx-pcie", port); | ||
| 599 | if (err) { | ||
| 600 | dev_err(dev, "unable to request irq %d\n", port->irq); | ||
| 601 | return err; | ||
| 602 | } | ||
| 603 | |||
| 604 | return 0; | ||
| 605 | } | ||
| 606 | |||
| 607 | /** | ||
| 608 | * xilinx_pcie_probe - Probe function | ||
| 609 | * @pdev: Platform device pointer | ||
| 610 | * | ||
| 611 | * Return: '0' on success and error value on failure | ||
| 612 | */ | ||
| 613 | static int xilinx_pcie_probe(struct platform_device *pdev) | ||
| 614 | { | ||
| 615 | struct device *dev = &pdev->dev; | ||
| 616 | struct xilinx_pcie_port *port; | ||
| 617 | struct pci_bus *bus, *child; | ||
| 618 | struct pci_host_bridge *bridge; | ||
| 619 | int err; | ||
| 620 | resource_size_t iobase = 0; | ||
| 621 | LIST_HEAD(res); | ||
| 622 | |||
| 623 | if (!dev->of_node) | ||
| 624 | return -ENODEV; | ||
| 625 | |||
| 626 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); | ||
| 627 | if (!bridge) | ||
| 628 | return -ENODEV; | ||
| 629 | |||
| 630 | port = pci_host_bridge_priv(bridge); | ||
| 631 | |||
| 632 | port->dev = dev; | ||
| 633 | |||
| 634 | err = xilinx_pcie_parse_dt(port); | ||
| 635 | if (err) { | ||
| 636 | dev_err(dev, "Parsing DT failed\n"); | ||
| 637 | return err; | ||
| 638 | } | ||
| 639 | |||
| 640 | xilinx_pcie_init_port(port); | ||
| 641 | |||
| 642 | err = xilinx_pcie_init_irq_domain(port); | ||
| 643 | if (err) { | ||
| 644 | dev_err(dev, "Failed creating IRQ Domain\n"); | ||
| 645 | return err; | ||
| 646 | } | ||
| 647 | |||
| 648 | err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &res, | ||
| 649 | &iobase); | ||
| 650 | if (err) { | ||
| 651 | dev_err(dev, "Getting bridge resources failed\n"); | ||
| 652 | return err; | ||
| 653 | } | ||
| 654 | |||
| 655 | err = devm_request_pci_bus_resources(dev, &res); | ||
| 656 | if (err) | ||
| 657 | goto error; | ||
| 658 | |||
| 659 | |||
| 660 | list_splice_init(&res, &bridge->windows); | ||
| 661 | bridge->dev.parent = dev; | ||
| 662 | bridge->sysdata = port; | ||
| 663 | bridge->busnr = 0; | ||
| 664 | bridge->ops = &xilinx_pcie_ops; | ||
| 665 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 666 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 667 | |||
| 668 | #ifdef CONFIG_PCI_MSI | ||
| 669 | xilinx_pcie_msi_chip.dev = dev; | ||
| 670 | bridge->msi = &xilinx_pcie_msi_chip; | ||
| 671 | #endif | ||
| 672 | err = pci_scan_root_bus_bridge(bridge); | ||
| 673 | if (err < 0) | ||
| 674 | goto error; | ||
| 675 | |||
| 676 | bus = bridge->bus; | ||
| 677 | |||
| 678 | pci_assign_unassigned_bus_resources(bus); | ||
| 679 | list_for_each_entry(child, &bus->children, node) | ||
| 680 | pcie_bus_configure_settings(child); | ||
| 681 | pci_bus_add_devices(bus); | ||
| 682 | return 0; | ||
| 683 | |||
| 684 | error: | ||
| 685 | pci_free_resource_list(&res); | ||
| 686 | return err; | ||
| 687 | } | ||
| 688 | |||
| 689 | static const struct of_device_id xilinx_pcie_of_match[] = { | ||
| 690 | { .compatible = "xlnx,axi-pcie-host-1.00.a", }, | ||
| 691 | {} | ||
| 692 | }; | ||
| 693 | |||
| 694 | static struct platform_driver xilinx_pcie_driver = { | ||
| 695 | .driver = { | ||
| 696 | .name = "xilinx-pcie", | ||
| 697 | .of_match_table = xilinx_pcie_of_match, | ||
| 698 | .suppress_bind_attrs = true, | ||
| 699 | }, | ||
| 700 | .probe = xilinx_pcie_probe, | ||
| 701 | }; | ||
| 702 | builtin_platform_driver(xilinx_pcie_driver); | ||
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c new file mode 100644 index 000000000000..942b64fc7f1f --- /dev/null +++ b/drivers/pci/controller/vmd.c | |||
| @@ -0,0 +1,870 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * Volume Management Device driver | ||
| 4 | * Copyright (c) 2015, Intel Corporation. | ||
| 5 | */ | ||
| 6 | |||
| 7 | #include <linux/device.h> | ||
| 8 | #include <linux/interrupt.h> | ||
| 9 | #include <linux/irq.h> | ||
| 10 | #include <linux/kernel.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/msi.h> | ||
| 13 | #include <linux/pci.h> | ||
| 14 | #include <linux/srcu.h> | ||
| 15 | #include <linux/rculist.h> | ||
| 16 | #include <linux/rcupdate.h> | ||
| 17 | |||
| 18 | #include <asm/irqdomain.h> | ||
| 19 | #include <asm/device.h> | ||
| 20 | #include <asm/msi.h> | ||
| 21 | #include <asm/msidef.h> | ||
| 22 | |||
| 23 | #define VMD_CFGBAR 0 | ||
| 24 | #define VMD_MEMBAR1 2 | ||
| 25 | #define VMD_MEMBAR2 4 | ||
| 26 | |||
| 27 | #define PCI_REG_VMCAP 0x40 | ||
| 28 | #define BUS_RESTRICT_CAP(vmcap) (vmcap & 0x1) | ||
| 29 | #define PCI_REG_VMCONFIG 0x44 | ||
| 30 | #define BUS_RESTRICT_CFG(vmcfg) ((vmcfg >> 8) & 0x3) | ||
| 31 | #define PCI_REG_VMLOCK 0x70 | ||
| 32 | #define MB2_SHADOW_EN(vmlock) (vmlock & 0x2) | ||
| 33 | |||
| 34 | enum vmd_features { | ||
| 35 | /* | ||
| 36 | * Device may contain registers which hint the physical location of the | ||
| 37 | * membars, in order to allow proper address translation during | ||
| 38 | * resource assignment to enable guest virtualization | ||
| 39 | */ | ||
| 40 | VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), | ||
| 41 | |||
| 42 | /* | ||
| 43 | * Device may provide root port configuration information which limits | ||
| 44 | * bus numbering | ||
| 45 | */ | ||
| 46 | VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), | ||
| 47 | }; | ||
| 48 | |||
| 49 | /* | ||
| 50 | * Lock for manipulating VMD IRQ lists. | ||
| 51 | */ | ||
| 52 | static DEFINE_RAW_SPINLOCK(list_lock); | ||
| 53 | |||
| 54 | /** | ||
| 55 | * struct vmd_irq - private data to map driver IRQ to the VMD shared vector | ||
| 56 | * @node: list item for parent traversal. | ||
| 57 | * @irq: back pointer to parent. | ||
| 58 | * @enabled: true if driver enabled IRQ | ||
| 59 | * @virq: the virtual IRQ value provided to the requesting driver. | ||
| 60 | * | ||
| 61 | * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to | ||
| 62 | * a VMD IRQ using this structure. | ||
| 63 | */ | ||
| 64 | struct vmd_irq { | ||
| 65 | struct list_head node; | ||
| 66 | struct vmd_irq_list *irq; | ||
| 67 | bool enabled; | ||
| 68 | unsigned int virq; | ||
| 69 | }; | ||
| 70 | |||
| 71 | /** | ||
| 72 | * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector | ||
| 73 | * @irq_list: the list of irq's the VMD one demuxes to. | ||
| 74 | * @srcu: SRCU struct for local synchronization. | ||
| 75 | * @count: number of child IRQs assigned to this vector; used to track | ||
| 76 | * sharing. | ||
| 77 | */ | ||
| 78 | struct vmd_irq_list { | ||
| 79 | struct list_head irq_list; | ||
| 80 | struct srcu_struct srcu; | ||
| 81 | unsigned int count; | ||
| 82 | }; | ||
| 83 | |||
| 84 | struct vmd_dev { | ||
| 85 | struct pci_dev *dev; | ||
| 86 | |||
| 87 | spinlock_t cfg_lock; | ||
| 88 | char __iomem *cfgbar; | ||
| 89 | |||
| 90 | int msix_count; | ||
| 91 | struct vmd_irq_list *irqs; | ||
| 92 | |||
| 93 | struct pci_sysdata sysdata; | ||
| 94 | struct resource resources[3]; | ||
| 95 | struct irq_domain *irq_domain; | ||
| 96 | struct pci_bus *bus; | ||
| 97 | |||
| 98 | #ifdef CONFIG_X86_DEV_DMA_OPS | ||
| 99 | struct dma_map_ops dma_ops; | ||
| 100 | struct dma_domain dma_domain; | ||
| 101 | #endif | ||
| 102 | }; | ||
| 103 | |||
| 104 | static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus) | ||
| 105 | { | ||
| 106 | return container_of(bus->sysdata, struct vmd_dev, sysdata); | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline unsigned int index_from_irqs(struct vmd_dev *vmd, | ||
| 110 | struct vmd_irq_list *irqs) | ||
| 111 | { | ||
| 112 | return irqs - vmd->irqs; | ||
| 113 | } | ||
| 114 | |||
| 115 | /* | ||
| 116 | * Drivers managing a device in a VMD domain allocate their own IRQs as before, | ||
| 117 | * but the MSI entry for the hardware it's driving will be programmed with a | ||
| 118 | * destination ID for the VMD MSI-X table. The VMD muxes interrupts in its | ||
| 119 | * domain into one of its own, and the VMD driver de-muxes these for the | ||
| 120 | * handlers sharing that VMD IRQ. The vmd irq_domain provides the operations | ||
| 121 | * and irq_chip to set this up. | ||
| 122 | */ | ||
| 123 | static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | ||
| 124 | { | ||
| 125 | struct vmd_irq *vmdirq = data->chip_data; | ||
| 126 | struct vmd_irq_list *irq = vmdirq->irq; | ||
| 127 | struct vmd_dev *vmd = irq_data_get_irq_handler_data(data); | ||
| 128 | |||
| 129 | msg->address_hi = MSI_ADDR_BASE_HI; | ||
| 130 | msg->address_lo = MSI_ADDR_BASE_LO | | ||
| 131 | MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq)); | ||
| 132 | msg->data = 0; | ||
| 133 | } | ||
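
On x86, `MSI_ADDR_BASE_LO` is the local-APIC MSI window at 0xfee00000 and `MSI_ADDR_DEST_ID()` places the destination ID at bits 19:12, so VMD reuses the destination-ID field to carry the index of one of its own MSI-X vectors. A worked example (the exact field mask in asm/msidef.h may differ; the low bits of the ID are what matter here):

```c
/* Driver IRQ assigned to VMD vector index 3: */
u32 address_lo = 0xfee00000 | (3 << 12);	/* == 0xfee03000 */
/* VMD hardware routes an MSI written to this address to its own MSI-X
 * entry 3, whose handler (vmd_irq below) demuxes the child IRQs. */
```
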
| 134 | |||
| 135 | /* | ||
| 136 | * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops. | ||
| 137 | */ | ||
| 138 | static void vmd_irq_enable(struct irq_data *data) | ||
| 139 | { | ||
| 140 | struct vmd_irq *vmdirq = data->chip_data; | ||
| 141 | unsigned long flags; | ||
| 142 | |||
| 143 | raw_spin_lock_irqsave(&list_lock, flags); | ||
| 144 | WARN_ON(vmdirq->enabled); | ||
| 145 | list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list); | ||
| 146 | vmdirq->enabled = true; | ||
| 147 | raw_spin_unlock_irqrestore(&list_lock, flags); | ||
| 148 | |||
| 149 | data->chip->irq_unmask(data); | ||
| 150 | } | ||
| 151 | |||
| 152 | static void vmd_irq_disable(struct irq_data *data) | ||
| 153 | { | ||
| 154 | struct vmd_irq *vmdirq = data->chip_data; | ||
| 155 | unsigned long flags; | ||
| 156 | |||
| 157 | data->chip->irq_mask(data); | ||
| 158 | |||
| 159 | raw_spin_lock_irqsave(&list_lock, flags); | ||
| 160 | if (vmdirq->enabled) { | ||
| 161 | list_del_rcu(&vmdirq->node); | ||
| 162 | vmdirq->enabled = false; | ||
| 163 | } | ||
| 164 | raw_spin_unlock_irqrestore(&list_lock, flags); | ||
| 165 | } | ||
| 166 | |||
| 167 | /* | ||
| 168 | * XXX: Stubbed until we develop acceptable way to not create conflicts with | ||
| 169 | * other devices sharing the same vector. | ||
| 170 | */ | ||
| 171 | static int vmd_irq_set_affinity(struct irq_data *data, | ||
| 172 | const struct cpumask *dest, bool force) | ||
| 173 | { | ||
| 174 | return -EINVAL; | ||
| 175 | } | ||
| 176 | |||
| 177 | static struct irq_chip vmd_msi_controller = { | ||
| 178 | .name = "VMD-MSI", | ||
| 179 | .irq_enable = vmd_irq_enable, | ||
| 180 | .irq_disable = vmd_irq_disable, | ||
| 181 | .irq_compose_msi_msg = vmd_compose_msi_msg, | ||
| 182 | .irq_set_affinity = vmd_irq_set_affinity, | ||
| 183 | }; | ||
| 184 | |||
| 185 | static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info, | ||
| 186 | msi_alloc_info_t *arg) | ||
| 187 | { | ||
| 188 | return 0; | ||
| 189 | } | ||
| 190 | |||
| 191 | /* | ||
| 192 | * XXX: We can be even smarter selecting the best IRQ once we solve the | ||
| 193 | * affinity problem. | ||
| 194 | */ | ||
| 195 | static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc) | ||
| 196 | { | ||
| 197 | int i, best = 1; | ||
| 198 | unsigned long flags; | ||
| 199 | |||
| 200 | if (pci_is_bridge(msi_desc_to_pci_dev(desc)) || vmd->msix_count == 1) | ||
| 201 | return &vmd->irqs[0]; | ||
| 202 | |||
| 203 | raw_spin_lock_irqsave(&list_lock, flags); | ||
| 204 | for (i = 1; i < vmd->msix_count; i++) | ||
| 205 | if (vmd->irqs[i].count < vmd->irqs[best].count) | ||
| 206 | best = i; | ||
| 207 | vmd->irqs[best].count++; | ||
| 208 | raw_spin_unlock_irqrestore(&list_lock, flags); | ||
| 209 | |||
| 210 | return &vmd->irqs[best]; | ||
| 211 | } | ||
| 212 | |||
| 213 | static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info, | ||
| 214 | unsigned int virq, irq_hw_number_t hwirq, | ||
| 215 | msi_alloc_info_t *arg) | ||
| 216 | { | ||
| 217 | struct msi_desc *desc = arg->desc; | ||
| 218 | struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus); | ||
| 219 | struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL); | ||
| 220 | unsigned int index, vector; | ||
| 221 | |||
| 222 | if (!vmdirq) | ||
| 223 | return -ENOMEM; | ||
| 224 | |||
| 225 | INIT_LIST_HEAD(&vmdirq->node); | ||
| 226 | vmdirq->irq = vmd_next_irq(vmd, desc); | ||
| 227 | vmdirq->virq = virq; | ||
| 228 | index = index_from_irqs(vmd, vmdirq->irq); | ||
| 229 | vector = pci_irq_vector(vmd->dev, index); | ||
| 230 | |||
| 231 | irq_domain_set_info(domain, virq, vector, info->chip, vmdirq, | ||
| 232 | handle_untracked_irq, vmd, NULL); | ||
| 233 | return 0; | ||
| 234 | } | ||
| 235 | |||
| 236 | static void vmd_msi_free(struct irq_domain *domain, | ||
| 237 | struct msi_domain_info *info, unsigned int virq) | ||
| 238 | { | ||
| 239 | struct vmd_irq *vmdirq = irq_get_chip_data(virq); | ||
| 240 | unsigned long flags; | ||
| 241 | |||
| 242 | synchronize_srcu(&vmdirq->irq->srcu); | ||
| 243 | |||
| 244 | /* XXX: Potential optimization to rebalance */ | ||
| 245 | raw_spin_lock_irqsave(&list_lock, flags); | ||
| 246 | vmdirq->irq->count--; | ||
| 247 | raw_spin_unlock_irqrestore(&list_lock, flags); | ||
| 248 | |||
| 249 | kfree(vmdirq); | ||
| 250 | } | ||
| 251 | |||
| 252 | static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev, | ||
| 253 | int nvec, msi_alloc_info_t *arg) | ||
| 254 | { | ||
| 255 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 256 | struct vmd_dev *vmd = vmd_from_bus(pdev->bus); | ||
| 257 | |||
| 258 | if (nvec > vmd->msix_count) | ||
| 259 | return vmd->msix_count; | ||
| 260 | |||
| 261 | memset(arg, 0, sizeof(*arg)); | ||
| 262 | return 0; | ||
| 263 | } | ||
| 264 | |||
| 265 | static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) | ||
| 266 | { | ||
| 267 | arg->desc = desc; | ||
| 268 | } | ||
| 269 | |||
| 270 | static struct msi_domain_ops vmd_msi_domain_ops = { | ||
| 271 | .get_hwirq = vmd_get_hwirq, | ||
| 272 | .msi_init = vmd_msi_init, | ||
| 273 | .msi_free = vmd_msi_free, | ||
| 274 | .msi_prepare = vmd_msi_prepare, | ||
| 275 | .set_desc = vmd_set_desc, | ||
| 276 | }; | ||
| 277 | |||
| 278 | static struct msi_domain_info vmd_msi_domain_info = { | ||
| 279 | .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | | ||
| 280 | MSI_FLAG_PCI_MSIX, | ||
| 281 | .ops = &vmd_msi_domain_ops, | ||
| 282 | .chip = &vmd_msi_controller, | ||
| 283 | }; | ||
| 284 | |||
| 285 | #ifdef CONFIG_X86_DEV_DMA_OPS | ||
| 286 | /* | ||
| 287 | * VMD replaces the requester ID with its own. DMA mappings for devices in a | ||
| 288 | * VMD domain need to be mapped for the VMD, not the device requiring | ||
| 289 | * the mapping. | ||
| 290 | */ | ||
| 291 | static struct device *to_vmd_dev(struct device *dev) | ||
| 292 | { | ||
| 293 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 294 | struct vmd_dev *vmd = vmd_from_bus(pdev->bus); | ||
| 295 | |||
| 296 | return &vmd->dev->dev; | ||
| 297 | } | ||
| 298 | |||
| 299 | static const struct dma_map_ops *vmd_dma_ops(struct device *dev) | ||
| 300 | { | ||
| 301 | return get_dma_ops(to_vmd_dev(dev)); | ||
| 302 | } | ||
| 303 | |||
| 304 | static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr, | ||
| 305 | gfp_t flag, unsigned long attrs) | ||
| 306 | { | ||
| 307 | return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag, | ||
| 308 | attrs); | ||
| 309 | } | ||
| 310 | |||
| 311 | static void vmd_free(struct device *dev, size_t size, void *vaddr, | ||
| 312 | dma_addr_t addr, unsigned long attrs) | ||
| 313 | { | ||
| 314 | return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr, | ||
| 315 | attrs); | ||
| 316 | } | ||
| 317 | |||
| 318 | static int vmd_mmap(struct device *dev, struct vm_area_struct *vma, | ||
| 319 | void *cpu_addr, dma_addr_t addr, size_t size, | ||
| 320 | unsigned long attrs) | ||
| 321 | { | ||
| 322 | return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr, | ||
| 323 | size, attrs); | ||
| 324 | } | ||
| 325 | |||
| 326 | static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt, | ||
| 327 | void *cpu_addr, dma_addr_t addr, size_t size, | ||
| 328 | unsigned long attrs) | ||
| 329 | { | ||
| 330 | return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr, | ||
| 331 | addr, size, attrs); | ||
| 332 | } | ||
| 333 | |||
| 334 | static dma_addr_t vmd_map_page(struct device *dev, struct page *page, | ||
| 335 | unsigned long offset, size_t size, | ||
| 336 | enum dma_data_direction dir, | ||
| 337 | unsigned long attrs) | ||
| 338 | { | ||
| 339 | return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size, | ||
| 340 | dir, attrs); | ||
| 341 | } | ||
| 342 | |||
| 343 | static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size, | ||
| 344 | enum dma_data_direction dir, unsigned long attrs) | ||
| 345 | { | ||
| 346 | vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs); | ||
| 347 | } | ||
| 348 | |||
| 349 | static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
| 350 | enum dma_data_direction dir, unsigned long attrs) | ||
| 351 | { | ||
| 352 | return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | ||
| 353 | } | ||
| 354 | |||
| 355 | static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | ||
| 356 | enum dma_data_direction dir, unsigned long attrs) | ||
| 357 | { | ||
| 358 | vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs); | ||
| 359 | } | ||
| 360 | |||
| 361 | static void vmd_sync_single_for_cpu(struct device *dev, dma_addr_t addr, | ||
| 362 | size_t size, enum dma_data_direction dir) | ||
| 363 | { | ||
| 364 | vmd_dma_ops(dev)->sync_single_for_cpu(to_vmd_dev(dev), addr, size, dir); | ||
| 365 | } | ||
| 366 | |||
| 367 | static void vmd_sync_single_for_device(struct device *dev, dma_addr_t addr, | ||
| 368 | size_t size, enum dma_data_direction dir) | ||
| 369 | { | ||
| 370 | vmd_dma_ops(dev)->sync_single_for_device(to_vmd_dev(dev), addr, size, | ||
| 371 | dir); | ||
| 372 | } | ||
| 373 | |||
| 374 | static void vmd_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | ||
| 375 | int nents, enum dma_data_direction dir) | ||
| 376 | { | ||
| 377 | vmd_dma_ops(dev)->sync_sg_for_cpu(to_vmd_dev(dev), sg, nents, dir); | ||
| 378 | } | ||
| 379 | |||
| 380 | static void vmd_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | ||
| 381 | int nents, enum dma_data_direction dir) | ||
| 382 | { | ||
| 383 | vmd_dma_ops(dev)->sync_sg_for_device(to_vmd_dev(dev), sg, nents, dir); | ||
| 384 | } | ||
| 385 | |||
| 386 | static int vmd_mapping_error(struct device *dev, dma_addr_t addr) | ||
| 387 | { | ||
| 388 | return vmd_dma_ops(dev)->mapping_error(to_vmd_dev(dev), addr); | ||
| 389 | } | ||
| 390 | |||
| 391 | static int vmd_dma_supported(struct device *dev, u64 mask) | ||
| 392 | { | ||
| 393 | return vmd_dma_ops(dev)->dma_supported(to_vmd_dev(dev), mask); | ||
| 394 | } | ||
| 395 | |||
| 396 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK | ||
| 397 | static u64 vmd_get_required_mask(struct device *dev) | ||
| 398 | { | ||
| 399 | return vmd_dma_ops(dev)->get_required_mask(to_vmd_dev(dev)); | ||
| 400 | } | ||
| 401 | #endif | ||
| 402 | |||
| 403 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) | ||
| 404 | { | ||
| 405 | struct dma_domain *domain = &vmd->dma_domain; | ||
| 406 | |||
| 407 | if (get_dma_ops(&vmd->dev->dev)) | ||
| 408 | del_dma_domain(domain); | ||
| 409 | } | ||
| 410 | |||
| 411 | #define ASSIGN_VMD_DMA_OPS(source, dest, fn) \ | ||
| 412 | do { \ | ||
| 413 | if (source->fn) \ | ||
| 414 | dest->fn = vmd_##fn; \ | ||
| 415 | } while (0) | ||
| 416 | |||
| 417 | static void vmd_setup_dma_ops(struct vmd_dev *vmd) | ||
| 418 | { | ||
| 419 | const struct dma_map_ops *source = get_dma_ops(&vmd->dev->dev); | ||
| 420 | struct dma_map_ops *dest = &vmd->dma_ops; | ||
| 421 | struct dma_domain *domain = &vmd->dma_domain; | ||
| 422 | |||
| 423 | domain->domain_nr = vmd->sysdata.domain; | ||
| 424 | domain->dma_ops = dest; | ||
| 425 | |||
| 426 | if (!source) | ||
| 427 | return; | ||
| 428 | ASSIGN_VMD_DMA_OPS(source, dest, alloc); | ||
| 429 | ASSIGN_VMD_DMA_OPS(source, dest, free); | ||
| 430 | ASSIGN_VMD_DMA_OPS(source, dest, mmap); | ||
| 431 | ASSIGN_VMD_DMA_OPS(source, dest, get_sgtable); | ||
| 432 | ASSIGN_VMD_DMA_OPS(source, dest, map_page); | ||
| 433 | ASSIGN_VMD_DMA_OPS(source, dest, unmap_page); | ||
| 434 | ASSIGN_VMD_DMA_OPS(source, dest, map_sg); | ||
| 435 | ASSIGN_VMD_DMA_OPS(source, dest, unmap_sg); | ||
| 436 | ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_cpu); | ||
| 437 | ASSIGN_VMD_DMA_OPS(source, dest, sync_single_for_device); | ||
| 438 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_cpu); | ||
| 439 | ASSIGN_VMD_DMA_OPS(source, dest, sync_sg_for_device); | ||
| 440 | ASSIGN_VMD_DMA_OPS(source, dest, mapping_error); | ||
| 441 | ASSIGN_VMD_DMA_OPS(source, dest, dma_supported); | ||
| 442 | #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK | ||
| 443 | ASSIGN_VMD_DMA_OPS(source, dest, get_required_mask); | ||
| 444 | #endif | ||
| 445 | add_dma_domain(domain); | ||
| 446 | } | ||
| 447 | #undef ASSIGN_VMD_DMA_OPS | ||
| 448 | #else | ||
| 449 | static void vmd_teardown_dma_ops(struct vmd_dev *vmd) {} | ||
| 450 | static void vmd_setup_dma_ops(struct vmd_dev *vmd) {} | ||
| 451 | #endif | ||
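
Each `ASSIGN_VMD_DMA_OPS()` line above expands to a guarded assignment, so `vmd->dma_ops` forwards only the operations the underlying DMA ops actually implement. For instance, the `map_page` line expands to:

```c
if (source->map_page)
	dest->map_page = vmd_map_page;
```
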
| 452 | |||
| 453 | static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus, | ||
| 454 | unsigned int devfn, int reg, int len) | ||
| 455 | { | ||
| 456 | char __iomem *addr = vmd->cfgbar + | ||
| 457 | (bus->number << 20) + (devfn << 12) + reg; | ||
| 458 | |||
| 459 | if ((addr - vmd->cfgbar) + len >= | ||
| 460 | resource_size(&vmd->dev->resource[VMD_CFGBAR])) | ||
| 461 | return NULL; | ||
| 462 | |||
| 463 | return addr; | ||
| 464 | } | ||
| 465 | |||
| 466 | /* | ||
| 467 | * CPU may deadlock if config space is not serialized on some versions of this | ||
| 468 | * hardware, so all config space access is done under a spinlock. | ||
| 469 | */ | ||
| 470 | static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg, | ||
| 471 | int len, u32 *value) | ||
| 472 | { | ||
| 473 | struct vmd_dev *vmd = vmd_from_bus(bus); | ||
| 474 | char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); | ||
| 475 | unsigned long flags; | ||
| 476 | int ret = 0; | ||
| 477 | |||
| 478 | if (!addr) | ||
| 479 | return -EFAULT; | ||
| 480 | |||
| 481 | spin_lock_irqsave(&vmd->cfg_lock, flags); | ||
| 482 | switch (len) { | ||
| 483 | case 1: | ||
| 484 | *value = readb(addr); | ||
| 485 | break; | ||
| 486 | case 2: | ||
| 487 | *value = readw(addr); | ||
| 488 | break; | ||
| 489 | case 4: | ||
| 490 | *value = readl(addr); | ||
| 491 | break; | ||
| 492 | default: | ||
| 493 | ret = -EINVAL; | ||
| 494 | break; | ||
| 495 | } | ||
| 496 | spin_unlock_irqrestore(&vmd->cfg_lock, flags); | ||
| 497 | return ret; | ||
| 498 | } | ||
| 499 | |||
| 500 | /* | ||
| 501 | * VMD h/w converts non-posted config writes to posted memory writes. The | ||
| 502 | * read-back in this function forces the completion so it returns only after | ||
| 503 | * the config space was written, as expected. | ||
| 504 | */ | ||
| 505 | static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg, | ||
| 506 | int len, u32 value) | ||
| 507 | { | ||
| 508 | struct vmd_dev *vmd = vmd_from_bus(bus); | ||
| 509 | char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len); | ||
| 510 | unsigned long flags; | ||
| 511 | int ret = 0; | ||
| 512 | |||
| 513 | if (!addr) | ||
| 514 | return -EFAULT; | ||
| 515 | |||
| 516 | spin_lock_irqsave(&vmd->cfg_lock, flags); | ||
| 517 | switch (len) { | ||
| 518 | case 1: | ||
| 519 | writeb(value, addr); | ||
| 520 | readb(addr); | ||
| 521 | break; | ||
| 522 | case 2: | ||
| 523 | writew(value, addr); | ||
| 524 | readw(addr); | ||
| 525 | break; | ||
| 526 | case 4: | ||
| 527 | writel(value, addr); | ||
| 528 | readl(addr); | ||
| 529 | break; | ||
| 530 | default: | ||
| 531 | ret = -EINVAL; | ||
| 532 | break; | ||
| 533 | } | ||
| 534 | spin_unlock_irqrestore(&vmd->cfg_lock, flags); | ||
| 535 | return ret; | ||
| 536 | } | ||
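
The read-back after each write is the classic way to flush a posted write: a read from the same device cannot pass the earlier write, so once `readl()` returns, the write has reached the hardware. The pattern in isolation, as a generic sketch rather than VMD-specific code:

```c
/* Emulate a non-posted 32-bit MMIO write: the read-back forces the
 * posted write to complete before this function returns. */
static void mmio_write_flushed(void __iomem *addr, u32 value)
{
	writel(value, addr);
	(void)readl(addr);
}
```
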
| 537 | |||
| 538 | static struct pci_ops vmd_ops = { | ||
| 539 | .read = vmd_pci_read, | ||
| 540 | .write = vmd_pci_write, | ||
| 541 | }; | ||
| 542 | |||
| 543 | static void vmd_attach_resources(struct vmd_dev *vmd) | ||
| 544 | { | ||
| 545 | vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1]; | ||
| 546 | vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2]; | ||
| 547 | } | ||
| 548 | |||
| 549 | static void vmd_detach_resources(struct vmd_dev *vmd) | ||
| 550 | { | ||
| 551 | vmd->dev->resource[VMD_MEMBAR1].child = NULL; | ||
| 552 | vmd->dev->resource[VMD_MEMBAR2].child = NULL; | ||
| 553 | } | ||
| 554 | |||
| 555 | /* | ||
| 556 | * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. | ||
| 557 | * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower | ||
| 558 | * 16 bits are the PCI Segment Group (domain) number. Other bits are | ||
| 559 | * currently reserved. | ||
| 560 | */ | ||
| 561 | static int vmd_find_free_domain(void) | ||
| 562 | { | ||
| 563 | int domain = 0xffff; | ||
| 564 | struct pci_bus *bus = NULL; | ||
| 565 | |||
| 566 | while ((bus = pci_find_next_bus(bus)) != NULL) | ||
| 567 | domain = max_t(int, domain, pci_domain_nr(bus)); | ||
| 568 | return domain + 1; | ||
| 569 | } | ||
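
Worked example: on a host whose only existing segment is domain 0, the loop leaves `domain = max(0xffff, 0) = 0xffff` and the function returns 0x10000, the first domain above the 16-bit _SEG space. A second VMD instance enumerated later would get 0x10001, since the first VMD's root bus (domain 0x10000) is by then visible to `pci_find_next_bus()`.
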
| 570 | |||
| 571 | static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) | ||
| 572 | { | ||
| 573 | struct pci_sysdata *sd = &vmd->sysdata; | ||
| 574 | struct fwnode_handle *fn; | ||
| 575 | struct resource *res; | ||
| 576 | u32 upper_bits; | ||
| 577 | unsigned long flags; | ||
| 578 | LIST_HEAD(resources); | ||
| 579 | resource_size_t offset[2] = {0}; | ||
| 580 | resource_size_t membar2_offset = 0x2000, busn_start = 0; | ||
| 581 | |||
| 582 | /* | ||
| 583 | * Shadow registers may exist in certain VMD device ids which allow | ||
| 584 | * guests to correctly assign host physical addresses to the root ports | ||
| 585 | * and child devices. These registers will either return the host value | ||
| 586 | * or 0, depending on an enable bit in the VMD device. | ||
| 587 | */ | ||
| 588 | if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) { | ||
| 589 | u32 vmlock; | ||
| 590 | int ret; | ||
| 591 | |||
| 592 | membar2_offset = 0x2018; | ||
| 593 | ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock); | ||
| 594 | if (ret || vmlock == ~0) | ||
| 595 | return -ENODEV; | ||
| 596 | |||
| 597 | if (MB2_SHADOW_EN(vmlock)) { | ||
| 598 | void __iomem *membar2; | ||
| 599 | |||
| 600 | membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0); | ||
| 601 | if (!membar2) | ||
| 602 | return -ENOMEM; | ||
| 603 | offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - | ||
| 604 | readq(membar2 + 0x2008); | ||
| 605 | offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - | ||
| 606 | readq(membar2 + 0x2010); | ||
| 607 | pci_iounmap(vmd->dev, membar2); | ||
| 608 | } | ||
| 609 | } | ||
| 610 | |||
| 611 | /* | ||
| 612 | * Certain VMD devices may have a root port configuration option which | ||
| 613 | * limits the bus range to between 0-127 or 128-255 | ||
| 614 | */ | ||
| 615 | if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) { | ||
| 616 | u32 vmcap, vmconfig; | ||
| 617 | |||
| 618 | pci_read_config_dword(vmd->dev, PCI_REG_VMCAP, &vmcap); | ||
| 619 | pci_read_config_dword(vmd->dev, PCI_REG_VMCONFIG, &vmconfig); | ||
| 620 | if (BUS_RESTRICT_CAP(vmcap) && | ||
| 621 | (BUS_RESTRICT_CFG(vmconfig) == 0x1)) | ||
| 622 | busn_start = 128; | ||
| 623 | } | ||
| 624 | |||
| 625 | res = &vmd->dev->resource[VMD_CFGBAR]; | ||
| 626 | vmd->resources[0] = (struct resource) { | ||
| 627 | .name = "VMD CFGBAR", | ||
| 628 | .start = busn_start, | ||
| 629 | .end = busn_start + (resource_size(res) >> 20) - 1, | ||
| 630 | .flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED, | ||
| 631 | }; | ||
| 632 | |||
| 633 | /* | ||
| 634 | * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can | ||
| 635 | * put 32-bit resources in the window. | ||
| 636 | * | ||
| 637 | * There's no hardware reason why a 64-bit window *couldn't* | ||
| 638 | * contain a 32-bit resource, but pbus_size_mem() computes the | ||
| 639 | * bridge window size assuming a 64-bit window will contain no | ||
| 640 | * 32-bit resources. __pci_assign_resource() enforces that | ||
| 641 | * artificial restriction to make sure everything will fit. | ||
| 642 | * | ||
| 643 | * The only way we could use a 64-bit non-prefetchable MEMBAR is | ||
| 644 | * if its address is <4GB so that we can convert it to a 32-bit | ||
| 645 | * resource. To be visible to the host OS, all VMD endpoints must | ||
| 646 | * be initially configured by platform BIOS, which includes setting | ||
| 647 | * up these resources. We can assume the device is configured | ||
| 648 | * according to the platform needs. | ||
| 649 | */ | ||
| 650 | res = &vmd->dev->resource[VMD_MEMBAR1]; | ||
| 651 | upper_bits = upper_32_bits(res->end); | ||
| 652 | flags = res->flags & ~IORESOURCE_SIZEALIGN; | ||
| 653 | if (!upper_bits) | ||
| 654 | flags &= ~IORESOURCE_MEM_64; | ||
| 655 | vmd->resources[1] = (struct resource) { | ||
| 656 | .name = "VMD MEMBAR1", | ||
| 657 | .start = res->start, | ||
| 658 | .end = res->end, | ||
| 659 | .flags = flags, | ||
| 660 | .parent = res, | ||
| 661 | }; | ||
| 662 | |||
| 663 | res = &vmd->dev->resource[VMD_MEMBAR2]; | ||
| 664 | upper_bits = upper_32_bits(res->end); | ||
| 665 | flags = res->flags & ~IORESOURCE_SIZEALIGN; | ||
| 666 | if (!upper_bits) | ||
| 667 | flags &= ~IORESOURCE_MEM_64; | ||
| 668 | vmd->resources[2] = (struct resource) { | ||
| 669 | .name = "VMD MEMBAR2", | ||
| 670 | .start = res->start + membar2_offset, | ||
| 671 | .end = res->end, | ||
| 672 | .flags = flags, | ||
| 673 | .parent = res, | ||
| 674 | }; | ||
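| | /* | ||
| | * MEMBAR2 is published to children starting at membar2_offset: the | ||
| | * first 0x2000 bytes appear to be reserved for the VMD endpoint | ||
| | * itself (its MSI-X table and PBA), plus another 0x18 bytes when the | ||
| | * shadow registers are present. | ||
| | */ | ||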
| 675 | |||
| 676 | sd->vmd_domain = true; | ||
| 677 | sd->domain = vmd_find_free_domain(); | ||
| 678 | if (sd->domain < 0) | ||
| 679 | return sd->domain; | ||
| 680 | |||
| 681 | sd->node = pcibus_to_node(vmd->dev->bus); | ||
| 682 | |||
| 683 | fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); | ||
| 684 | if (!fn) | ||
| 685 | return -ENODEV; | ||
| 686 | |||
| 687 | vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, | ||
| 688 | x86_vector_domain); | ||
| 689 | irq_domain_free_fwnode(fn); | ||
| 690 | if (!vmd->irq_domain) | ||
| 691 | return -ENODEV; | ||
| 692 | |||
| 693 | pci_add_resource(&resources, &vmd->resources[0]); | ||
| 694 | pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]); | ||
| 695 | pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]); | ||
| 696 | |||
| 697 | vmd->bus = pci_create_root_bus(&vmd->dev->dev, busn_start, &vmd_ops, | ||
| 698 | sd, &resources); | ||
| 699 | if (!vmd->bus) { | ||
| 700 | pci_free_resource_list(&resources); | ||
| 701 | irq_domain_remove(vmd->irq_domain); | ||
| 702 | return -ENODEV; | ||
| 703 | } | ||
| 704 | |||
| 705 | vmd_attach_resources(vmd); | ||
| 706 | vmd_setup_dma_ops(vmd); | ||
| 707 | dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain); | ||
| 708 | pci_rescan_bus(vmd->bus); | ||
| 709 | |||
| 710 | WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj, | ||
| 711 | "domain"), "Can't create symlink to domain\n"); | ||
| 712 | return 0; | ||
| 713 | } | ||
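| | /* | ||
| | * Ordering matters above: the resource windows, DMA ops and MSI irq | ||
| | * domain must all be in place before pci_rescan_bus() enumerates the | ||
| | * child devices, since every child inherits them from this root bus. | ||
| | */ | ||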
| 714 | |||
| 715 | static irqreturn_t vmd_irq(int irq, void *data) | ||
| 716 | { | ||
| 717 | struct vmd_irq_list *irqs = data; | ||
| 718 | struct vmd_irq *vmdirq; | ||
| 719 | int idx; | ||
| 720 | |||
| 721 | idx = srcu_read_lock(&irqs->srcu); | ||
| 722 | list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node) | ||
| 723 | generic_handle_irq(vmdirq->virq); | ||
| 724 | srcu_read_unlock(&irqs->srcu, idx); | ||
| 725 | |||
| 726 | return IRQ_HANDLED; | ||
| 727 | } | ||
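| | /* | ||
| | * vmd_irq() fans one VMD MSI-X vector out to every child interrupt | ||
| | * hooked onto that vector's list. SRCU gives the driver a private | ||
| | * grace-period domain, so unhooking an entry only waits for these | ||
| | * readers instead of a global RCU grace period. | ||
| | */ | ||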
| 728 | |||
| 729 | static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) | ||
| 730 | { | ||
| 731 | struct vmd_dev *vmd; | ||
| 732 | int i, err; | ||
| 733 | |||
| 734 | if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20)) | ||
| 735 | return -ENOMEM; | ||
| 736 | |||
| 737 | vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL); | ||
| 738 | if (!vmd) | ||
| 739 | return -ENOMEM; | ||
| 740 | |||
| 741 | vmd->dev = dev; | ||
| 742 | err = pcim_enable_device(dev); | ||
| 743 | if (err < 0) | ||
| 744 | return err; | ||
| 745 | |||
| 746 | vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0); | ||
| 747 | if (!vmd->cfgbar) | ||
| 748 | return -ENOMEM; | ||
| 749 | |||
| 750 | pci_set_master(dev); | ||
| 751 | if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) && | ||
| 752 | dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) | ||
| 753 | return -ENODEV; | ||
| 754 | |||
| 755 | vmd->msix_count = pci_msix_vec_count(dev); | ||
| 756 | if (vmd->msix_count < 0) | ||
| 757 | return -ENODEV; | ||
| 758 | |||
| 759 | vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count, | ||
| 760 | PCI_IRQ_MSIX); | ||
| 761 | if (vmd->msix_count < 0) | ||
| 762 | return vmd->msix_count; | ||
| 763 | |||
| 764 | vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs), | ||
| 765 | GFP_KERNEL); | ||
| 766 | if (!vmd->irqs) | ||
| 767 | return -ENOMEM; | ||
| 768 | |||
| 769 | for (i = 0; i < vmd->msix_count; i++) { | ||
| 770 | err = init_srcu_struct(&vmd->irqs[i].srcu); | ||
| 771 | if (err) | ||
| 772 | return err; | ||
| 773 | |||
| 774 | INIT_LIST_HEAD(&vmd->irqs[i].irq_list); | ||
| 775 | err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), | ||
| 776 | vmd_irq, IRQF_NO_THREAD, | ||
| 777 | "vmd", &vmd->irqs[i]); | ||
| 778 | if (err) | ||
| 779 | return err; | ||
| 780 | } | ||
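| | /* | ||
| | * Every vector shares the same vmd_irq() handler; child device | ||
| | * interrupts are later multiplexed onto these vectors through the | ||
| | * per-vector irq_list initialized above. | ||
| | */ | ||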
| 781 | |||
| 782 | spin_lock_init(&vmd->cfg_lock); | ||
| 783 | pci_set_drvdata(dev, vmd); | ||
| 784 | err = vmd_enable_domain(vmd, (unsigned long) id->driver_data); | ||
| 785 | if (err) | ||
| 786 | return err; | ||
| 787 | |||
| 788 | dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n", | ||
| 789 | vmd->sysdata.domain); | ||
| 790 | return 0; | ||
| 791 | } | ||
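| | /* | ||
| | * The devm_*()/pcim_*() helpers above let the early error paths | ||
| | * simply return: allocations, BAR mappings and requested IRQs are | ||
| | * released automatically when probe fails or the device is unbound. | ||
| | */ | ||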
| 792 | |||
| 793 | static void vmd_cleanup_srcu(struct vmd_dev *vmd) | ||
| 794 | { | ||
| 795 | int i; | ||
| 796 | |||
| 797 | for (i = 0; i < vmd->msix_count; i++) | ||
| 798 | cleanup_srcu_struct(&vmd->irqs[i].srcu); | ||
| 799 | } | ||
| 800 | |||
| 801 | static void vmd_remove(struct pci_dev *dev) | ||
| 802 | { | ||
| 803 | struct vmd_dev *vmd = pci_get_drvdata(dev); | ||
| 804 | |||
| 805 | vmd_detach_resources(vmd); | ||
| 806 | sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); | ||
| 807 | pci_stop_root_bus(vmd->bus); | ||
| 808 | pci_remove_root_bus(vmd->bus); | ||
| 809 | vmd_cleanup_srcu(vmd); | ||
| 810 | vmd_teardown_dma_ops(vmd); | ||
| 811 | irq_domain_remove(vmd->irq_domain); | ||
| 812 | } | ||
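| | /* | ||
| | * Teardown runs roughly in reverse of vmd_enable_domain(): the root | ||
| | * bus is stopped and removed first, so no child can still be raising | ||
| | * interrupts by the time the SRCU state, DMA ops and irq domain go | ||
| | * away. | ||
| | */ | ||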
| 813 | |||
| 814 | #ifdef CONFIG_PM_SLEEP | ||
| 815 | static int vmd_suspend(struct device *dev) | ||
| 816 | { | ||
| 817 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 818 | struct vmd_dev *vmd = pci_get_drvdata(pdev); | ||
| 819 | int i; | ||
| 820 | |||
| 821 | for (i = 0; i < vmd->msix_count; i++) | ||
| 822 | devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]); | ||
| 823 | |||
| 824 | pci_save_state(pdev); | ||
| 825 | return 0; | ||
| 826 | } | ||
| 827 | |||
| 828 | static int vmd_resume(struct device *dev) | ||
| 829 | { | ||
| 830 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 831 | struct vmd_dev *vmd = pci_get_drvdata(pdev); | ||
| 832 | int err, i; | ||
| 833 | |||
| 834 | for (i = 0; i < vmd->msix_count; i++) { | ||
| 835 | err = devm_request_irq(dev, pci_irq_vector(pdev, i), | ||
| 836 | vmd_irq, IRQF_NO_THREAD, | ||
| 837 | "vmd", &vmd->irqs[i]); | ||
| 838 | if (err) | ||
| 839 | return err; | ||
| 840 | } | ||
| 841 | |||
| 842 | pci_restore_state(pdev); | ||
| 843 | return 0; | ||
| 844 | } | ||
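| | /* | ||
| | * The per-vector IRQs are freed on suspend and re-requested on | ||
| | * resume, while pci_save_state()/pci_restore_state() preserve the | ||
| | * VMD endpoint's own config space across the power transition. | ||
| | */ | ||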
| 845 | #endif | ||
| 846 | static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); | ||
| 847 | |||
| 848 | static const struct pci_device_id vmd_ids[] = { | ||
| 849 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, | ||
| 850 | {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), | ||
| 851 | .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | | ||
| 852 | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, | ||
| 853 | {0,} | ||
| 854 | }; | ||
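| | /* | ||
| | * Feature flags ride in driver_data: only the 28C0 part advertises | ||
| | * the MEMBAR shadow registers and bus restrictions, while the 201D | ||
| | * part takes the no-features path through vmd_enable_domain(). | ||
| | */ | ||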
| 855 | MODULE_DEVICE_TABLE(pci, vmd_ids); | ||
| 856 | |||
| 857 | static struct pci_driver vmd_drv = { | ||
| 858 | .name = "vmd", | ||
| 859 | .id_table = vmd_ids, | ||
| 860 | .probe = vmd_probe, | ||
| 861 | .remove = vmd_remove, | ||
| 862 | .driver = { | ||
| 863 | .pm = &vmd_dev_pm_ops, | ||
| 864 | }, | ||
| 865 | }; | ||
| 866 | module_pci_driver(vmd_drv); | ||
| 867 | |||
| 868 | MODULE_AUTHOR("Intel Corporation"); | ||
| 869 | MODULE_LICENSE("GPL v2"); | ||
| 870 | MODULE_VERSION("0.6"); | ||
