diff options
Diffstat (limited to 'drivers/pci')
57 files changed, 3442 insertions, 837 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig index e0cacb7b8563..c32a77fc8b03 100644 --- a/drivers/pci/Kconfig +++ b/drivers/pci/Kconfig | |||
| @@ -86,6 +86,9 @@ config PCI_ATS | |||
| 86 | config PCI_ECAM | 86 | config PCI_ECAM |
| 87 | bool | 87 | bool |
| 88 | 88 | ||
| 89 | config PCI_LOCKLESS_CONFIG | ||
| 90 | bool | ||
| 91 | |||
| 89 | config PCI_IOV | 92 | config PCI_IOV |
| 90 | bool "PCI IOV support" | 93 | bool "PCI IOV support" |
| 91 | depends on PCI | 94 | depends on PCI |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 462c1f5f5546..66a21acad952 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
| @@ -4,7 +4,8 @@ | |||
| 4 | 4 | ||
| 5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ | 5 | obj-y += access.o bus.o probe.o host-bridge.o remove.o pci.o \ |
| 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ | 6 | pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ |
| 7 | irq.o vpd.o setup-bus.o vc.o mmap.o | 7 | irq.o vpd.o setup-bus.o vc.o mmap.o setup-irq.o |
| 8 | |||
| 8 | obj-$(CONFIG_PROC_FS) += proc.o | 9 | obj-$(CONFIG_PROC_FS) += proc.o |
| 9 | obj-$(CONFIG_SYSFS) += slot.o | 10 | obj-$(CONFIG_SYSFS) += slot.o |
| 10 | 11 | ||
| @@ -29,20 +30,6 @@ obj-$(CONFIG_PCI_ATS) += ats.o | |||
| 29 | obj-$(CONFIG_PCI_IOV) += iov.o | 30 | obj-$(CONFIG_PCI_IOV) += iov.o |
| 30 | 31 | ||
| 31 | # | 32 | # |
| 32 | # Some architectures use the generic PCI setup functions | ||
| 33 | # | ||
| 34 | obj-$(CONFIG_ALPHA) += setup-irq.o | ||
| 35 | obj-$(CONFIG_ARC) += setup-irq.o | ||
| 36 | obj-$(CONFIG_ARM) += setup-irq.o | ||
| 37 | obj-$(CONFIG_ARM64) += setup-irq.o | ||
| 38 | obj-$(CONFIG_UNICORE32) += setup-irq.o | ||
| 39 | obj-$(CONFIG_SUPERH) += setup-irq.o | ||
| 40 | obj-$(CONFIG_MIPS) += setup-irq.o | ||
| 41 | obj-$(CONFIG_TILE) += setup-irq.o | ||
| 42 | obj-$(CONFIG_SPARC_LEON) += setup-irq.o | ||
| 43 | obj-$(CONFIG_M68K) += setup-irq.o | ||
| 44 | |||
| 45 | # | ||
| 46 | # ACPI Related PCI FW Functions | 33 | # ACPI Related PCI FW Functions |
| 47 | # ACPI _DSM provided firmware instance and string name | 34 | # ACPI _DSM provided firmware instance and string name |
| 48 | # | 35 | # |
diff --git a/drivers/pci/access.c b/drivers/pci/access.c index c80e37a69305..913d6722ece9 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c | |||
| @@ -25,6 +25,14 @@ DEFINE_RAW_SPINLOCK(pci_lock); | |||
| 25 | #define PCI_word_BAD (pos & 1) | 25 | #define PCI_word_BAD (pos & 1) |
| 26 | #define PCI_dword_BAD (pos & 3) | 26 | #define PCI_dword_BAD (pos & 3) |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_PCI_LOCKLESS_CONFIG | ||
| 29 | # define pci_lock_config(f) do { (void)(f); } while (0) | ||
| 30 | # define pci_unlock_config(f) do { (void)(f); } while (0) | ||
| 31 | #else | ||
| 32 | # define pci_lock_config(f) raw_spin_lock_irqsave(&pci_lock, f) | ||
| 33 | # define pci_unlock_config(f) raw_spin_unlock_irqrestore(&pci_lock, f) | ||
| 34 | #endif | ||
| 35 | |||
| 28 | #define PCI_OP_READ(size, type, len) \ | 36 | #define PCI_OP_READ(size, type, len) \ |
| 29 | int pci_bus_read_config_##size \ | 37 | int pci_bus_read_config_##size \ |
| 30 | (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \ | 38 | (struct pci_bus *bus, unsigned int devfn, int pos, type *value) \ |
| @@ -33,10 +41,10 @@ int pci_bus_read_config_##size \ | |||
| 33 | unsigned long flags; \ | 41 | unsigned long flags; \ |
| 34 | u32 data = 0; \ | 42 | u32 data = 0; \ |
| 35 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 43 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
| 36 | raw_spin_lock_irqsave(&pci_lock, flags); \ | 44 | pci_lock_config(flags); \ |
| 37 | res = bus->ops->read(bus, devfn, pos, len, &data); \ | 45 | res = bus->ops->read(bus, devfn, pos, len, &data); \ |
| 38 | *value = (type)data; \ | 46 | *value = (type)data; \ |
| 39 | raw_spin_unlock_irqrestore(&pci_lock, flags); \ | 47 | pci_unlock_config(flags); \ |
| 40 | return res; \ | 48 | return res; \ |
| 41 | } | 49 | } |
| 42 | 50 | ||
| @@ -47,9 +55,9 @@ int pci_bus_write_config_##size \ | |||
| 47 | int res; \ | 55 | int res; \ |
| 48 | unsigned long flags; \ | 56 | unsigned long flags; \ |
| 49 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ | 57 | if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \ |
| 50 | raw_spin_lock_irqsave(&pci_lock, flags); \ | 58 | pci_lock_config(flags); \ |
| 51 | res = bus->ops->write(bus, devfn, pos, len, value); \ | 59 | res = bus->ops->write(bus, devfn, pos, len, value); \ |
| 52 | raw_spin_unlock_irqrestore(&pci_lock, flags); \ | 60 | pci_unlock_config(flags); \ |
| 53 | return res; \ | 61 | return res; \ |
| 54 | } | 62 | } |
| 55 | 63 | ||
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index eeb9fb2b47aa..ad8ddbbbf245 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c | |||
| @@ -153,23 +153,27 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs) | |||
| 153 | u32 max_requests; | 153 | u32 max_requests; |
| 154 | int pos; | 154 | int pos; |
| 155 | 155 | ||
| 156 | if (WARN_ON(pdev->pri_enabled)) | ||
| 157 | return -EBUSY; | ||
| 158 | |||
| 156 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | 159 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); |
| 157 | if (!pos) | 160 | if (!pos) |
| 158 | return -EINVAL; | 161 | return -EINVAL; |
| 159 | 162 | ||
| 160 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | ||
| 161 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); | 163 | pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status); |
| 162 | if ((control & PCI_PRI_CTRL_ENABLE) || | 164 | if (!(status & PCI_PRI_STATUS_STOPPED)) |
| 163 | !(status & PCI_PRI_STATUS_STOPPED)) | ||
| 164 | return -EBUSY; | 165 | return -EBUSY; |
| 165 | 166 | ||
| 166 | pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests); | 167 | pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests); |
| 167 | reqs = min(max_requests, reqs); | 168 | reqs = min(max_requests, reqs); |
| 169 | pdev->pri_reqs_alloc = reqs; | ||
| 168 | pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); | 170 | pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); |
| 169 | 171 | ||
| 170 | control |= PCI_PRI_CTRL_ENABLE; | 172 | control = PCI_PRI_CTRL_ENABLE; |
| 171 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); | 173 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); |
| 172 | 174 | ||
| 175 | pdev->pri_enabled = 1; | ||
| 176 | |||
| 173 | return 0; | 177 | return 0; |
| 174 | } | 178 | } |
| 175 | EXPORT_SYMBOL_GPL(pci_enable_pri); | 179 | EXPORT_SYMBOL_GPL(pci_enable_pri); |
| @@ -185,6 +189,9 @@ void pci_disable_pri(struct pci_dev *pdev) | |||
| 185 | u16 control; | 189 | u16 control; |
| 186 | int pos; | 190 | int pos; |
| 187 | 191 | ||
| 192 | if (WARN_ON(!pdev->pri_enabled)) | ||
| 193 | return; | ||
| 194 | |||
| 188 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | 195 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); |
| 189 | if (!pos) | 196 | if (!pos) |
| 190 | return; | 197 | return; |
| @@ -192,10 +199,34 @@ void pci_disable_pri(struct pci_dev *pdev) | |||
| 192 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | 199 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); |
| 193 | control &= ~PCI_PRI_CTRL_ENABLE; | 200 | control &= ~PCI_PRI_CTRL_ENABLE; |
| 194 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); | 201 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); |
| 202 | |||
| 203 | pdev->pri_enabled = 0; | ||
| 195 | } | 204 | } |
| 196 | EXPORT_SYMBOL_GPL(pci_disable_pri); | 205 | EXPORT_SYMBOL_GPL(pci_disable_pri); |
| 197 | 206 | ||
| 198 | /** | 207 | /** |
| 208 | * pci_restore_pri_state - Restore PRI | ||
| 209 | * @pdev: PCI device structure | ||
| 210 | */ | ||
| 211 | void pci_restore_pri_state(struct pci_dev *pdev) | ||
| 212 | { | ||
| 213 | u16 control = PCI_PRI_CTRL_ENABLE; | ||
| 214 | u32 reqs = pdev->pri_reqs_alloc; | ||
| 215 | int pos; | ||
| 216 | |||
| 217 | if (!pdev->pri_enabled) | ||
| 218 | return; | ||
| 219 | |||
| 220 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | ||
| 221 | if (!pos) | ||
| 222 | return; | ||
| 223 | |||
| 224 | pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs); | ||
| 225 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); | ||
| 226 | } | ||
| 227 | EXPORT_SYMBOL_GPL(pci_restore_pri_state); | ||
| 228 | |||
| 229 | /** | ||
| 199 | * pci_reset_pri - Resets device's PRI state | 230 | * pci_reset_pri - Resets device's PRI state |
| 200 | * @pdev: PCI device structure | 231 | * @pdev: PCI device structure |
| 201 | * | 232 | * |
| @@ -207,16 +238,14 @@ int pci_reset_pri(struct pci_dev *pdev) | |||
| 207 | u16 control; | 238 | u16 control; |
| 208 | int pos; | 239 | int pos; |
| 209 | 240 | ||
| 241 | if (WARN_ON(pdev->pri_enabled)) | ||
| 242 | return -EBUSY; | ||
| 243 | |||
| 210 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); | 244 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI); |
| 211 | if (!pos) | 245 | if (!pos) |
| 212 | return -EINVAL; | 246 | return -EINVAL; |
| 213 | 247 | ||
| 214 | pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control); | 248 | control = PCI_PRI_CTRL_RESET; |
| 215 | if (control & PCI_PRI_CTRL_ENABLE) | ||
| 216 | return -EBUSY; | ||
| 217 | |||
| 218 | control |= PCI_PRI_CTRL_RESET; | ||
| 219 | |||
| 220 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); | 249 | pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control); |
| 221 | 250 | ||
| 222 | return 0; | 251 | return 0; |
| @@ -239,16 +268,14 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) | |||
| 239 | u16 control, supported; | 268 | u16 control, supported; |
| 240 | int pos; | 269 | int pos; |
| 241 | 270 | ||
| 271 | if (WARN_ON(pdev->pasid_enabled)) | ||
| 272 | return -EBUSY; | ||
| 273 | |||
| 242 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); | 274 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); |
| 243 | if (!pos) | 275 | if (!pos) |
| 244 | return -EINVAL; | 276 | return -EINVAL; |
| 245 | 277 | ||
| 246 | pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control); | ||
| 247 | pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported); | 278 | pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported); |
| 248 | |||
| 249 | if (control & PCI_PASID_CTRL_ENABLE) | ||
| 250 | return -EINVAL; | ||
| 251 | |||
| 252 | supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; | 279 | supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV; |
| 253 | 280 | ||
| 254 | /* User wants to enable anything unsupported? */ | 281 | /* User wants to enable anything unsupported? */ |
| @@ -256,9 +283,12 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) | |||
| 256 | return -EINVAL; | 283 | return -EINVAL; |
| 257 | 284 | ||
| 258 | control = PCI_PASID_CTRL_ENABLE | features; | 285 | control = PCI_PASID_CTRL_ENABLE | features; |
| 286 | pdev->pasid_features = features; | ||
| 259 | 287 | ||
| 260 | pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); | 288 | pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); |
| 261 | 289 | ||
| 290 | pdev->pasid_enabled = 1; | ||
| 291 | |||
| 262 | return 0; | 292 | return 0; |
| 263 | } | 293 | } |
| 264 | EXPORT_SYMBOL_GPL(pci_enable_pasid); | 294 | EXPORT_SYMBOL_GPL(pci_enable_pasid); |
| @@ -266,22 +296,47 @@ EXPORT_SYMBOL_GPL(pci_enable_pasid); | |||
| 266 | /** | 296 | /** |
| 267 | * pci_disable_pasid - Disable the PASID capability | 297 | * pci_disable_pasid - Disable the PASID capability |
| 268 | * @pdev: PCI device structure | 298 | * @pdev: PCI device structure |
| 269 | * | ||
| 270 | */ | 299 | */ |
| 271 | void pci_disable_pasid(struct pci_dev *pdev) | 300 | void pci_disable_pasid(struct pci_dev *pdev) |
| 272 | { | 301 | { |
| 273 | u16 control = 0; | 302 | u16 control = 0; |
| 274 | int pos; | 303 | int pos; |
| 275 | 304 | ||
| 305 | if (WARN_ON(!pdev->pasid_enabled)) | ||
| 306 | return; | ||
| 307 | |||
| 276 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); | 308 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); |
| 277 | if (!pos) | 309 | if (!pos) |
| 278 | return; | 310 | return; |
| 279 | 311 | ||
| 280 | pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); | 312 | pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); |
| 313 | |||
| 314 | pdev->pasid_enabled = 0; | ||
| 281 | } | 315 | } |
| 282 | EXPORT_SYMBOL_GPL(pci_disable_pasid); | 316 | EXPORT_SYMBOL_GPL(pci_disable_pasid); |
| 283 | 317 | ||
| 284 | /** | 318 | /** |
| 319 | * pci_restore_pasid_state - Restore PASID capabilities | ||
| 320 | * @pdev: PCI device structure | ||
| 321 | */ | ||
| 322 | void pci_restore_pasid_state(struct pci_dev *pdev) | ||
| 323 | { | ||
| 324 | u16 control; | ||
| 325 | int pos; | ||
| 326 | |||
| 327 | if (!pdev->pasid_enabled) | ||
| 328 | return; | ||
| 329 | |||
| 330 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID); | ||
| 331 | if (!pos) | ||
| 332 | return; | ||
| 333 | |||
| 334 | control = PCI_PASID_CTRL_ENABLE | pdev->pasid_features; | ||
| 335 | pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control); | ||
| 336 | } | ||
| 337 | EXPORT_SYMBOL_GPL(pci_restore_pasid_state); | ||
| 338 | |||
| 339 | /** | ||
| 285 | * pci_pasid_features - Check which PASID features are supported | 340 | * pci_pasid_features - Check which PASID features are supported |
| 286 | * @pdev: PCI device structure | 341 | * @pdev: PCI device structure |
| 287 | * | 342 | * |
diff --git a/drivers/pci/dwc/Kconfig b/drivers/pci/dwc/Kconfig index b7e15526d676..d275aadc47ee 100644 --- a/drivers/pci/dwc/Kconfig +++ b/drivers/pci/dwc/Kconfig | |||
| @@ -16,6 +16,7 @@ config PCIE_DW_EP | |||
| 16 | 16 | ||
| 17 | config PCI_DRA7XX | 17 | config PCI_DRA7XX |
| 18 | bool "TI DRA7xx PCIe controller" | 18 | bool "TI DRA7xx PCIe controller" |
| 19 | depends on SOC_DRA7XX || COMPILE_TEST | ||
| 19 | depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT | 20 | depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT |
| 20 | depends on OF && HAS_IOMEM && TI_PIPE3 | 21 | depends on OF && HAS_IOMEM && TI_PIPE3 |
| 21 | help | 22 | help |
| @@ -158,4 +159,14 @@ config PCIE_ARTPEC6 | |||
| 158 | Say Y here to enable PCIe controller support on Axis ARTPEC-6 | 159 | Say Y here to enable PCIe controller support on Axis ARTPEC-6 |
| 159 | SoCs. This PCIe controller uses the DesignWare core. | 160 | SoCs. This PCIe controller uses the DesignWare core. |
| 160 | 161 | ||
| 162 | config PCIE_KIRIN | ||
| 163 | depends on OF && ARM64 | ||
| 164 | bool "HiSilicon Kirin series SoCs PCIe controllers" | ||
| 165 | depends on PCI | ||
| 166 | select PCIEPORTBUS | ||
| 167 | select PCIE_DW_HOST | ||
| 168 | help | ||
| 169 | Say Y here if you want PCIe controller support | ||
| 170 | on HiSilicon Kirin series SoCs. | ||
| 171 | |||
| 161 | endmenu | 172 | endmenu |
diff --git a/drivers/pci/dwc/Makefile b/drivers/pci/dwc/Makefile index f31a8596442a..c61be9738cce 100644 --- a/drivers/pci/dwc/Makefile +++ b/drivers/pci/dwc/Makefile | |||
| @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o | |||
| 13 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o | 13 | obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o |
| 14 | obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o | 14 | obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o |
| 15 | obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o | 15 | obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o |
| 16 | obj-$(CONFIG_PCIE_KIRIN) += pcie-kirin.o | ||
| 16 | 17 | ||
| 17 | # The following drivers are for devices that use the generic ACPI | 18 | # The following drivers are for devices that use the generic ACPI |
| 18 | # pci_root.c driver but don't support standard ECAM config access. | 19 | # pci_root.c driver but don't support standard ECAM config access. |
diff --git a/drivers/pci/dwc/pci-dra7xx.c b/drivers/pci/dwc/pci-dra7xx.c index 8decf46cf525..f2fc5f47064e 100644 --- a/drivers/pci/dwc/pci-dra7xx.c +++ b/drivers/pci/dwc/pci-dra7xx.c | |||
| @@ -174,7 +174,7 @@ static int dra7xx_pcie_establish_link(struct dw_pcie *pci) | |||
| 174 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) | 174 | static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) |
| 175 | { | 175 | { |
| 176 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, | 176 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI, |
| 177 | ~LEG_EP_INTERRUPTS & ~MSI); | 177 | LEG_EP_INTERRUPTS | MSI); |
| 178 | 178 | ||
| 179 | dra7xx_pcie_writel(dra7xx, | 179 | dra7xx_pcie_writel(dra7xx, |
| 180 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, | 180 | PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MSI, |
| @@ -184,7 +184,7 @@ static void dra7xx_pcie_enable_msi_interrupts(struct dra7xx_pcie *dra7xx) | |||
| 184 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) | 184 | static void dra7xx_pcie_enable_wrapper_interrupts(struct dra7xx_pcie *dra7xx) |
| 185 | { | 185 | { |
| 186 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, | 186 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MAIN, |
| 187 | ~INTERRUPTS); | 187 | INTERRUPTS); |
| 188 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, | 188 | dra7xx_pcie_writel(dra7xx, PCIECTRL_DRA7XX_CONF_IRQENABLE_SET_MAIN, |
| 189 | INTERRUPTS); | 189 | INTERRUPTS); |
| 190 | } | 190 | } |
| @@ -208,7 +208,7 @@ static void dra7xx_pcie_host_init(struct pcie_port *pp) | |||
| 208 | dra7xx_pcie_enable_interrupts(dra7xx); | 208 | dra7xx_pcie_enable_interrupts(dra7xx); |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | static struct dw_pcie_host_ops dra7xx_pcie_host_ops = { | 211 | static const struct dw_pcie_host_ops dra7xx_pcie_host_ops = { |
| 212 | .host_init = dra7xx_pcie_host_init, | 212 | .host_init = dra7xx_pcie_host_init, |
| 213 | }; | 213 | }; |
| 214 | 214 | ||
diff --git a/drivers/pci/dwc/pci-exynos.c b/drivers/pci/dwc/pci-exynos.c index 546082ad5a3f..c78c06552590 100644 --- a/drivers/pci/dwc/pci-exynos.c +++ b/drivers/pci/dwc/pci-exynos.c | |||
| @@ -590,7 +590,7 @@ static void exynos_pcie_host_init(struct pcie_port *pp) | |||
| 590 | exynos_pcie_enable_interrupts(ep); | 590 | exynos_pcie_enable_interrupts(ep); |
| 591 | } | 591 | } |
| 592 | 592 | ||
| 593 | static struct dw_pcie_host_ops exynos_pcie_host_ops = { | 593 | static const struct dw_pcie_host_ops exynos_pcie_host_ops = { |
| 594 | .rd_own_conf = exynos_pcie_rd_own_conf, | 594 | .rd_own_conf = exynos_pcie_rd_own_conf, |
| 595 | .wr_own_conf = exynos_pcie_wr_own_conf, | 595 | .wr_own_conf = exynos_pcie_wr_own_conf, |
| 596 | .host_init = exynos_pcie_host_init, | 596 | .host_init = exynos_pcie_host_init, |
diff --git a/drivers/pci/dwc/pci-imx6.c b/drivers/pci/dwc/pci-imx6.c index 19a289b8cc94..bf5c3616e344 100644 --- a/drivers/pci/dwc/pci-imx6.c +++ b/drivers/pci/dwc/pci-imx6.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
| 25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
| 26 | #include <linux/regmap.h> | 26 | #include <linux/regmap.h> |
| 27 | #include <linux/regulator/consumer.h> | ||
| 27 | #include <linux/resource.h> | 28 | #include <linux/resource.h> |
| 28 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
| 29 | #include <linux/types.h> | 30 | #include <linux/types.h> |
| @@ -59,6 +60,7 @@ struct imx6_pcie { | |||
| 59 | u32 tx_swing_full; | 60 | u32 tx_swing_full; |
| 60 | u32 tx_swing_low; | 61 | u32 tx_swing_low; |
| 61 | int link_gen; | 62 | int link_gen; |
| 63 | struct regulator *vpcie; | ||
| 62 | }; | 64 | }; |
| 63 | 65 | ||
| 64 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ | 66 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ |
| @@ -284,6 +286,8 @@ static int imx6q_pcie_abort_handler(unsigned long addr, | |||
| 284 | 286 | ||
| 285 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | 287 | static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) |
| 286 | { | 288 | { |
| 289 | struct device *dev = imx6_pcie->pci->dev; | ||
| 290 | |||
| 287 | switch (imx6_pcie->variant) { | 291 | switch (imx6_pcie->variant) { |
| 288 | case IMX7D: | 292 | case IMX7D: |
| 289 | reset_control_assert(imx6_pcie->pciephy_reset); | 293 | reset_control_assert(imx6_pcie->pciephy_reset); |
| @@ -310,6 +314,14 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie) | |||
| 310 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); | 314 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 0 << 16); |
| 311 | break; | 315 | break; |
| 312 | } | 316 | } |
| 317 | |||
| 318 | if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { | ||
| 319 | int ret = regulator_disable(imx6_pcie->vpcie); | ||
| 320 | |||
| 321 | if (ret) | ||
| 322 | dev_err(dev, "failed to disable vpcie regulator: %d\n", | ||
| 323 | ret); | ||
| 324 | } | ||
| 313 | } | 325 | } |
| 314 | 326 | ||
| 315 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | 327 | static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) |
| @@ -376,10 +388,19 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | |||
| 376 | struct device *dev = pci->dev; | 388 | struct device *dev = pci->dev; |
| 377 | int ret; | 389 | int ret; |
| 378 | 390 | ||
| 391 | if (imx6_pcie->vpcie && !regulator_is_enabled(imx6_pcie->vpcie)) { | ||
| 392 | ret = regulator_enable(imx6_pcie->vpcie); | ||
| 393 | if (ret) { | ||
| 394 | dev_err(dev, "failed to enable vpcie regulator: %d\n", | ||
| 395 | ret); | ||
| 396 | return; | ||
| 397 | } | ||
| 398 | } | ||
| 399 | |||
| 379 | ret = clk_prepare_enable(imx6_pcie->pcie_phy); | 400 | ret = clk_prepare_enable(imx6_pcie->pcie_phy); |
| 380 | if (ret) { | 401 | if (ret) { |
| 381 | dev_err(dev, "unable to enable pcie_phy clock\n"); | 402 | dev_err(dev, "unable to enable pcie_phy clock\n"); |
| 382 | return; | 403 | goto err_pcie_phy; |
| 383 | } | 404 | } |
| 384 | 405 | ||
| 385 | ret = clk_prepare_enable(imx6_pcie->pcie_bus); | 406 | ret = clk_prepare_enable(imx6_pcie->pcie_bus); |
| @@ -439,6 +460,13 @@ err_pcie: | |||
| 439 | clk_disable_unprepare(imx6_pcie->pcie_bus); | 460 | clk_disable_unprepare(imx6_pcie->pcie_bus); |
| 440 | err_pcie_bus: | 461 | err_pcie_bus: |
| 441 | clk_disable_unprepare(imx6_pcie->pcie_phy); | 462 | clk_disable_unprepare(imx6_pcie->pcie_phy); |
| 463 | err_pcie_phy: | ||
| 464 | if (imx6_pcie->vpcie && regulator_is_enabled(imx6_pcie->vpcie) > 0) { | ||
| 465 | ret = regulator_disable(imx6_pcie->vpcie); | ||
| 466 | if (ret) | ||
| 467 | dev_err(dev, "failed to disable vpcie regulator: %d\n", | ||
| 468 | ret); | ||
| 469 | } | ||
| 442 | } | 470 | } |
| 443 | 471 | ||
| 444 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) | 472 | static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie) |
| @@ -629,7 +657,7 @@ static int imx6_pcie_link_up(struct dw_pcie *pci) | |||
| 629 | PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; | 657 | PCIE_PHY_DEBUG_R1_XMLH_LINK_UP; |
| 630 | } | 658 | } |
| 631 | 659 | ||
| 632 | static struct dw_pcie_host_ops imx6_pcie_host_ops = { | 660 | static const struct dw_pcie_host_ops imx6_pcie_host_ops = { |
| 633 | .host_init = imx6_pcie_host_init, | 661 | .host_init = imx6_pcie_host_init, |
| 634 | }; | 662 | }; |
| 635 | 663 | ||
| @@ -802,6 +830,13 @@ static int imx6_pcie_probe(struct platform_device *pdev) | |||
| 802 | if (ret) | 830 | if (ret) |
| 803 | imx6_pcie->link_gen = 1; | 831 | imx6_pcie->link_gen = 1; |
| 804 | 832 | ||
| 833 | imx6_pcie->vpcie = devm_regulator_get_optional(&pdev->dev, "vpcie"); | ||
| 834 | if (IS_ERR(imx6_pcie->vpcie)) { | ||
| 835 | if (PTR_ERR(imx6_pcie->vpcie) == -EPROBE_DEFER) | ||
| 836 | return -EPROBE_DEFER; | ||
| 837 | imx6_pcie->vpcie = NULL; | ||
| 838 | } | ||
| 839 | |||
| 805 | platform_set_drvdata(pdev, imx6_pcie); | 840 | platform_set_drvdata(pdev, imx6_pcie); |
| 806 | 841 | ||
| 807 | ret = imx6_add_pcie_port(imx6_pcie, pdev); | 842 | ret = imx6_add_pcie_port(imx6_pcie, pdev); |
diff --git a/drivers/pci/dwc/pci-keystone.c b/drivers/pci/dwc/pci-keystone.c index fcc9723bad6e..4783cec1f78d 100644 --- a/drivers/pci/dwc/pci-keystone.c +++ b/drivers/pci/dwc/pci-keystone.c | |||
| @@ -291,7 +291,7 @@ static void __init ks_pcie_host_init(struct pcie_port *pp) | |||
| 291 | "Asynchronous external abort"); | 291 | "Asynchronous external abort"); |
| 292 | } | 292 | } |
| 293 | 293 | ||
| 294 | static struct dw_pcie_host_ops keystone_pcie_host_ops = { | 294 | static const struct dw_pcie_host_ops keystone_pcie_host_ops = { |
| 295 | .rd_other_conf = ks_dw_pcie_rd_other_conf, | 295 | .rd_other_conf = ks_dw_pcie_rd_other_conf, |
| 296 | .wr_other_conf = ks_dw_pcie_wr_other_conf, | 296 | .wr_other_conf = ks_dw_pcie_wr_other_conf, |
| 297 | .host_init = ks_pcie_host_init, | 297 | .host_init = ks_pcie_host_init, |
diff --git a/drivers/pci/dwc/pci-layerscape.c b/drivers/pci/dwc/pci-layerscape.c index 27d638c4e134..fd861289ad8b 100644 --- a/drivers/pci/dwc/pci-layerscape.c +++ b/drivers/pci/dwc/pci-layerscape.c | |||
| @@ -39,7 +39,7 @@ struct ls_pcie_drvdata { | |||
| 39 | u32 lut_offset; | 39 | u32 lut_offset; |
| 40 | u32 ltssm_shift; | 40 | u32 ltssm_shift; |
| 41 | u32 lut_dbg; | 41 | u32 lut_dbg; |
| 42 | struct dw_pcie_host_ops *ops; | 42 | const struct dw_pcie_host_ops *ops; |
| 43 | const struct dw_pcie_ops *dw_pcie_ops; | 43 | const struct dw_pcie_ops *dw_pcie_ops; |
| 44 | }; | 44 | }; |
| 45 | 45 | ||
| @@ -185,12 +185,12 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp, | |||
| 185 | return 0; | 185 | return 0; |
| 186 | } | 186 | } |
| 187 | 187 | ||
| 188 | static struct dw_pcie_host_ops ls1021_pcie_host_ops = { | 188 | static const struct dw_pcie_host_ops ls1021_pcie_host_ops = { |
| 189 | .host_init = ls1021_pcie_host_init, | 189 | .host_init = ls1021_pcie_host_init, |
| 190 | .msi_host_init = ls_pcie_msi_host_init, | 190 | .msi_host_init = ls_pcie_msi_host_init, |
| 191 | }; | 191 | }; |
| 192 | 192 | ||
| 193 | static struct dw_pcie_host_ops ls_pcie_host_ops = { | 193 | static const struct dw_pcie_host_ops ls_pcie_host_ops = { |
| 194 | .host_init = ls_pcie_host_init, | 194 | .host_init = ls_pcie_host_init, |
| 195 | .msi_host_init = ls_pcie_msi_host_init, | 195 | .msi_host_init = ls_pcie_msi_host_init, |
| 196 | }; | 196 | }; |
diff --git a/drivers/pci/dwc/pcie-armada8k.c b/drivers/pci/dwc/pcie-armada8k.c index 495b023042b3..ea8f34af6a85 100644 --- a/drivers/pci/dwc/pcie-armada8k.c +++ b/drivers/pci/dwc/pcie-armada8k.c | |||
| @@ -160,7 +160,7 @@ static irqreturn_t armada8k_pcie_irq_handler(int irq, void *arg) | |||
| 160 | return IRQ_HANDLED; | 160 | return IRQ_HANDLED; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | static struct dw_pcie_host_ops armada8k_pcie_host_ops = { | 163 | static const struct dw_pcie_host_ops armada8k_pcie_host_ops = { |
| 164 | .host_init = armada8k_pcie_host_init, | 164 | .host_init = armada8k_pcie_host_init, |
| 165 | }; | 165 | }; |
| 166 | 166 | ||
diff --git a/drivers/pci/dwc/pcie-artpec6.c b/drivers/pci/dwc/pcie-artpec6.c index 82a04acc42fd..01c6f7823672 100644 --- a/drivers/pci/dwc/pcie-artpec6.c +++ b/drivers/pci/dwc/pcie-artpec6.c | |||
| @@ -184,7 +184,7 @@ static void artpec6_pcie_host_init(struct pcie_port *pp) | |||
| 184 | artpec6_pcie_enable_interrupts(artpec6_pcie); | 184 | artpec6_pcie_enable_interrupts(artpec6_pcie); |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | static struct dw_pcie_host_ops artpec6_pcie_host_ops = { | 187 | static const struct dw_pcie_host_ops artpec6_pcie_host_ops = { |
| 188 | .host_init = artpec6_pcie_host_init, | 188 | .host_init = artpec6_pcie_host_init, |
| 189 | }; | 189 | }; |
| 190 | 190 | ||
diff --git a/drivers/pci/dwc/pcie-designware-host.c b/drivers/pci/dwc/pcie-designware-host.c index 28ed32ba4f1b..d29c020da082 100644 --- a/drivers/pci/dwc/pcie-designware-host.c +++ b/drivers/pci/dwc/pcie-designware-host.c | |||
| @@ -280,9 +280,9 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 280 | struct device_node *np = dev->of_node; | 280 | struct device_node *np = dev->of_node; |
| 281 | struct platform_device *pdev = to_platform_device(dev); | 281 | struct platform_device *pdev = to_platform_device(dev); |
| 282 | struct pci_bus *bus, *child; | 282 | struct pci_bus *bus, *child; |
| 283 | struct pci_host_bridge *bridge; | ||
| 283 | struct resource *cfg_res; | 284 | struct resource *cfg_res; |
| 284 | int i, ret; | 285 | int i, ret; |
| 285 | LIST_HEAD(res); | ||
| 286 | struct resource_entry *win, *tmp; | 286 | struct resource_entry *win, *tmp; |
| 287 | 287 | ||
| 288 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); | 288 | cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); |
| @@ -295,16 +295,21 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 295 | dev_err(dev, "missing *config* reg space\n"); | 295 | dev_err(dev, "missing *config* reg space\n"); |
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); | 298 | bridge = pci_alloc_host_bridge(0); |
| 299 | if (!bridge) | ||
| 300 | return -ENOMEM; | ||
| 301 | |||
| 302 | ret = of_pci_get_host_bridge_resources(np, 0, 0xff, | ||
| 303 | &bridge->windows, &pp->io_base); | ||
| 299 | if (ret) | 304 | if (ret) |
| 300 | return ret; | 305 | return ret; |
| 301 | 306 | ||
| 302 | ret = devm_request_pci_bus_resources(dev, &res); | 307 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); |
| 303 | if (ret) | 308 | if (ret) |
| 304 | goto error; | 309 | goto error; |
| 305 | 310 | ||
| 306 | /* Get the I/O and memory ranges from DT */ | 311 | /* Get the I/O and memory ranges from DT */ |
| 307 | resource_list_for_each_entry_safe(win, tmp, &res) { | 312 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { |
| 308 | switch (resource_type(win->res)) { | 313 | switch (resource_type(win->res)) { |
| 309 | case IORESOURCE_IO: | 314 | case IORESOURCE_IO: |
| 310 | ret = pci_remap_iospace(win->res, pp->io_base); | 315 | ret = pci_remap_iospace(win->res, pp->io_base); |
| @@ -400,27 +405,27 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 400 | pp->ops->host_init(pp); | 405 | pp->ops->host_init(pp); |
| 401 | 406 | ||
| 402 | pp->root_bus_nr = pp->busn->start; | 407 | pp->root_bus_nr = pp->busn->start; |
| 408 | |||
| 409 | bridge->dev.parent = dev; | ||
| 410 | bridge->sysdata = pp; | ||
| 411 | bridge->busnr = pp->root_bus_nr; | ||
| 412 | bridge->ops = &dw_pcie_ops; | ||
| 413 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 414 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 403 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | 415 | if (IS_ENABLED(CONFIG_PCI_MSI)) { |
| 404 | bus = pci_scan_root_bus_msi(dev, pp->root_bus_nr, | 416 | bridge->msi = &dw_pcie_msi_chip; |
| 405 | &dw_pcie_ops, pp, &res, | ||
| 406 | &dw_pcie_msi_chip); | ||
| 407 | dw_pcie_msi_chip.dev = dev; | 417 | dw_pcie_msi_chip.dev = dev; |
| 408 | } else | ||
| 409 | bus = pci_scan_root_bus(dev, pp->root_bus_nr, &dw_pcie_ops, | ||
| 410 | pp, &res); | ||
| 411 | if (!bus) { | ||
| 412 | ret = -ENOMEM; | ||
| 413 | goto error; | ||
| 414 | } | 418 | } |
| 415 | 419 | ||
| 420 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 421 | if (ret) | ||
| 422 | goto error; | ||
| 423 | |||
| 424 | bus = bridge->bus; | ||
| 425 | |||
| 416 | if (pp->ops->scan_bus) | 426 | if (pp->ops->scan_bus) |
| 417 | pp->ops->scan_bus(pp); | 427 | pp->ops->scan_bus(pp); |
| 418 | 428 | ||
| 419 | #ifdef CONFIG_ARM | ||
| 420 | /* support old dtbs that incorrectly describe IRQs */ | ||
| 421 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
| 422 | #endif | ||
| 423 | |||
| 424 | pci_bus_size_bridges(bus); | 429 | pci_bus_size_bridges(bus); |
| 425 | pci_bus_assign_resources(bus); | 430 | pci_bus_assign_resources(bus); |
| 426 | 431 | ||
| @@ -431,7 +436,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 431 | return 0; | 436 | return 0; |
| 432 | 437 | ||
| 433 | error: | 438 | error: |
| 434 | pci_free_resource_list(&res); | 439 | pci_free_host_bridge(bridge); |
| 435 | return ret; | 440 | return ret; |
| 436 | } | 441 | } |
| 437 | 442 | ||
diff --git a/drivers/pci/dwc/pcie-designware-plat.c b/drivers/pci/dwc/pcie-designware-plat.c index 32091b32f6e1..091b4e7ad059 100644 --- a/drivers/pci/dwc/pcie-designware-plat.c +++ b/drivers/pci/dwc/pcie-designware-plat.c | |||
| @@ -46,7 +46,7 @@ static void dw_plat_pcie_host_init(struct pcie_port *pp) | |||
| 46 | dw_pcie_msi_init(pp); | 46 | dw_pcie_msi_init(pp); |
| 47 | } | 47 | } |
| 48 | 48 | ||
| 49 | static struct dw_pcie_host_ops dw_plat_pcie_host_ops = { | 49 | static const struct dw_pcie_host_ops dw_plat_pcie_host_ops = { |
| 50 | .host_init = dw_plat_pcie_host_init, | 50 | .host_init = dw_plat_pcie_host_init, |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| @@ -67,7 +67,8 @@ static int dw_plat_add_pcie_port(struct pcie_port *pp, | |||
| 67 | 67 | ||
| 68 | ret = devm_request_irq(dev, pp->msi_irq, | 68 | ret = devm_request_irq(dev, pp->msi_irq, |
| 69 | dw_plat_pcie_msi_irq_handler, | 69 | dw_plat_pcie_msi_irq_handler, |
| 70 | IRQF_SHARED, "dw-plat-pcie-msi", pp); | 70 | IRQF_SHARED | IRQF_NO_THREAD, |
| 71 | "dw-plat-pcie-msi", pp); | ||
| 71 | if (ret) { | 72 | if (ret) { |
| 72 | dev_err(dev, "failed to request MSI IRQ\n"); | 73 | dev_err(dev, "failed to request MSI IRQ\n"); |
| 73 | return ret; | 74 | return ret; |
diff --git a/drivers/pci/dwc/pcie-designware.h b/drivers/pci/dwc/pcie-designware.h index c6a840575796..b4d2a89f8e58 100644 --- a/drivers/pci/dwc/pcie-designware.h +++ b/drivers/pci/dwc/pcie-designware.h | |||
| @@ -162,7 +162,7 @@ struct pcie_port { | |||
| 162 | struct resource *mem; | 162 | struct resource *mem; |
| 163 | struct resource *busn; | 163 | struct resource *busn; |
| 164 | int irq; | 164 | int irq; |
| 165 | struct dw_pcie_host_ops *ops; | 165 | const struct dw_pcie_host_ops *ops; |
| 166 | int msi_irq; | 166 | int msi_irq; |
| 167 | struct irq_domain *irq_domain; | 167 | struct irq_domain *irq_domain; |
| 168 | unsigned long msi_data; | 168 | unsigned long msi_data; |
diff --git a/drivers/pci/dwc/pcie-kirin.c b/drivers/pci/dwc/pcie-kirin.c new file mode 100644 index 000000000000..33fddb9f6739 --- /dev/null +++ b/drivers/pci/dwc/pcie-kirin.c | |||
| @@ -0,0 +1,517 @@ | |||
| 1 | /* | ||
| 2 | * PCIe host controller driver for Kirin Phone SoCs | ||
| 3 | * | ||
 * Copyright (C) 2017 Hisilicon Electronics Co., Ltd.
| 5 | * http://www.huawei.com | ||
| 6 | * | ||
| 7 | * Author: Xiaowei Song <songxiaowei@huawei.com> | ||
| 8 | * | ||
| 9 | * This program is free software; you can redistribute it and/or modify | ||
| 10 | * it under the terms of the GNU General Public License version 2 as | ||
| 11 | * published by the Free Software Foundation. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <asm/compiler.h> | ||
| 15 | #include <linux/compiler.h> | ||
| 16 | #include <linux/clk.h> | ||
| 17 | #include <linux/delay.h> | ||
| 18 | #include <linux/err.h> | ||
| 19 | #include <linux/gpio.h> | ||
| 20 | #include <linux/interrupt.h> | ||
| 21 | #include <linux/mfd/syscon.h> | ||
| 22 | #include <linux/of_address.h> | ||
| 23 | #include <linux/of_gpio.h> | ||
| 24 | #include <linux/of_pci.h> | ||
| 25 | #include <linux/pci.h> | ||
| 26 | #include <linux/pci_regs.h> | ||
| 27 | #include <linux/platform_device.h> | ||
| 28 | #include <linux/regmap.h> | ||
| 29 | #include <linux/resource.h> | ||
| 30 | #include <linux/types.h> | ||
| 31 | #include "pcie-designware.h" | ||
| 32 | |||
| 33 | #define to_kirin_pcie(x) dev_get_drvdata((x)->dev) | ||
| 34 | |||
| 35 | #define REF_CLK_FREQ 100000000 | ||
| 36 | |||
| 37 | /* PCIe ELBI registers */ | ||
| 38 | #define SOC_PCIECTRL_CTRL0_ADDR 0x000 | ||
| 39 | #define SOC_PCIECTRL_CTRL1_ADDR 0x004 | ||
| 40 | #define SOC_PCIEPHY_CTRL2_ADDR 0x008 | ||
| 41 | #define SOC_PCIEPHY_CTRL3_ADDR 0x00c | ||
| 42 | #define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21) | ||
| 43 | |||
| 44 | /* info located in APB */ | ||
| 45 | #define PCIE_APP_LTSSM_ENABLE 0x01c | ||
| 46 | #define PCIE_APB_PHY_CTRL0 0x0 | ||
| 47 | #define PCIE_APB_PHY_CTRL1 0x4 | ||
| 48 | #define PCIE_APB_PHY_STATUS0 0x400 | ||
| 49 | #define PCIE_LINKUP_ENABLE (0x8020) | ||
| 50 | #define PCIE_LTSSM_ENABLE_BIT (0x1 << 11) | ||
| 51 | #define PIPE_CLK_STABLE (0x1 << 19) | ||
| 52 | #define PHY_REF_PAD_BIT (0x1 << 8) | ||
| 53 | #define PHY_PWR_DOWN_BIT (0x1 << 22) | ||
| 54 | #define PHY_RST_ACK_BIT (0x1 << 16) | ||
| 55 | |||
| 56 | /* info located in sysctrl */ | ||
| 57 | #define SCTRL_PCIE_CMOS_OFFSET 0x60 | ||
| 58 | #define SCTRL_PCIE_CMOS_BIT 0x10 | ||
| 59 | #define SCTRL_PCIE_ISO_OFFSET 0x44 | ||
| 60 | #define SCTRL_PCIE_ISO_BIT 0x30 | ||
| 61 | #define SCTRL_PCIE_HPCLK_OFFSET 0x190 | ||
| 62 | #define SCTRL_PCIE_HPCLK_BIT 0x184000 | ||
| 63 | #define SCTRL_PCIE_OE_OFFSET 0x14a | ||
| 64 | #define PCIE_DEBOUNCE_PARAM 0xF0F400 | ||
| 65 | #define PCIE_OE_BYPASS (0x3 << 28) | ||
| 66 | |||
| 67 | /* peri_crg ctrl */ | ||
| 68 | #define CRGCTRL_PCIE_ASSERT_OFFSET 0x88 | ||
| 69 | #define CRGCTRL_PCIE_ASSERT_BIT 0x8c000000 | ||
| 70 | |||
| 71 | /* Time for delay */ | ||
| 72 | #define REF_2_PERST_MIN 20000 | ||
| 73 | #define REF_2_PERST_MAX 25000 | ||
| 74 | #define PERST_2_ACCESS_MIN 10000 | ||
| 75 | #define PERST_2_ACCESS_MAX 12000 | ||
| 76 | #define LINK_WAIT_MIN 900 | ||
| 77 | #define LINK_WAIT_MAX 1000 | ||
| 78 | #define PIPE_CLK_WAIT_MIN 550 | ||
| 79 | #define PIPE_CLK_WAIT_MAX 600 | ||
| 80 | #define TIME_CMOS_MIN 100 | ||
| 81 | #define TIME_CMOS_MAX 105 | ||
| 82 | #define TIME_PHY_PD_MIN 10 | ||
| 83 | #define TIME_PHY_PD_MAX 11 | ||
| 84 | |||
/*
 * Per-controller driver state for the Kirin (hi3660) PCIe host.
 *
 * Holds the DesignWare core handle, the two locally mapped register
 * windows (APB control block and PHY block), the two syscon regmaps
 * used for power/reset sequencing, the five clocks the controller
 * depends on, and the PERST# GPIO number.
 */
struct kirin_pcie {
	struct dw_pcie *pci;		/* DesignWare PCIe core handle */
	void __iomem *apb_base;		/* "apb" reg window: PCIe ctrl/status */
	void __iomem *phy_base;		/* "phy" reg window: PCIe PHY regs */
	struct regmap *crgctrl;		/* hisilicon,hi3660-crgctrl syscon */
	struct regmap *sysctrl;		/* hisilicon,hi3660-sctrl syscon */
	struct clk *apb_sys_clk;
	struct clk *apb_phy_clk;
	struct clk *phy_ref_clk;
	struct clk *pcie_aclk;
	struct clk *pcie_aux_clk;
	int gpio_id_reset;		/* legacy GPIO number for PERST# */
};
| 98 | |||
| 99 | /* Registers in PCIeCTRL */ | ||
| 100 | static inline void kirin_apb_ctrl_writel(struct kirin_pcie *kirin_pcie, | ||
| 101 | u32 val, u32 reg) | ||
| 102 | { | ||
| 103 | writel(val, kirin_pcie->apb_base + reg); | ||
| 104 | } | ||
| 105 | |||
| 106 | static inline u32 kirin_apb_ctrl_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
| 107 | { | ||
| 108 | return readl(kirin_pcie->apb_base + reg); | ||
| 109 | } | ||
| 110 | |||
| 111 | /* Registers in PCIePHY */ | ||
| 112 | static inline void kirin_apb_phy_writel(struct kirin_pcie *kirin_pcie, | ||
| 113 | u32 val, u32 reg) | ||
| 114 | { | ||
| 115 | writel(val, kirin_pcie->phy_base + reg); | ||
| 116 | } | ||
| 117 | |||
| 118 | static inline u32 kirin_apb_phy_readl(struct kirin_pcie *kirin_pcie, u32 reg) | ||
| 119 | { | ||
| 120 | return readl(kirin_pcie->phy_base + reg); | ||
| 121 | } | ||
| 122 | |||
| 123 | static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, | ||
| 124 | struct platform_device *pdev) | ||
| 125 | { | ||
| 126 | struct device *dev = &pdev->dev; | ||
| 127 | |||
| 128 | kirin_pcie->phy_ref_clk = devm_clk_get(dev, "pcie_phy_ref"); | ||
| 129 | if (IS_ERR(kirin_pcie->phy_ref_clk)) | ||
| 130 | return PTR_ERR(kirin_pcie->phy_ref_clk); | ||
| 131 | |||
| 132 | kirin_pcie->pcie_aux_clk = devm_clk_get(dev, "pcie_aux"); | ||
| 133 | if (IS_ERR(kirin_pcie->pcie_aux_clk)) | ||
| 134 | return PTR_ERR(kirin_pcie->pcie_aux_clk); | ||
| 135 | |||
| 136 | kirin_pcie->apb_phy_clk = devm_clk_get(dev, "pcie_apb_phy"); | ||
| 137 | if (IS_ERR(kirin_pcie->apb_phy_clk)) | ||
| 138 | return PTR_ERR(kirin_pcie->apb_phy_clk); | ||
| 139 | |||
| 140 | kirin_pcie->apb_sys_clk = devm_clk_get(dev, "pcie_apb_sys"); | ||
| 141 | if (IS_ERR(kirin_pcie->apb_sys_clk)) | ||
| 142 | return PTR_ERR(kirin_pcie->apb_sys_clk); | ||
| 143 | |||
| 144 | kirin_pcie->pcie_aclk = devm_clk_get(dev, "pcie_aclk"); | ||
| 145 | if (IS_ERR(kirin_pcie->pcie_aclk)) | ||
| 146 | return PTR_ERR(kirin_pcie->pcie_aclk); | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 151 | static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, | ||
| 152 | struct platform_device *pdev) | ||
| 153 | { | ||
| 154 | struct device *dev = &pdev->dev; | ||
| 155 | struct resource *apb; | ||
| 156 | struct resource *phy; | ||
| 157 | struct resource *dbi; | ||
| 158 | |||
| 159 | apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); | ||
| 160 | kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); | ||
| 161 | if (IS_ERR(kirin_pcie->apb_base)) | ||
| 162 | return PTR_ERR(kirin_pcie->apb_base); | ||
| 163 | |||
| 164 | phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); | ||
| 165 | kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); | ||
| 166 | if (IS_ERR(kirin_pcie->phy_base)) | ||
| 167 | return PTR_ERR(kirin_pcie->phy_base); | ||
| 168 | |||
| 169 | dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); | ||
| 170 | kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); | ||
| 171 | if (IS_ERR(kirin_pcie->pci->dbi_base)) | ||
| 172 | return PTR_ERR(kirin_pcie->pci->dbi_base); | ||
| 173 | |||
| 174 | kirin_pcie->crgctrl = | ||
| 175 | syscon_regmap_lookup_by_compatible("hisilicon,hi3660-crgctrl"); | ||
| 176 | if (IS_ERR(kirin_pcie->crgctrl)) | ||
| 177 | return PTR_ERR(kirin_pcie->crgctrl); | ||
| 178 | |||
| 179 | kirin_pcie->sysctrl = | ||
| 180 | syscon_regmap_lookup_by_compatible("hisilicon,hi3660-sctrl"); | ||
| 181 | if (IS_ERR(kirin_pcie->sysctrl)) | ||
| 182 | return PTR_ERR(kirin_pcie->sysctrl); | ||
| 183 | |||
| 184 | return 0; | ||
| 185 | } | ||
| 186 | |||
/*
 * Bring the PCIe PHY out of its default powered-down state and verify
 * that the PIPE clock has come up.
 *
 * The sequence (release the reference pad, power the PHY up, release
 * PHY reset, then poll the status register once after a settle delay)
 * is order-dependent; do not reorder the register writes.
 *
 * Returns 0 on success, -EINVAL if the PIPE clock did not stabilize.
 */
static int kirin_pcie_phy_init(struct kirin_pcie *kirin_pcie)
{
	struct device *dev = kirin_pcie->pci->dev;
	u32 reg_val;

	/* Enable the external reference clock pad. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_REF_PAD_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	/* Power the PHY up and give it time to settle. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL0);
	reg_val &= ~PHY_PWR_DOWN_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL0);
	usleep_range(TIME_PHY_PD_MIN, TIME_PHY_PD_MAX);

	/* Release the PHY reset. */
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_CTRL1);
	reg_val &= ~PHY_RST_ACK_BIT;
	kirin_apb_phy_writel(kirin_pcie, reg_val, PCIE_APB_PHY_CTRL1);

	/*
	 * Wait for the PIPE clock and check once.  NOTE(review): the bit
	 * being SET is treated as the failure case here, i.e.
	 * PIPE_CLK_STABLE appears to be an "unstable" flag -- confirm
	 * against the hi3660 register description.
	 */
	usleep_range(PIPE_CLK_WAIT_MIN, PIPE_CLK_WAIT_MAX);
	reg_val = kirin_apb_phy_readl(kirin_pcie, PCIE_APB_PHY_STATUS0);
	if (reg_val & PIPE_CLK_STABLE) {
		dev_err(dev, "PIPE clk is not stable\n");
		return -EINVAL;
	}

	return 0;
}
| 214 | |||
| 215 | static void kirin_pcie_oe_enable(struct kirin_pcie *kirin_pcie) | ||
| 216 | { | ||
| 217 | u32 val; | ||
| 218 | |||
| 219 | regmap_read(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, &val); | ||
| 220 | val |= PCIE_DEBOUNCE_PARAM; | ||
| 221 | val &= ~PCIE_OE_BYPASS; | ||
| 222 | regmap_write(kirin_pcie->sysctrl, SCTRL_PCIE_OE_OFFSET, val); | ||
| 223 | } | ||
| 224 | |||
/*
 * Enable or disable all controller clocks.
 *
 * @enable == true:  set the PHY reference clock rate, then enable the
 *   five clocks in dependency order; on any failure, the goto ladder
 *   below unwinds exactly the clocks that were already enabled.
 * @enable == false: jump straight to close_clk, which falls through
 *   every label and disables all five clocks in reverse order.
 *
 * The labels are intentionally shared between the error path and the
 * disable path -- each label disables everything enabled *before* the
 * failing step.  Do not reorder them.
 *
 * Returns 0 on success or the first clk API error encountered.
 */
static int kirin_pcie_clk_ctrl(struct kirin_pcie *kirin_pcie, bool enable)
{
	int ret = 0;

	if (!enable)
		goto close_clk;

	ret = clk_set_rate(kirin_pcie->phy_ref_clk, REF_CLK_FREQ);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->phy_ref_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(kirin_pcie->apb_sys_clk);
	if (ret)
		goto apb_sys_fail;

	ret = clk_prepare_enable(kirin_pcie->apb_phy_clk);
	if (ret)
		goto apb_phy_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aclk);
	if (ret)
		goto aclk_fail;

	ret = clk_prepare_enable(kirin_pcie->pcie_aux_clk);
	if (ret)
		goto aux_clk_fail;

	return 0;

close_clk:
	clk_disable_unprepare(kirin_pcie->pcie_aux_clk);
aux_clk_fail:
	clk_disable_unprepare(kirin_pcie->pcie_aclk);
aclk_fail:
	clk_disable_unprepare(kirin_pcie->apb_phy_clk);
apb_phy_fail:
	clk_disable_unprepare(kirin_pcie->apb_sys_clk);
apb_sys_fail:
	clk_disable_unprepare(kirin_pcie->phy_ref_clk);

	return ret;
}
| 271 | |||
| 272 | static int kirin_pcie_power_on(struct kirin_pcie *kirin_pcie) | ||
| 273 | { | ||
| 274 | int ret; | ||
| 275 | |||
| 276 | /* Power supply for Host */ | ||
| 277 | regmap_write(kirin_pcie->sysctrl, | ||
| 278 | SCTRL_PCIE_CMOS_OFFSET, SCTRL_PCIE_CMOS_BIT); | ||
| 279 | usleep_range(TIME_CMOS_MIN, TIME_CMOS_MAX); | ||
| 280 | kirin_pcie_oe_enable(kirin_pcie); | ||
| 281 | |||
| 282 | ret = kirin_pcie_clk_ctrl(kirin_pcie, true); | ||
| 283 | if (ret) | ||
| 284 | return ret; | ||
| 285 | |||
| 286 | /* ISO disable, PCIeCtrl, PHY assert and clk gate clear */ | ||
| 287 | regmap_write(kirin_pcie->sysctrl, | ||
| 288 | SCTRL_PCIE_ISO_OFFSET, SCTRL_PCIE_ISO_BIT); | ||
| 289 | regmap_write(kirin_pcie->crgctrl, | ||
| 290 | CRGCTRL_PCIE_ASSERT_OFFSET, CRGCTRL_PCIE_ASSERT_BIT); | ||
| 291 | regmap_write(kirin_pcie->sysctrl, | ||
| 292 | SCTRL_PCIE_HPCLK_OFFSET, SCTRL_PCIE_HPCLK_BIT); | ||
| 293 | |||
| 294 | ret = kirin_pcie_phy_init(kirin_pcie); | ||
| 295 | if (ret) | ||
| 296 | goto close_clk; | ||
| 297 | |||
| 298 | /* perst assert Endpoint */ | ||
| 299 | if (!gpio_request(kirin_pcie->gpio_id_reset, "pcie_perst")) { | ||
| 300 | usleep_range(REF_2_PERST_MIN, REF_2_PERST_MAX); | ||
| 301 | ret = gpio_direction_output(kirin_pcie->gpio_id_reset, 1); | ||
| 302 | if (ret) | ||
| 303 | goto close_clk; | ||
| 304 | usleep_range(PERST_2_ACCESS_MIN, PERST_2_ACCESS_MAX); | ||
| 305 | |||
| 306 | return 0; | ||
| 307 | } | ||
| 308 | |||
| 309 | close_clk: | ||
| 310 | kirin_pcie_clk_ctrl(kirin_pcie, false); | ||
| 311 | return ret; | ||
| 312 | } | ||
| 313 | |||
| 314 | static void kirin_pcie_sideband_dbi_w_mode(struct kirin_pcie *kirin_pcie, | ||
| 315 | bool on) | ||
| 316 | { | ||
| 317 | u32 val; | ||
| 318 | |||
| 319 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL0_ADDR); | ||
| 320 | if (on) | ||
| 321 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 322 | else | ||
| 323 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 324 | |||
| 325 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL0_ADDR); | ||
| 326 | } | ||
| 327 | |||
| 328 | static void kirin_pcie_sideband_dbi_r_mode(struct kirin_pcie *kirin_pcie, | ||
| 329 | bool on) | ||
| 330 | { | ||
| 331 | u32 val; | ||
| 332 | |||
| 333 | val = kirin_apb_ctrl_readl(kirin_pcie, SOC_PCIECTRL_CTRL1_ADDR); | ||
| 334 | if (on) | ||
| 335 | val = val | PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 336 | else | ||
| 337 | val = val & ~PCIE_ELBI_SLV_DBI_ENABLE; | ||
| 338 | |||
| 339 | kirin_apb_ctrl_writel(kirin_pcie, val, SOC_PCIECTRL_CTRL1_ADDR); | ||
| 340 | } | ||
| 341 | |||
| 342 | static int kirin_pcie_rd_own_conf(struct pcie_port *pp, | ||
| 343 | int where, int size, u32 *val) | ||
| 344 | { | ||
| 345 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 346 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 347 | int ret; | ||
| 348 | |||
| 349 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); | ||
| 350 | ret = dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 351 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); | ||
| 352 | |||
| 353 | return ret; | ||
| 354 | } | ||
| 355 | |||
| 356 | static int kirin_pcie_wr_own_conf(struct pcie_port *pp, | ||
| 357 | int where, int size, u32 val) | ||
| 358 | { | ||
| 359 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 360 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 361 | int ret; | ||
| 362 | |||
| 363 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); | ||
| 364 | ret = dw_pcie_write(pci->dbi_base + where, size, val); | ||
| 365 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); | ||
| 366 | |||
| 367 | return ret; | ||
| 368 | } | ||
| 369 | |||
| 370 | static u32 kirin_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 371 | u32 reg, size_t size) | ||
| 372 | { | ||
| 373 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 374 | u32 ret; | ||
| 375 | |||
| 376 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, true); | ||
| 377 | dw_pcie_read(base + reg, size, &ret); | ||
| 378 | kirin_pcie_sideband_dbi_r_mode(kirin_pcie, false); | ||
| 379 | |||
| 380 | return ret; | ||
| 381 | } | ||
| 382 | |||
| 383 | static void kirin_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, | ||
| 384 | u32 reg, size_t size, u32 val) | ||
| 385 | { | ||
| 386 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 387 | |||
| 388 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, true); | ||
| 389 | dw_pcie_write(base + reg, size, val); | ||
| 390 | kirin_pcie_sideband_dbi_w_mode(kirin_pcie, false); | ||
| 391 | } | ||
| 392 | |||
| 393 | static int kirin_pcie_link_up(struct dw_pcie *pci) | ||
| 394 | { | ||
| 395 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 396 | u32 val = kirin_apb_ctrl_readl(kirin_pcie, PCIE_APB_PHY_STATUS0); | ||
| 397 | |||
| 398 | if ((val & PCIE_LINKUP_ENABLE) == PCIE_LINKUP_ENABLE) | ||
| 399 | return 1; | ||
| 400 | |||
| 401 | return 0; | ||
| 402 | } | ||
| 403 | |||
| 404 | static int kirin_pcie_establish_link(struct pcie_port *pp) | ||
| 405 | { | ||
| 406 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 407 | struct kirin_pcie *kirin_pcie = to_kirin_pcie(pci); | ||
| 408 | struct device *dev = kirin_pcie->pci->dev; | ||
| 409 | int count = 0; | ||
| 410 | |||
| 411 | if (kirin_pcie_link_up(pci)) | ||
| 412 | return 0; | ||
| 413 | |||
| 414 | dw_pcie_setup_rc(pp); | ||
| 415 | |||
| 416 | /* assert LTSSM enable */ | ||
| 417 | kirin_apb_ctrl_writel(kirin_pcie, PCIE_LTSSM_ENABLE_BIT, | ||
| 418 | PCIE_APP_LTSSM_ENABLE); | ||
| 419 | |||
| 420 | /* check if the link is up or not */ | ||
| 421 | while (!kirin_pcie_link_up(pci)) { | ||
| 422 | usleep_range(LINK_WAIT_MIN, LINK_WAIT_MAX); | ||
| 423 | count++; | ||
| 424 | if (count == 1000) { | ||
| 425 | dev_err(dev, "Link Fail\n"); | ||
| 426 | return -EINVAL; | ||
| 427 | } | ||
| 428 | } | ||
| 429 | |||
| 430 | return 0; | ||
| 431 | } | ||
| 432 | |||
/*
 * dw_pcie_host_ops::host_init callback: bring the link up.  The
 * establish_link() result cannot be propagated because the callback
 * returns void; a link failure has already been logged at that point.
 */
static void kirin_pcie_host_init(struct pcie_port *pp)
{
	kirin_pcie_establish_link(pp);
}
| 437 | |||
/*
 * Low-level DesignWare core hooks: DBI accessors that toggle the ELBI
 * sideband mode around each access, plus the PHY-based link-up check.
 * NOTE(review): could likely be const if dw_pcie::ops is const-qualified
 * -- confirm against pcie-designware.h.
 */
static struct dw_pcie_ops kirin_dw_pcie_ops = {
	.read_dbi = kirin_pcie_read_dbi,
	.write_dbi = kirin_pcie_write_dbi,
	.link_up = kirin_pcie_link_up,
};
| 443 | |||
| 444 | static struct dw_pcie_host_ops kirin_pcie_host_ops = { | ||
| 445 | .rd_own_conf = kirin_pcie_rd_own_conf, | ||
| 446 | .wr_own_conf = kirin_pcie_wr_own_conf, | ||
| 447 | .host_init = kirin_pcie_host_init, | ||
| 448 | }; | ||
| 449 | |||
/*
 * Hook up the host callbacks and hand the port to the DesignWare core
 * for root bus scanning.  Returns the dw_pcie_host_init() result.
 */
static int __init kirin_add_pcie_port(struct dw_pcie *pci,
				      struct platform_device *pdev)
{
	pci->pp.ops = &kirin_pcie_host_ops;

	return dw_pcie_host_init(&pci->pp);
}
| 457 | |||
| 458 | static int kirin_pcie_probe(struct platform_device *pdev) | ||
| 459 | { | ||
| 460 | struct device *dev = &pdev->dev; | ||
| 461 | struct kirin_pcie *kirin_pcie; | ||
| 462 | struct dw_pcie *pci; | ||
| 463 | int ret; | ||
| 464 | |||
| 465 | if (!dev->of_node) { | ||
| 466 | dev_err(dev, "NULL node\n"); | ||
| 467 | return -EINVAL; | ||
| 468 | } | ||
| 469 | |||
| 470 | kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL); | ||
| 471 | if (!kirin_pcie) | ||
| 472 | return -ENOMEM; | ||
| 473 | |||
| 474 | pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); | ||
| 475 | if (!pci) | ||
| 476 | return -ENOMEM; | ||
| 477 | |||
| 478 | pci->dev = dev; | ||
| 479 | pci->ops = &kirin_dw_pcie_ops; | ||
| 480 | kirin_pcie->pci = pci; | ||
| 481 | |||
| 482 | ret = kirin_pcie_get_clk(kirin_pcie, pdev); | ||
| 483 | if (ret) | ||
| 484 | return ret; | ||
| 485 | |||
| 486 | ret = kirin_pcie_get_resource(kirin_pcie, pdev); | ||
| 487 | if (ret) | ||
| 488 | return ret; | ||
| 489 | |||
| 490 | kirin_pcie->gpio_id_reset = of_get_named_gpio(dev->of_node, | ||
| 491 | "reset-gpio", 0); | ||
| 492 | if (kirin_pcie->gpio_id_reset < 0) | ||
| 493 | return -ENODEV; | ||
| 494 | |||
| 495 | ret = kirin_pcie_power_on(kirin_pcie); | ||
| 496 | if (ret) | ||
| 497 | return ret; | ||
| 498 | |||
| 499 | platform_set_drvdata(pdev, kirin_pcie); | ||
| 500 | |||
| 501 | return kirin_add_pcie_port(pci, pdev); | ||
| 502 | } | ||
| 503 | |||
/* DT match table; no module device table -- the driver is builtin-only. */
static const struct of_device_id kirin_pcie_match[] = {
	{ .compatible = "hisilicon,kirin960-pcie" },
	{},
};
| 508 | |||
| 509 | struct platform_driver kirin_pcie_driver = { | ||
| 510 | .probe = kirin_pcie_probe, | ||
| 511 | .driver = { | ||
| 512 | .name = "kirin-pcie", | ||
| 513 | .of_match_table = kirin_pcie_match, | ||
| 514 | .suppress_bind_attrs = true, | ||
| 515 | }, | ||
| 516 | }; | ||
| 517 | builtin_platform_driver(kirin_pcie_driver); | ||
diff --git a/drivers/pci/dwc/pcie-qcom.c b/drivers/pci/dwc/pcie-qcom.c index 5bf23d432fdb..68c5f2ab5bc8 100644 --- a/drivers/pci/dwc/pcie-qcom.c +++ b/drivers/pci/dwc/pcie-qcom.c | |||
| @@ -51,6 +51,12 @@ | |||
| 51 | #define PCIE20_ELBI_SYS_CTRL 0x04 | 51 | #define PCIE20_ELBI_SYS_CTRL 0x04 |
| 52 | #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) | 52 | #define PCIE20_ELBI_SYS_CTRL_LT_ENABLE BIT(0) |
| 53 | 53 | ||
| 54 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL0 0x818 | ||
| 55 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K 0x4 | ||
| 56 | #define CFG_REMOTE_RD_REQ_BRIDGE_SIZE_4K 0x5 | ||
| 57 | #define PCIE20_AXI_MSTR_RESP_COMP_CTRL1 0x81c | ||
| 58 | #define CFG_BRIDGE_SB_INIT BIT(0) | ||
| 59 | |||
| 54 | #define PCIE20_CAP 0x70 | 60 | #define PCIE20_CAP 0x70 |
| 55 | 61 | ||
| 56 | #define PERST_DELAY_US 1000 | 62 | #define PERST_DELAY_US 1000 |
| @@ -86,10 +92,29 @@ struct qcom_pcie_resources_v2 { | |||
| 86 | struct clk *pipe_clk; | 92 | struct clk *pipe_clk; |
| 87 | }; | 93 | }; |
| 88 | 94 | ||
| 95 | struct qcom_pcie_resources_v3 { | ||
| 96 | struct clk *aux_clk; | ||
| 97 | struct clk *master_clk; | ||
| 98 | struct clk *slave_clk; | ||
| 99 | struct reset_control *axi_m_reset; | ||
| 100 | struct reset_control *axi_s_reset; | ||
| 101 | struct reset_control *pipe_reset; | ||
| 102 | struct reset_control *axi_m_vmid_reset; | ||
| 103 | struct reset_control *axi_s_xpu_reset; | ||
| 104 | struct reset_control *parf_reset; | ||
| 105 | struct reset_control *phy_reset; | ||
| 106 | struct reset_control *axi_m_sticky_reset; | ||
| 107 | struct reset_control *pipe_sticky_reset; | ||
| 108 | struct reset_control *pwr_reset; | ||
| 109 | struct reset_control *ahb_reset; | ||
| 110 | struct reset_control *phy_ahb_reset; | ||
| 111 | }; | ||
| 112 | |||
| 89 | union qcom_pcie_resources { | 113 | union qcom_pcie_resources { |
| 90 | struct qcom_pcie_resources_v0 v0; | 114 | struct qcom_pcie_resources_v0 v0; |
| 91 | struct qcom_pcie_resources_v1 v1; | 115 | struct qcom_pcie_resources_v1 v1; |
| 92 | struct qcom_pcie_resources_v2 v2; | 116 | struct qcom_pcie_resources_v2 v2; |
| 117 | struct qcom_pcie_resources_v3 v3; | ||
| 93 | }; | 118 | }; |
| 94 | 119 | ||
| 95 | struct qcom_pcie; | 120 | struct qcom_pcie; |
| @@ -133,26 +158,6 @@ static irqreturn_t qcom_pcie_msi_irq_handler(int irq, void *arg) | |||
| 133 | return dw_handle_msi_irq(pp); | 158 | return dw_handle_msi_irq(pp); |
| 134 | } | 159 | } |
| 135 | 160 | ||
| 136 | static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) | ||
| 137 | { | ||
| 138 | u32 val; | ||
| 139 | |||
| 140 | /* enable link training */ | ||
| 141 | val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 142 | val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; | ||
| 143 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 144 | } | ||
| 145 | |||
| 146 | static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) | ||
| 147 | { | ||
| 148 | u32 val; | ||
| 149 | |||
| 150 | /* enable link training */ | ||
| 151 | val = readl(pcie->parf + PCIE20_PARF_LTSSM); | ||
| 152 | val |= BIT(8); | ||
| 153 | writel(val, pcie->parf + PCIE20_PARF_LTSSM); | ||
| 154 | } | ||
| 155 | |||
| 156 | static int qcom_pcie_establish_link(struct qcom_pcie *pcie) | 161 | static int qcom_pcie_establish_link(struct qcom_pcie *pcie) |
| 157 | { | 162 | { |
| 158 | struct dw_pcie *pci = pcie->pci; | 163 | struct dw_pcie *pci = pcie->pci; |
| @@ -167,6 +172,16 @@ static int qcom_pcie_establish_link(struct qcom_pcie *pcie) | |||
| 167 | return dw_pcie_wait_for_link(pci); | 172 | return dw_pcie_wait_for_link(pci); |
| 168 | } | 173 | } |
| 169 | 174 | ||
| 175 | static void qcom_pcie_v0_v1_ltssm_enable(struct qcom_pcie *pcie) | ||
| 176 | { | ||
| 177 | u32 val; | ||
| 178 | |||
| 179 | /* enable link training */ | ||
| 180 | val = readl(pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 181 | val |= PCIE20_ELBI_SYS_CTRL_LT_ENABLE; | ||
| 182 | writel(val, pcie->elbi + PCIE20_ELBI_SYS_CTRL); | ||
| 183 | } | ||
| 184 | |||
| 170 | static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) | 185 | static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) |
| 171 | { | 186 | { |
| 172 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; | 187 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; |
| @@ -217,36 +232,6 @@ static int qcom_pcie_get_resources_v0(struct qcom_pcie *pcie) | |||
| 217 | return PTR_ERR_OR_ZERO(res->phy_reset); | 232 | return PTR_ERR_OR_ZERO(res->phy_reset); |
| 218 | } | 233 | } |
| 219 | 234 | ||
| 220 | static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) | ||
| 221 | { | ||
| 222 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | ||
| 223 | struct dw_pcie *pci = pcie->pci; | ||
| 224 | struct device *dev = pci->dev; | ||
| 225 | |||
| 226 | res->vdda = devm_regulator_get(dev, "vdda"); | ||
| 227 | if (IS_ERR(res->vdda)) | ||
| 228 | return PTR_ERR(res->vdda); | ||
| 229 | |||
| 230 | res->iface = devm_clk_get(dev, "iface"); | ||
| 231 | if (IS_ERR(res->iface)) | ||
| 232 | return PTR_ERR(res->iface); | ||
| 233 | |||
| 234 | res->aux = devm_clk_get(dev, "aux"); | ||
| 235 | if (IS_ERR(res->aux)) | ||
| 236 | return PTR_ERR(res->aux); | ||
| 237 | |||
| 238 | res->master_bus = devm_clk_get(dev, "master_bus"); | ||
| 239 | if (IS_ERR(res->master_bus)) | ||
| 240 | return PTR_ERR(res->master_bus); | ||
| 241 | |||
| 242 | res->slave_bus = devm_clk_get(dev, "slave_bus"); | ||
| 243 | if (IS_ERR(res->slave_bus)) | ||
| 244 | return PTR_ERR(res->slave_bus); | ||
| 245 | |||
| 246 | res->core = devm_reset_control_get(dev, "core"); | ||
| 247 | return PTR_ERR_OR_ZERO(res->core); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) | 235 | static void qcom_pcie_deinit_v0(struct qcom_pcie *pcie) |
| 251 | { | 236 | { |
| 252 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; | 237 | struct qcom_pcie_resources_v0 *res = &pcie->res.v0; |
| @@ -357,6 +342,13 @@ static int qcom_pcie_init_v0(struct qcom_pcie *pcie) | |||
| 357 | /* wait for clock acquisition */ | 342 | /* wait for clock acquisition */ |
| 358 | usleep_range(1000, 1500); | 343 | usleep_range(1000, 1500); |
| 359 | 344 | ||
| 345 | |||
| 346 | /* Set the Max TLP size to 2K, instead of using default of 4K */ | ||
| 347 | writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, | ||
| 348 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL0); | ||
| 349 | writel(CFG_BRIDGE_SB_INIT, | ||
| 350 | pci->dbi_base + PCIE20_AXI_MSTR_RESP_COMP_CTRL1); | ||
| 351 | |||
| 360 | return 0; | 352 | return 0; |
| 361 | 353 | ||
| 362 | err_deassert_ahb: | 354 | err_deassert_ahb: |
| @@ -375,6 +367,36 @@ err_refclk: | |||
| 375 | return ret; | 367 | return ret; |
| 376 | } | 368 | } |
| 377 | 369 | ||
| 370 | static int qcom_pcie_get_resources_v1(struct qcom_pcie *pcie) | ||
| 371 | { | ||
| 372 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | ||
| 373 | struct dw_pcie *pci = pcie->pci; | ||
| 374 | struct device *dev = pci->dev; | ||
| 375 | |||
| 376 | res->vdda = devm_regulator_get(dev, "vdda"); | ||
| 377 | if (IS_ERR(res->vdda)) | ||
| 378 | return PTR_ERR(res->vdda); | ||
| 379 | |||
| 380 | res->iface = devm_clk_get(dev, "iface"); | ||
| 381 | if (IS_ERR(res->iface)) | ||
| 382 | return PTR_ERR(res->iface); | ||
| 383 | |||
| 384 | res->aux = devm_clk_get(dev, "aux"); | ||
| 385 | if (IS_ERR(res->aux)) | ||
| 386 | return PTR_ERR(res->aux); | ||
| 387 | |||
| 388 | res->master_bus = devm_clk_get(dev, "master_bus"); | ||
| 389 | if (IS_ERR(res->master_bus)) | ||
| 390 | return PTR_ERR(res->master_bus); | ||
| 391 | |||
| 392 | res->slave_bus = devm_clk_get(dev, "slave_bus"); | ||
| 393 | if (IS_ERR(res->slave_bus)) | ||
| 394 | return PTR_ERR(res->slave_bus); | ||
| 395 | |||
| 396 | res->core = devm_reset_control_get(dev, "core"); | ||
| 397 | return PTR_ERR_OR_ZERO(res->core); | ||
| 398 | } | ||
| 399 | |||
| 378 | static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) | 400 | static void qcom_pcie_deinit_v1(struct qcom_pcie *pcie) |
| 379 | { | 401 | { |
| 380 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; | 402 | struct qcom_pcie_resources_v1 *res = &pcie->res.v1; |
| @@ -455,6 +477,16 @@ err_res: | |||
| 455 | return ret; | 477 | return ret; |
| 456 | } | 478 | } |
| 457 | 479 | ||
| 480 | static void qcom_pcie_v2_ltssm_enable(struct qcom_pcie *pcie) | ||
| 481 | { | ||
| 482 | u32 val; | ||
| 483 | |||
| 484 | /* enable link training */ | ||
| 485 | val = readl(pcie->parf + PCIE20_PARF_LTSSM); | ||
| 486 | val |= BIT(8); | ||
| 487 | writel(val, pcie->parf + PCIE20_PARF_LTSSM); | ||
| 488 | } | ||
| 489 | |||
| 458 | static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) | 490 | static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) |
| 459 | { | 491 | { |
| 460 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 492 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; |
| @@ -481,6 +513,17 @@ static int qcom_pcie_get_resources_v2(struct qcom_pcie *pcie) | |||
| 481 | return PTR_ERR_OR_ZERO(res->pipe_clk); | 513 | return PTR_ERR_OR_ZERO(res->pipe_clk); |
| 482 | } | 514 | } |
| 483 | 515 | ||
| 516 | static void qcom_pcie_deinit_v2(struct qcom_pcie *pcie) | ||
| 517 | { | ||
| 518 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | ||
| 519 | |||
| 520 | clk_disable_unprepare(res->pipe_clk); | ||
| 521 | clk_disable_unprepare(res->slave_clk); | ||
| 522 | clk_disable_unprepare(res->master_clk); | ||
| 523 | clk_disable_unprepare(res->cfg_clk); | ||
| 524 | clk_disable_unprepare(res->aux_clk); | ||
| 525 | } | ||
| 526 | |||
| 484 | static int qcom_pcie_init_v2(struct qcom_pcie *pcie) | 527 | static int qcom_pcie_init_v2(struct qcom_pcie *pcie) |
| 485 | { | 528 | { |
| 486 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; | 529 | struct qcom_pcie_resources_v2 *res = &pcie->res.v2; |
| @@ -562,22 +605,290 @@ static int qcom_pcie_post_init_v2(struct qcom_pcie *pcie) | |||
| 562 | return 0; | 605 | return 0; |
| 563 | } | 606 | } |
| 564 | 607 | ||
/*
 * Look up all clocks and reset lines needed by the v3 (ipq4019) variant.
 *
 * Purely declarative: every handle is fetched with devm_* helpers, so
 * nothing acquired here needs explicit release on the error paths —
 * the first failing lookup simply propagates its error code.
 * The clock/reset names must match the DT binding for qcom,pcie-ipq4019.
 */
static int qcom_pcie_get_resources_v3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;
	struct dw_pcie *pci = pcie->pci;
	struct device *dev = pci->dev;

	res->aux_clk = devm_clk_get(dev, "aux");
	if (IS_ERR(res->aux_clk))
		return PTR_ERR(res->aux_clk);

	res->master_clk = devm_clk_get(dev, "master_bus");
	if (IS_ERR(res->master_clk))
		return PTR_ERR(res->master_clk);

	res->slave_clk = devm_clk_get(dev, "slave_bus");
	if (IS_ERR(res->slave_clk))
		return PTR_ERR(res->slave_clk);

	res->axi_m_reset = devm_reset_control_get(dev, "axi_m");
	if (IS_ERR(res->axi_m_reset))
		return PTR_ERR(res->axi_m_reset);

	res->axi_s_reset = devm_reset_control_get(dev, "axi_s");
	if (IS_ERR(res->axi_s_reset))
		return PTR_ERR(res->axi_s_reset);

	res->pipe_reset = devm_reset_control_get(dev, "pipe");
	if (IS_ERR(res->pipe_reset))
		return PTR_ERR(res->pipe_reset);

	res->axi_m_vmid_reset = devm_reset_control_get(dev, "axi_m_vmid");
	if (IS_ERR(res->axi_m_vmid_reset))
		return PTR_ERR(res->axi_m_vmid_reset);

	res->axi_s_xpu_reset = devm_reset_control_get(dev, "axi_s_xpu");
	if (IS_ERR(res->axi_s_xpu_reset))
		return PTR_ERR(res->axi_s_xpu_reset);

	res->parf_reset = devm_reset_control_get(dev, "parf");
	if (IS_ERR(res->parf_reset))
		return PTR_ERR(res->parf_reset);

	res->phy_reset = devm_reset_control_get(dev, "phy");
	if (IS_ERR(res->phy_reset))
		return PTR_ERR(res->phy_reset);

	res->axi_m_sticky_reset = devm_reset_control_get(dev, "axi_m_sticky");
	if (IS_ERR(res->axi_m_sticky_reset))
		return PTR_ERR(res->axi_m_sticky_reset);

	res->pipe_sticky_reset = devm_reset_control_get(dev, "pipe_sticky");
	if (IS_ERR(res->pipe_sticky_reset))
		return PTR_ERR(res->pipe_sticky_reset);

	res->pwr_reset = devm_reset_control_get(dev, "pwr");
	if (IS_ERR(res->pwr_reset))
		return PTR_ERR(res->pwr_reset);

	res->ahb_reset = devm_reset_control_get(dev, "ahb");
	if (IS_ERR(res->ahb_reset))
		return PTR_ERR(res->ahb_reset);

	res->phy_ahb_reset = devm_reset_control_get(dev, "phy_ahb");
	if (IS_ERR(res->phy_ahb_reset))
		return PTR_ERR(res->phy_ahb_reset);

	return 0;
}
| 571 | 676 | ||
/*
 * Power down the v3 (ipq4019) PCIe core: put every block back into
 * reset, then gate the bus clocks.
 *
 * NOTE(review): assert order here does not exactly mirror the deassert
 * order in qcom_pcie_init_v3() — presumably intentional per the IPQ4019
 * power-down sequence; confirm against the hardware programming guide.
 */
static void qcom_pcie_deinit_v3(struct qcom_pcie *pcie)
{
	struct qcom_pcie_resources_v3 *res = &pcie->res.v3;

	reset_control_assert(res->axi_m_reset);
	reset_control_assert(res->axi_s_reset);
	reset_control_assert(res->pipe_reset);
	reset_control_assert(res->pipe_sticky_reset);
	reset_control_assert(res->phy_reset);
	reset_control_assert(res->phy_ahb_reset);
	reset_control_assert(res->axi_m_sticky_reset);
	reset_control_assert(res->pwr_reset);
	reset_control_assert(res->ahb_reset);
	clk_disable_unprepare(res->aux_clk);
	clk_disable_unprepare(res->master_clk);
	clk_disable_unprepare(res->slave_clk);
}
| 694 | |||
| 695 | static int qcom_pcie_init_v3(struct qcom_pcie *pcie) | ||
| 696 | { | ||
| 697 | struct qcom_pcie_resources_v3 *res = &pcie->res.v3; | ||
| 698 | struct dw_pcie *pci = pcie->pci; | ||
| 699 | struct device *dev = pci->dev; | ||
| 700 | u32 val; | ||
| 701 | int ret; | ||
| 702 | |||
| 703 | ret = reset_control_assert(res->axi_m_reset); | ||
| 704 | if (ret) { | ||
| 705 | dev_err(dev, "cannot assert axi master reset\n"); | ||
| 706 | return ret; | ||
| 707 | } | ||
| 708 | |||
| 709 | ret = reset_control_assert(res->axi_s_reset); | ||
| 710 | if (ret) { | ||
| 711 | dev_err(dev, "cannot assert axi slave reset\n"); | ||
| 712 | return ret; | ||
| 713 | } | ||
| 714 | |||
| 715 | usleep_range(10000, 12000); | ||
| 716 | |||
| 717 | ret = reset_control_assert(res->pipe_reset); | ||
| 718 | if (ret) { | ||
| 719 | dev_err(dev, "cannot assert pipe reset\n"); | ||
| 720 | return ret; | ||
| 721 | } | ||
| 722 | |||
| 723 | ret = reset_control_assert(res->pipe_sticky_reset); | ||
| 724 | if (ret) { | ||
| 725 | dev_err(dev, "cannot assert pipe sticky reset\n"); | ||
| 726 | return ret; | ||
| 727 | } | ||
| 728 | |||
| 729 | ret = reset_control_assert(res->phy_reset); | ||
| 730 | if (ret) { | ||
| 731 | dev_err(dev, "cannot assert phy reset\n"); | ||
| 732 | return ret; | ||
| 733 | } | ||
| 734 | |||
| 735 | ret = reset_control_assert(res->phy_ahb_reset); | ||
| 736 | if (ret) { | ||
| 737 | dev_err(dev, "cannot assert phy ahb reset\n"); | ||
| 738 | return ret; | ||
| 739 | } | ||
| 740 | |||
| 741 | usleep_range(10000, 12000); | ||
| 742 | |||
| 743 | ret = reset_control_assert(res->axi_m_sticky_reset); | ||
| 744 | if (ret) { | ||
| 745 | dev_err(dev, "cannot assert axi master sticky reset\n"); | ||
| 746 | return ret; | ||
| 747 | } | ||
| 748 | |||
| 749 | ret = reset_control_assert(res->pwr_reset); | ||
| 750 | if (ret) { | ||
| 751 | dev_err(dev, "cannot assert power reset\n"); | ||
| 752 | return ret; | ||
| 753 | } | ||
| 754 | |||
| 755 | ret = reset_control_assert(res->ahb_reset); | ||
| 756 | if (ret) { | ||
| 757 | dev_err(dev, "cannot assert ahb reset\n"); | ||
| 758 | return ret; | ||
| 759 | } | ||
| 760 | |||
| 761 | usleep_range(10000, 12000); | ||
| 762 | |||
| 763 | ret = reset_control_deassert(res->phy_ahb_reset); | ||
| 764 | if (ret) { | ||
| 765 | dev_err(dev, "cannot deassert phy ahb reset\n"); | ||
| 766 | return ret; | ||
| 767 | } | ||
| 768 | |||
| 769 | ret = reset_control_deassert(res->phy_reset); | ||
| 770 | if (ret) { | ||
| 771 | dev_err(dev, "cannot deassert phy reset\n"); | ||
| 772 | goto err_rst_phy; | ||
| 773 | } | ||
| 774 | |||
| 775 | ret = reset_control_deassert(res->pipe_reset); | ||
| 776 | if (ret) { | ||
| 777 | dev_err(dev, "cannot deassert pipe reset\n"); | ||
| 778 | goto err_rst_pipe; | ||
| 779 | } | ||
| 780 | |||
| 781 | ret = reset_control_deassert(res->pipe_sticky_reset); | ||
| 782 | if (ret) { | ||
| 783 | dev_err(dev, "cannot deassert pipe sticky reset\n"); | ||
| 784 | goto err_rst_pipe_sticky; | ||
| 785 | } | ||
| 786 | |||
| 787 | usleep_range(10000, 12000); | ||
| 788 | |||
| 789 | ret = reset_control_deassert(res->axi_m_reset); | ||
| 790 | if (ret) { | ||
| 791 | dev_err(dev, "cannot deassert axi master reset\n"); | ||
| 792 | goto err_rst_axi_m; | ||
| 793 | } | ||
| 794 | |||
| 795 | ret = reset_control_deassert(res->axi_m_sticky_reset); | ||
| 796 | if (ret) { | ||
| 797 | dev_err(dev, "cannot deassert axi master sticky reset\n"); | ||
| 798 | goto err_rst_axi_m_sticky; | ||
| 799 | } | ||
| 800 | |||
| 801 | ret = reset_control_deassert(res->axi_s_reset); | ||
| 802 | if (ret) { | ||
| 803 | dev_err(dev, "cannot deassert axi slave reset\n"); | ||
| 804 | goto err_rst_axi_s; | ||
| 805 | } | ||
| 806 | |||
| 807 | ret = reset_control_deassert(res->pwr_reset); | ||
| 808 | if (ret) { | ||
| 809 | dev_err(dev, "cannot deassert power reset\n"); | ||
| 810 | goto err_rst_pwr; | ||
| 811 | } | ||
| 812 | |||
| 813 | ret = reset_control_deassert(res->ahb_reset); | ||
| 814 | if (ret) { | ||
| 815 | dev_err(dev, "cannot deassert ahb reset\n"); | ||
| 816 | goto err_rst_ahb; | ||
| 817 | } | ||
| 818 | |||
| 819 | usleep_range(10000, 12000); | ||
| 820 | |||
| 821 | ret = clk_prepare_enable(res->aux_clk); | ||
| 822 | if (ret) { | ||
| 823 | dev_err(dev, "cannot prepare/enable iface clock\n"); | ||
| 824 | goto err_clk_aux; | ||
| 825 | } | ||
| 826 | |||
| 827 | ret = clk_prepare_enable(res->master_clk); | ||
| 828 | if (ret) { | ||
| 829 | dev_err(dev, "cannot prepare/enable core clock\n"); | ||
| 830 | goto err_clk_axi_m; | ||
| 831 | } | ||
| 832 | |||
| 833 | ret = clk_prepare_enable(res->slave_clk); | ||
| 834 | if (ret) { | ||
| 835 | dev_err(dev, "cannot prepare/enable phy clock\n"); | ||
| 836 | goto err_clk_axi_s; | ||
| 837 | } | ||
| 838 | |||
| 839 | /* enable PCIe clocks and resets */ | ||
| 840 | val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 841 | val &= !BIT(0); | ||
| 842 | writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); | ||
| 843 | |||
| 844 | /* change DBI base address */ | ||
| 845 | writel(0, pcie->parf + PCIE20_PARF_DBI_BASE_ADDR); | ||
| 846 | |||
| 847 | /* MAC PHY_POWERDOWN MUX DISABLE */ | ||
| 848 | val = readl(pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 849 | val &= ~BIT(29); | ||
| 850 | writel(val, pcie->parf + PCIE20_PARF_SYS_CTRL); | ||
| 851 | |||
| 852 | val = readl(pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 853 | val |= BIT(4); | ||
| 854 | writel(val, pcie->parf + PCIE20_PARF_MHI_CLOCK_RESET_CTRL); | ||
| 855 | |||
| 856 | val = readl(pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 857 | val |= BIT(31); | ||
| 858 | writel(val, pcie->parf + PCIE20_PARF_AXI_MSTR_WR_ADDR_HALT_V2); | ||
| 859 | |||
| 860 | return 0; | ||
| 861 | |||
| 862 | err_clk_axi_s: | ||
| 578 | clk_disable_unprepare(res->master_clk); | 863 | clk_disable_unprepare(res->master_clk); |
| 579 | clk_disable_unprepare(res->cfg_clk); | 864 | err_clk_axi_m: |
| 580 | clk_disable_unprepare(res->aux_clk); | 865 | clk_disable_unprepare(res->aux_clk); |
| 866 | err_clk_aux: | ||
| 867 | reset_control_assert(res->ahb_reset); | ||
| 868 | err_rst_ahb: | ||
| 869 | reset_control_assert(res->pwr_reset); | ||
| 870 | err_rst_pwr: | ||
| 871 | reset_control_assert(res->axi_s_reset); | ||
| 872 | err_rst_axi_s: | ||
| 873 | reset_control_assert(res->axi_m_sticky_reset); | ||
| 874 | err_rst_axi_m_sticky: | ||
| 875 | reset_control_assert(res->axi_m_reset); | ||
| 876 | err_rst_axi_m: | ||
| 877 | reset_control_assert(res->pipe_sticky_reset); | ||
| 878 | err_rst_pipe_sticky: | ||
| 879 | reset_control_assert(res->pipe_reset); | ||
| 880 | err_rst_pipe: | ||
| 881 | reset_control_assert(res->phy_reset); | ||
| 882 | err_rst_phy: | ||
| 883 | reset_control_assert(res->phy_ahb_reset); | ||
| 884 | return ret; | ||
| 885 | } | ||
| 886 | |||
| 887 | static int qcom_pcie_link_up(struct dw_pcie *pci) | ||
| 888 | { | ||
| 889 | u16 val = readw(pci->dbi_base + PCIE20_CAP + PCI_EXP_LNKSTA); | ||
| 890 | |||
| 891 | return !!(val & PCI_EXP_LNKSTA_DLLLA); | ||
| 581 | } | 892 | } |
| 582 | 893 | ||
| 583 | static void qcom_pcie_host_init(struct pcie_port *pp) | 894 | static void qcom_pcie_host_init(struct pcie_port *pp) |
| @@ -634,7 +945,7 @@ static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | |||
| 634 | return dw_pcie_read(pci->dbi_base + where, size, val); | 945 | return dw_pcie_read(pci->dbi_base + where, size, val); |
| 635 | } | 946 | } |
| 636 | 947 | ||
| 637 | static struct dw_pcie_host_ops qcom_pcie_dw_ops = { | 948 | static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { |
| 638 | .host_init = qcom_pcie_host_init, | 949 | .host_init = qcom_pcie_host_init, |
| 639 | .rd_own_conf = qcom_pcie_rd_own_conf, | 950 | .rd_own_conf = qcom_pcie_rd_own_conf, |
| 640 | }; | 951 | }; |
| @@ -665,6 +976,13 @@ static const struct dw_pcie_ops dw_pcie_ops = { | |||
| 665 | .link_up = qcom_pcie_link_up, | 976 | .link_up = qcom_pcie_link_up, |
| 666 | }; | 977 | }; |
| 667 | 978 | ||
| 979 | static const struct qcom_pcie_ops ops_v3 = { | ||
| 980 | .get_resources = qcom_pcie_get_resources_v3, | ||
| 981 | .init = qcom_pcie_init_v3, | ||
| 982 | .deinit = qcom_pcie_deinit_v3, | ||
| 983 | .ltssm_enable = qcom_pcie_v2_ltssm_enable, | ||
| 984 | }; | ||
| 985 | |||
| 668 | static int qcom_pcie_probe(struct platform_device *pdev) | 986 | static int qcom_pcie_probe(struct platform_device *pdev) |
| 669 | { | 987 | { |
| 670 | struct device *dev = &pdev->dev; | 988 | struct device *dev = &pdev->dev; |
| @@ -727,7 +1045,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) | |||
| 727 | 1045 | ||
| 728 | ret = devm_request_irq(dev, pp->msi_irq, | 1046 | ret = devm_request_irq(dev, pp->msi_irq, |
| 729 | qcom_pcie_msi_irq_handler, | 1047 | qcom_pcie_msi_irq_handler, |
| 730 | IRQF_SHARED, "qcom-pcie-msi", pp); | 1048 | IRQF_SHARED | IRQF_NO_THREAD, |
| 1049 | "qcom-pcie-msi", pp); | ||
| 731 | if (ret) { | 1050 | if (ret) { |
| 732 | dev_err(dev, "cannot request msi irq\n"); | 1051 | dev_err(dev, "cannot request msi irq\n"); |
| 733 | return ret; | 1052 | return ret; |
| @@ -754,6 +1073,7 @@ static const struct of_device_id qcom_pcie_match[] = { | |||
| 754 | { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, | 1073 | { .compatible = "qcom,pcie-apq8064", .data = &ops_v0 }, |
| 755 | { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, | 1074 | { .compatible = "qcom,pcie-apq8084", .data = &ops_v1 }, |
| 756 | { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, | 1075 | { .compatible = "qcom,pcie-msm8996", .data = &ops_v2 }, |
| 1076 | { .compatible = "qcom,pcie-ipq4019", .data = &ops_v3 }, | ||
| 757 | { } | 1077 | { } |
| 758 | }; | 1078 | }; |
| 759 | 1079 | ||
diff --git a/drivers/pci/dwc/pcie-spear13xx.c b/drivers/pci/dwc/pcie-spear13xx.c index 8ff36b3dbbdf..80897291e0fb 100644 --- a/drivers/pci/dwc/pcie-spear13xx.c +++ b/drivers/pci/dwc/pcie-spear13xx.c | |||
| @@ -186,7 +186,7 @@ static void spear13xx_pcie_host_init(struct pcie_port *pp) | |||
| 186 | spear13xx_pcie_enable_interrupts(spear13xx_pcie); | 186 | spear13xx_pcie_enable_interrupts(spear13xx_pcie); |
| 187 | } | 187 | } |
| 188 | 188 | ||
/* DesignWare host callbacks for SPEAr13xx; only host_init is needed. */
static const struct dw_pcie_host_ops spear13xx_pcie_host_ops = {
	.host_init = spear13xx_pcie_host_init,
};
| 192 | 192 | ||
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 7f47cd5e10a5..89d61c2cbfaa 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
| @@ -180,6 +180,31 @@ config PCIE_ROCKCHIP | |||
| 180 | There is 1 internal PCIe port available to support GEN2 with | 180 | There is 1 internal PCIe port available to support GEN2 with |
| 181 | 4 slots. | 181 | 4 slots. |
| 182 | 182 | ||
| 183 | config PCIE_MEDIATEK | ||
| 184 | bool "MediaTek PCIe controller" | ||
| 185 | depends on ARM && (ARCH_MEDIATEK || COMPILE_TEST) | ||
| 186 | depends on OF | ||
| 187 | depends on PCI | ||
| 188 | select PCIEPORTBUS | ||
| 189 | help | ||
| 190 | Say Y here if you want to enable PCIe controller support on | ||
| 191 | MT7623 series SoCs. There is one single root complex with 3 root | ||
| 192 | ports available. Each port supports Gen2 lane x1. | ||
| 193 | |||
| 194 | config PCIE_TANGO_SMP8759 | ||
| 195 | bool "Tango SMP8759 PCIe controller (DANGEROUS)" | ||
| 196 | depends on ARCH_TANGO && PCI_MSI && OF | ||
| 197 | depends on BROKEN | ||
| 198 | select PCI_HOST_COMMON | ||
| 199 | help | ||
| 200 | Say Y here to enable PCIe controller support for Sigma Designs | ||
| 201 | Tango SMP8759-based systems. | ||
| 202 | |||
| 203 | Note: The SMP8759 controller multiplexes PCI config and MMIO | ||
| 204 | accesses, and Linux doesn't provide a way to serialize them. | ||
| 205 | This can lead to data corruption if drivers perform concurrent | ||
| 206 | config and MMIO accesses. | ||
| 207 | |||
| 183 | config VMD | 208 | config VMD |
| 184 | depends on PCI_MSI && X86_64 && SRCU | 209 | depends on PCI_MSI && X86_64 && SRCU |
| 185 | tristate "Intel Volume Management Device Driver" | 210 | tristate "Intel Volume Management Device Driver" |
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile index cab879578003..12382785e02a 100644 --- a/drivers/pci/host/Makefile +++ b/drivers/pci/host/Makefile | |||
| @@ -18,6 +18,8 @@ obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o | |||
| 18 | obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o | 18 | obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o |
| 19 | obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o | 19 | obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o |
| 20 | obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o | 20 | obj-$(CONFIG_PCIE_ROCKCHIP) += pcie-rockchip.o |
| 21 | obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o | ||
| 22 | obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o | ||
| 21 | obj-$(CONFIG_VMD) += vmd.o | 23 | obj-$(CONFIG_VMD) += vmd.o |
| 22 | 24 | ||
| 23 | # The following drivers are for devices that use the generic ACPI | 25 | # The following drivers are for devices that use the generic ACPI |
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 37d0bcd31f8a..5fb9b620ac78 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
| @@ -886,12 +886,14 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
| 886 | struct advk_pcie *pcie; | 886 | struct advk_pcie *pcie; |
| 887 | struct resource *res; | 887 | struct resource *res; |
| 888 | struct pci_bus *bus, *child; | 888 | struct pci_bus *bus, *child; |
| 889 | struct pci_host_bridge *bridge; | ||
| 889 | int ret, irq; | 890 | int ret, irq; |
| 890 | 891 | ||
| 891 | pcie = devm_kzalloc(dev, sizeof(struct advk_pcie), GFP_KERNEL); | 892 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct advk_pcie)); |
| 892 | if (!pcie) | 893 | if (!bridge) |
| 893 | return -ENOMEM; | 894 | return -ENOMEM; |
| 894 | 895 | ||
| 896 | pcie = pci_host_bridge_priv(bridge); | ||
| 895 | pcie->pdev = pdev; | 897 | pcie->pdev = pdev; |
| 896 | 898 | ||
| 897 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 899 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| @@ -929,14 +931,21 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
| 929 | return ret; | 931 | return ret; |
| 930 | } | 932 | } |
| 931 | 933 | ||
| 932 | bus = pci_scan_root_bus(dev, 0, &advk_pcie_ops, | 934 | list_splice_init(&pcie->resources, &bridge->windows); |
| 933 | pcie, &pcie->resources); | 935 | bridge->dev.parent = dev; |
| 934 | if (!bus) { | 936 | bridge->sysdata = pcie; |
| 937 | bridge->busnr = 0; | ||
| 938 | bridge->ops = &advk_pcie_ops; | ||
| 939 | |||
| 940 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 941 | if (ret < 0) { | ||
| 935 | advk_pcie_remove_msi_irq_domain(pcie); | 942 | advk_pcie_remove_msi_irq_domain(pcie); |
| 936 | advk_pcie_remove_irq_domain(pcie); | 943 | advk_pcie_remove_irq_domain(pcie); |
| 937 | return -ENOMEM; | 944 | return ret; |
| 938 | } | 945 | } |
| 939 | 946 | ||
| 947 | bus = bridge->bus; | ||
| 948 | |||
| 940 | pci_bus_assign_resources(bus); | 949 | pci_bus_assign_resources(bus); |
| 941 | 950 | ||
| 942 | list_for_each_entry(child, &bus->children, node) | 951 | list_for_each_entry(child, &bus->children, node) |
diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c index d26501c4145a..5162dffc102b 100644 --- a/drivers/pci/host/pci-ftpci100.c +++ b/drivers/pci/host/pci-ftpci100.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/irqchip/chained_irq.h> | 25 | #include <linux/irqchip/chained_irq.h> |
| 26 | #include <linux/bitops.h> | 26 | #include <linux/bitops.h> |
| 27 | #include <linux/irq.h> | 27 | #include <linux/irq.h> |
| 28 | #include <linux/clk.h> | ||
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * Special configuration registers directly in the first few words | 31 | * Special configuration registers directly in the first few words |
| @@ -37,6 +38,7 @@ | |||
| 37 | #define PCI_CONFIG 0x28 /* PCI configuration command register */ | 38 | #define PCI_CONFIG 0x28 /* PCI configuration command register */ |
| 38 | #define PCI_DATA 0x2C | 39 | #define PCI_DATA 0x2C |
| 39 | 40 | ||
| 41 | #define FARADAY_PCI_STATUS_CMD 0x04 /* Status and command */ | ||
| 40 | #define FARADAY_PCI_PMC 0x40 /* Power management control */ | 42 | #define FARADAY_PCI_PMC 0x40 /* Power management control */ |
| 41 | #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ | 43 | #define FARADAY_PCI_PMCSR 0x44 /* Power management status */ |
| 42 | #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ | 44 | #define FARADAY_PCI_CTRL1 0x48 /* Control register 1 */ |
| @@ -45,6 +47,8 @@ | |||
| 45 | #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ | 47 | #define FARADAY_PCI_MEM2_BASE_SIZE 0x54 /* Memory base and size #2 */ |
| 46 | #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ | 48 | #define FARADAY_PCI_MEM3_BASE_SIZE 0x58 /* Memory base and size #3 */ |
| 47 | 49 | ||
| 50 | #define PCI_STATUS_66MHZ_CAPABLE BIT(21) | ||
| 51 | |||
| 48 | /* Bits 31..28 gives INTD..INTA status */ | 52 | /* Bits 31..28 gives INTD..INTA status */ |
| 49 | #define PCI_CTRL2_INTSTS_SHIFT 28 | 53 | #define PCI_CTRL2_INTSTS_SHIFT 28 |
| 50 | #define PCI_CTRL2_INTMASK_CMDERR BIT(27) | 54 | #define PCI_CTRL2_INTMASK_CMDERR BIT(27) |
| @@ -117,6 +121,7 @@ struct faraday_pci { | |||
| 117 | void __iomem *base; | 121 | void __iomem *base; |
| 118 | struct irq_domain *irqdomain; | 122 | struct irq_domain *irqdomain; |
| 119 | struct pci_bus *bus; | 123 | struct pci_bus *bus; |
| 124 | struct clk *bus_clk; | ||
| 120 | }; | 125 | }; |
| 121 | 126 | ||
| 122 | static int faraday_res_to_memcfg(resource_size_t mem_base, | 127 | static int faraday_res_to_memcfg(resource_size_t mem_base, |
| @@ -178,12 +183,11 @@ static int faraday_res_to_memcfg(resource_size_t mem_base, | |||
| 178 | return 0; | 183 | return 0; |
| 179 | } | 184 | } |
| 180 | 185 | ||
| 181 | static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, | 186 | static int faraday_raw_pci_read_config(struct faraday_pci *p, int bus_number, |
| 182 | int config, int size, u32 *value) | 187 | unsigned int fn, int config, int size, |
| 188 | u32 *value) | ||
| 183 | { | 189 | { |
| 184 | struct faraday_pci *p = bus->sysdata; | 190 | writel(PCI_CONF_BUS(bus_number) | |
| 185 | |||
| 186 | writel(PCI_CONF_BUS(bus->number) | | ||
| 187 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | 191 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | |
| 188 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | 192 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | |
| 189 | PCI_CONF_WHERE(config) | | 193 | PCI_CONF_WHERE(config) | |
| @@ -197,24 +201,28 @@ static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, | |||
| 197 | else if (size == 2) | 201 | else if (size == 2) |
| 198 | *value = (*value >> (8 * (config & 3))) & 0xFFFF; | 202 | *value = (*value >> (8 * (config & 3))) & 0xFFFF; |
| 199 | 203 | ||
| 204 | return PCIBIOS_SUCCESSFUL; | ||
| 205 | } | ||
| 206 | |||
| 207 | static int faraday_pci_read_config(struct pci_bus *bus, unsigned int fn, | ||
| 208 | int config, int size, u32 *value) | ||
| 209 | { | ||
| 210 | struct faraday_pci *p = bus->sysdata; | ||
| 211 | |||
| 200 | dev_dbg(&bus->dev, | 212 | dev_dbg(&bus->dev, |
| 201 | "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | 213 | "[read] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", |
| 202 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); | 214 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, *value); |
| 203 | 215 | ||
| 204 | return PCIBIOS_SUCCESSFUL; | 216 | return faraday_raw_pci_read_config(p, bus->number, fn, config, size, value); |
| 205 | } | 217 | } |
| 206 | 218 | ||
| 207 | static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, | 219 | static int faraday_raw_pci_write_config(struct faraday_pci *p, int bus_number, |
| 208 | int config, int size, u32 value) | 220 | unsigned int fn, int config, int size, |
| 221 | u32 value) | ||
| 209 | { | 222 | { |
| 210 | struct faraday_pci *p = bus->sysdata; | ||
| 211 | int ret = PCIBIOS_SUCCESSFUL; | 223 | int ret = PCIBIOS_SUCCESSFUL; |
| 212 | 224 | ||
| 213 | dev_dbg(&bus->dev, | 225 | writel(PCI_CONF_BUS(bus_number) | |
| 214 | "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 215 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); | ||
| 216 | |||
| 217 | writel(PCI_CONF_BUS(bus->number) | | ||
| 218 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | | 226 | PCI_CONF_DEVICE(PCI_SLOT(fn)) | |
| 219 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | | 227 | PCI_CONF_FUNCTION(PCI_FUNC(fn)) | |
| 220 | PCI_CONF_WHERE(config) | | 228 | PCI_CONF_WHERE(config) | |
| @@ -238,6 +246,19 @@ static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, | |||
| 238 | return ret; | 246 | return ret; |
| 239 | } | 247 | } |
| 240 | 248 | ||
| 249 | static int faraday_pci_write_config(struct pci_bus *bus, unsigned int fn, | ||
| 250 | int config, int size, u32 value) | ||
| 251 | { | ||
| 252 | struct faraday_pci *p = bus->sysdata; | ||
| 253 | |||
| 254 | dev_dbg(&bus->dev, | ||
| 255 | "[write] slt: %.2d, fnc: %d, cnf: 0x%.2X, val (%d bytes): 0x%.8X\n", | ||
| 256 | PCI_SLOT(fn), PCI_FUNC(fn), config, size, value); | ||
| 257 | |||
| 258 | return faraday_raw_pci_write_config(p, bus->number, fn, config, size, | ||
| 259 | value); | ||
| 260 | } | ||
| 261 | |||
| 241 | static struct pci_ops faraday_pci_ops = { | 262 | static struct pci_ops faraday_pci_ops = { |
| 242 | .read = faraday_pci_read_config, | 263 | .read = faraday_pci_read_config, |
| 243 | .write = faraday_pci_write_config, | 264 | .write = faraday_pci_write_config, |
| @@ -248,10 +269,10 @@ static void faraday_pci_ack_irq(struct irq_data *d) | |||
| 248 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | 269 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); |
| 249 | unsigned int reg; | 270 | unsigned int reg; |
| 250 | 271 | ||
| 251 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | 272 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); |
| 252 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | 273 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); |
| 253 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); | 274 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTSTS_SHIFT); |
| 254 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | 275 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); |
| 255 | } | 276 | } |
| 256 | 277 | ||
| 257 | static void faraday_pci_mask_irq(struct irq_data *d) | 278 | static void faraday_pci_mask_irq(struct irq_data *d) |
| @@ -259,10 +280,10 @@ static void faraday_pci_mask_irq(struct irq_data *d) | |||
| 259 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | 280 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); |
| 260 | unsigned int reg; | 281 | unsigned int reg; |
| 261 | 282 | ||
| 262 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | 283 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); |
| 263 | reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) | 284 | reg &= ~((0xF << PCI_CTRL2_INTSTS_SHIFT) |
| 264 | | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); | 285 | | BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT)); |
| 265 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | 286 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); |
| 266 | } | 287 | } |
| 267 | 288 | ||
| 268 | static void faraday_pci_unmask_irq(struct irq_data *d) | 289 | static void faraday_pci_unmask_irq(struct irq_data *d) |
| @@ -270,10 +291,10 @@ static void faraday_pci_unmask_irq(struct irq_data *d) | |||
| 270 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); | 291 | struct faraday_pci *p = irq_data_get_irq_chip_data(d); |
| 271 | unsigned int reg; | 292 | unsigned int reg; |
| 272 | 293 | ||
| 273 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | 294 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); |
| 274 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); | 295 | reg &= ~(0xF << PCI_CTRL2_INTSTS_SHIFT); |
| 275 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); | 296 | reg |= BIT(irqd_to_hwirq(d) + PCI_CTRL2_INTMASK_SHIFT); |
| 276 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, reg); | 297 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, reg); |
| 277 | } | 298 | } |
| 278 | 299 | ||
| 279 | static void faraday_pci_irq_handler(struct irq_desc *desc) | 300 | static void faraday_pci_irq_handler(struct irq_desc *desc) |
| @@ -282,7 +303,7 @@ static void faraday_pci_irq_handler(struct irq_desc *desc) | |||
| 282 | struct irq_chip *irqchip = irq_desc_get_chip(desc); | 303 | struct irq_chip *irqchip = irq_desc_get_chip(desc); |
| 283 | unsigned int irq_stat, reg, i; | 304 | unsigned int irq_stat, reg, i; |
| 284 | 305 | ||
| 285 | faraday_pci_read_config(p->bus, 0, FARADAY_PCI_CTRL2, 4, ®); | 306 | faraday_raw_pci_read_config(p, 0, 0, FARADAY_PCI_CTRL2, 4, ®); |
| 286 | irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; | 307 | irq_stat = reg >> PCI_CTRL2_INTSTS_SHIFT; |
| 287 | 308 | ||
| 288 | chained_irq_enter(irqchip, desc); | 309 | chained_irq_enter(irqchip, desc); |
| @@ -403,8 +424,8 @@ static int faraday_pci_parse_map_dma_ranges(struct faraday_pci *p, | |||
| 403 | dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", | 424 | dev_info(dev, "DMA MEM%d BASE: 0x%016llx -> 0x%016llx config %08x\n", |
| 404 | i + 1, range.pci_addr, end, val); | 425 | i + 1, range.pci_addr, end, val); |
| 405 | if (i <= 2) { | 426 | if (i <= 2) { |
| 406 | faraday_pci_write_config(p->bus, 0, confreg[i], | 427 | faraday_raw_pci_write_config(p, 0, 0, confreg[i], |
| 407 | 4, val); | 428 | 4, val); |
| 408 | } else { | 429 | } else { |
| 409 | dev_err(dev, "ignore extraneous dma-range %d\n", i); | 430 | dev_err(dev, "ignore extraneous dma-range %d\n", i); |
| 410 | break; | 431 | break; |
| @@ -428,11 +449,14 @@ static int faraday_pci_probe(struct platform_device *pdev) | |||
| 428 | struct resource *mem; | 449 | struct resource *mem; |
| 429 | struct resource *io; | 450 | struct resource *io; |
| 430 | struct pci_host_bridge *host; | 451 | struct pci_host_bridge *host; |
| 452 | struct clk *clk; | ||
| 453 | unsigned char max_bus_speed = PCI_SPEED_33MHz; | ||
| 454 | unsigned char cur_bus_speed = PCI_SPEED_33MHz; | ||
| 431 | int ret; | 455 | int ret; |
| 432 | u32 val; | 456 | u32 val; |
| 433 | LIST_HEAD(res); | 457 | LIST_HEAD(res); |
| 434 | 458 | ||
| 435 | host = pci_alloc_host_bridge(sizeof(*p)); | 459 | host = devm_pci_alloc_host_bridge(dev, sizeof(*p)); |
| 436 | if (!host) | 460 | if (!host) |
| 437 | return -ENOMEM; | 461 | return -ENOMEM; |
| 438 | 462 | ||
| @@ -440,10 +464,30 @@ static int faraday_pci_probe(struct platform_device *pdev) | |||
| 440 | host->ops = &faraday_pci_ops; | 464 | host->ops = &faraday_pci_ops; |
| 441 | host->busnr = 0; | 465 | host->busnr = 0; |
| 442 | host->msi = NULL; | 466 | host->msi = NULL; |
| 467 | host->map_irq = of_irq_parse_and_map_pci; | ||
| 468 | host->swizzle_irq = pci_common_swizzle; | ||
| 443 | p = pci_host_bridge_priv(host); | 469 | p = pci_host_bridge_priv(host); |
| 444 | host->sysdata = p; | 470 | host->sysdata = p; |
| 445 | p->dev = dev; | 471 | p->dev = dev; |
| 446 | 472 | ||
| 473 | /* Retrieve and enable optional clocks */ | ||
| 474 | clk = devm_clk_get(dev, "PCLK"); | ||
| 475 | if (IS_ERR(clk)) | ||
| 476 | return PTR_ERR(clk); | ||
| 477 | ret = clk_prepare_enable(clk); | ||
| 478 | if (ret) { | ||
| 479 | dev_err(dev, "could not prepare PCLK\n"); | ||
| 480 | return ret; | ||
| 481 | } | ||
| 482 | p->bus_clk = devm_clk_get(dev, "PCICLK"); | ||
| 483 | if (IS_ERR(p->bus_clk)) | ||
| 484 | return PTR_ERR(clk); | ||
| 485 | ret = clk_prepare_enable(p->bus_clk); | ||
| 486 | if (ret) { | ||
| 487 | dev_err(dev, "could not prepare PCICLK\n"); | ||
| 488 | return ret; | ||
| 489 | } | ||
| 490 | |||
| 447 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 491 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 448 | p->base = devm_ioremap_resource(dev, regs); | 492 | p->base = devm_ioremap_resource(dev, regs); |
| 449 | if (IS_ERR(p->base)) | 493 | if (IS_ERR(p->base)) |
| @@ -496,17 +540,8 @@ static int faraday_pci_probe(struct platform_device *pdev) | |||
| 496 | val |= PCI_COMMAND_MEMORY; | 540 | val |= PCI_COMMAND_MEMORY; |
| 497 | val |= PCI_COMMAND_MASTER; | 541 | val |= PCI_COMMAND_MASTER; |
| 498 | writel(val, p->base + PCI_CTRL); | 542 | writel(val, p->base + PCI_CTRL); |
| 499 | |||
| 500 | list_splice_init(&res, &host->windows); | ||
| 501 | ret = pci_register_host_bridge(host); | ||
| 502 | if (ret) { | ||
| 503 | dev_err(dev, "failed to register host: %d\n", ret); | ||
| 504 | return ret; | ||
| 505 | } | ||
| 506 | p->bus = host->bus; | ||
| 507 | |||
| 508 | /* Mask and clear all interrupts */ | 543 | /* Mask and clear all interrupts */ |
| 509 | faraday_pci_write_config(p->bus, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); | 544 | faraday_raw_pci_write_config(p, 0, 0, FARADAY_PCI_CTRL2 + 2, 2, 0xF000); |
| 510 | if (variant->cascaded_irq) { | 545 | if (variant->cascaded_irq) { |
| 511 | ret = faraday_pci_setup_cascaded_irq(p); | 546 | ret = faraday_pci_setup_cascaded_irq(p); |
| 512 | if (ret) { | 547 | if (ret) { |
| @@ -515,12 +550,48 @@ static int faraday_pci_probe(struct platform_device *pdev) | |||
| 515 | } | 550 | } |
| 516 | } | 551 | } |
| 517 | 552 | ||
| 553 | /* Check bus clock if we can gear up to 66 MHz */ | ||
| 554 | if (!IS_ERR(p->bus_clk)) { | ||
| 555 | unsigned long rate; | ||
| 556 | u32 val; | ||
| 557 | |||
| 558 | faraday_raw_pci_read_config(p, 0, 0, | ||
| 559 | FARADAY_PCI_STATUS_CMD, 4, &val); | ||
| 560 | rate = clk_get_rate(p->bus_clk); | ||
| 561 | |||
| 562 | if ((rate == 33000000) && (val & PCI_STATUS_66MHZ_CAPABLE)) { | ||
| 563 | dev_info(dev, "33MHz bus is 66MHz capable\n"); | ||
| 564 | max_bus_speed = PCI_SPEED_66MHz; | ||
| 565 | ret = clk_set_rate(p->bus_clk, 66000000); | ||
| 566 | if (ret) | ||
| 567 | dev_err(dev, "failed to set bus clock\n"); | ||
| 568 | } else { | ||
| 569 | dev_info(dev, "33MHz only bus\n"); | ||
| 570 | max_bus_speed = PCI_SPEED_33MHz; | ||
| 571 | } | ||
| 572 | |||
| 573 | /* Bumping the clock may fail so read back the rate */ | ||
| 574 | rate = clk_get_rate(p->bus_clk); | ||
| 575 | if (rate == 33000000) | ||
| 576 | cur_bus_speed = PCI_SPEED_33MHz; | ||
| 577 | if (rate == 66000000) | ||
| 578 | cur_bus_speed = PCI_SPEED_66MHz; | ||
| 579 | } | ||
| 580 | |||
| 518 | ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); | 581 | ret = faraday_pci_parse_map_dma_ranges(p, dev->of_node); |
| 519 | if (ret) | 582 | if (ret) |
| 520 | return ret; | 583 | return ret; |
| 521 | 584 | ||
| 522 | pci_scan_child_bus(p->bus); | 585 | list_splice_init(&res, &host->windows); |
| 523 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | 586 | ret = pci_scan_root_bus_bridge(host); |
| 587 | if (ret) { | ||
| 588 | dev_err(dev, "failed to scan host: %d\n", ret); | ||
| 589 | return ret; | ||
| 590 | } | ||
| 591 | p->bus = host->bus; | ||
| 592 | p->bus->max_bus_speed = max_bus_speed; | ||
| 593 | p->bus->cur_bus_speed = cur_bus_speed; | ||
| 594 | |||
| 524 | pci_bus_assign_resources(p->bus); | 595 | pci_bus_assign_resources(p->bus); |
| 525 | pci_bus_add_devices(p->bus); | 596 | pci_bus_add_devices(p->bus); |
| 526 | pci_free_resource_list(&res); | 597 | pci_free_resource_list(&res); |
diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c index e9a53bae1c25..44a47d4f0b8f 100644 --- a/drivers/pci/host/pci-host-common.c +++ b/drivers/pci/host/pci-host-common.c | |||
| @@ -117,8 +117,14 @@ int pci_host_common_probe(struct platform_device *pdev, | |||
| 117 | struct device *dev = &pdev->dev; | 117 | struct device *dev = &pdev->dev; |
| 118 | struct device_node *np = dev->of_node; | 118 | struct device_node *np = dev->of_node; |
| 119 | struct pci_bus *bus, *child; | 119 | struct pci_bus *bus, *child; |
| 120 | struct pci_host_bridge *bridge; | ||
| 120 | struct pci_config_window *cfg; | 121 | struct pci_config_window *cfg; |
| 121 | struct list_head resources; | 122 | struct list_head resources; |
| 123 | int ret; | ||
| 124 | |||
| 125 | bridge = devm_pci_alloc_host_bridge(dev, 0); | ||
| 126 | if (!bridge) | ||
| 127 | return -ENOMEM; | ||
| 122 | 128 | ||
| 123 | type = of_get_property(np, "device_type", NULL); | 129 | type = of_get_property(np, "device_type", NULL); |
| 124 | if (!type || strcmp(type, "pci")) { | 130 | if (!type || strcmp(type, "pci")) { |
| @@ -138,16 +144,21 @@ int pci_host_common_probe(struct platform_device *pdev, | |||
| 138 | if (!pci_has_flag(PCI_PROBE_ONLY)) | 144 | if (!pci_has_flag(PCI_PROBE_ONLY)) |
| 139 | pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); | 145 | pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); |
| 140 | 146 | ||
| 141 | bus = pci_scan_root_bus(dev, cfg->busr.start, &ops->pci_ops, cfg, | 147 | list_splice_init(&resources, &bridge->windows); |
| 142 | &resources); | 148 | bridge->dev.parent = dev; |
| 143 | if (!bus) { | 149 | bridge->sysdata = cfg; |
| 144 | dev_err(dev, "Scanning rootbus failed"); | 150 | bridge->busnr = cfg->busr.start; |
| 145 | return -ENODEV; | 151 | bridge->ops = &ops->pci_ops; |
| 152 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 153 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 154 | |||
| 155 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 156 | if (ret < 0) { | ||
| 157 | dev_err(dev, "Scanning root bridge failed"); | ||
| 158 | return ret; | ||
| 146 | } | 159 | } |
| 147 | 160 | ||
| 148 | #ifdef CONFIG_ARM | 161 | bus = bridge->bus; |
| 149 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
| 150 | #endif | ||
| 151 | 162 | ||
| 152 | /* | 163 | /* |
| 153 | * We insert PCI resources into the iomem_resource and | 164 | * We insert PCI resources into the iomem_resource and |
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c index 84936383e269..415dcc69a502 100644 --- a/drivers/pci/host/pci-hyperv.c +++ b/drivers/pci/host/pci-hyperv.c | |||
| @@ -64,22 +64,39 @@ | |||
| 64 | * major version. | 64 | * major version. |
| 65 | */ | 65 | */ |
| 66 | 66 | ||
| 67 | #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (major))) | 67 | #define PCI_MAKE_VERSION(major, minor) ((u32)(((major) << 16) | (minor))) |
| 68 | #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) | 68 | #define PCI_MAJOR_VERSION(version) ((u32)(version) >> 16) |
| 69 | #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) | 69 | #define PCI_MINOR_VERSION(version) ((u32)(version) & 0xff) |
| 70 | 70 | ||
| 71 | enum { | 71 | enum pci_protocol_version_t { |
| 72 | PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), | 72 | PCI_PROTOCOL_VERSION_1_1 = PCI_MAKE_VERSION(1, 1), /* Win10 */ |
| 73 | PCI_PROTOCOL_VERSION_CURRENT = PCI_PROTOCOL_VERSION_1_1 | 73 | PCI_PROTOCOL_VERSION_1_2 = PCI_MAKE_VERSION(1, 2), /* RS1 */ |
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | #define CPU_AFFINITY_ALL -1ULL | 76 | #define CPU_AFFINITY_ALL -1ULL |
| 77 | |||
| 78 | /* | ||
| 79 | * Supported protocol versions in the order of probing - highest go | ||
| 80 | * first. | ||
| 81 | */ | ||
| 82 | static enum pci_protocol_version_t pci_protocol_versions[] = { | ||
| 83 | PCI_PROTOCOL_VERSION_1_2, | ||
| 84 | PCI_PROTOCOL_VERSION_1_1, | ||
| 85 | }; | ||
| 86 | |||
| 87 | /* | ||
| 88 | * Protocol version negotiated by hv_pci_protocol_negotiation(). | ||
| 89 | */ | ||
| 90 | static enum pci_protocol_version_t pci_protocol_version; | ||
| 91 | |||
| 77 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 | 92 | #define PCI_CONFIG_MMIO_LENGTH 0x2000 |
| 78 | #define CFG_PAGE_OFFSET 0x1000 | 93 | #define CFG_PAGE_OFFSET 0x1000 |
| 79 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) | 94 | #define CFG_PAGE_SIZE (PCI_CONFIG_MMIO_LENGTH - CFG_PAGE_OFFSET) |
| 80 | 95 | ||
| 81 | #define MAX_SUPPORTED_MSI_MESSAGES 0x400 | 96 | #define MAX_SUPPORTED_MSI_MESSAGES 0x400 |
| 82 | 97 | ||
| 98 | #define STATUS_REVISION_MISMATCH 0xC0000059 | ||
| 99 | |||
| 83 | /* | 100 | /* |
| 84 | * Message Types | 101 | * Message Types |
| 85 | */ | 102 | */ |
| @@ -109,6 +126,9 @@ enum pci_message_type { | |||
| 109 | PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, | 126 | PCI_QUERY_PROTOCOL_VERSION = PCI_MESSAGE_BASE + 0x13, |
| 110 | PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, | 127 | PCI_CREATE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x14, |
| 111 | PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, | 128 | PCI_DELETE_INTERRUPT_MESSAGE = PCI_MESSAGE_BASE + 0x15, |
| 129 | PCI_RESOURCES_ASSIGNED2 = PCI_MESSAGE_BASE + 0x16, | ||
| 130 | PCI_CREATE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x17, | ||
| 131 | PCI_DELETE_INTERRUPT_MESSAGE2 = PCI_MESSAGE_BASE + 0x18, /* unused */ | ||
| 112 | PCI_MESSAGE_MAXIMUM | 132 | PCI_MESSAGE_MAXIMUM |
| 113 | }; | 133 | }; |
| 114 | 134 | ||
| @@ -179,6 +199,30 @@ struct hv_msi_desc { | |||
| 179 | } __packed; | 199 | } __packed; |
| 180 | 200 | ||
| 181 | /** | 201 | /** |
| 202 | * struct hv_msi_desc2 - 1.2 version of hv_msi_desc | ||
| 203 | * @vector: IDT entry | ||
| 204 | * @delivery_mode: As defined in Intel's Programmer's | ||
| 205 | * Reference Manual, Volume 3, Chapter 8. | ||
| 206 | * @vector_count: Number of contiguous entries in the | ||
| 207 | * Interrupt Descriptor Table that are | ||
| 208 | * occupied by this Message-Signaled | ||
| 209 | * Interrupt. For "MSI", as first defined | ||
| 210 | * in PCI 2.2, this can be between 1 and | ||
| 211 | * 32. For "MSI-X," as first defined in PCI | ||
| 212 | * 3.0, this must be 1, as each MSI-X table | ||
| 213 | * entry would have its own descriptor. | ||
| 214 | * @processor_count: number of bits enabled in array. | ||
| 215 | * @processor_array: All the target virtual processors. | ||
| 216 | */ | ||
| 217 | struct hv_msi_desc2 { | ||
| 218 | u8 vector; | ||
| 219 | u8 delivery_mode; | ||
| 220 | u16 vector_count; | ||
| 221 | u16 processor_count; | ||
| 222 | u16 processor_array[32]; | ||
| 223 | } __packed; | ||
| 224 | |||
| 225 | /** | ||
| 182 | * struct tran_int_desc | 226 | * struct tran_int_desc |
| 183 | * @reserved: unused, padding | 227 | * @reserved: unused, padding |
| 184 | * @vector_count: same as in hv_msi_desc | 228 | * @vector_count: same as in hv_msi_desc |
| @@ -245,7 +289,7 @@ struct pci_packet { | |||
| 245 | 289 | ||
| 246 | struct pci_version_request { | 290 | struct pci_version_request { |
| 247 | struct pci_message message_type; | 291 | struct pci_message message_type; |
| 248 | enum pci_message_type protocol_version; | 292 | u32 protocol_version; |
| 249 | } __packed; | 293 | } __packed; |
| 250 | 294 | ||
| 251 | /* | 295 | /* |
| @@ -294,6 +338,14 @@ struct pci_resources_assigned { | |||
| 294 | u32 reserved[4]; | 338 | u32 reserved[4]; |
| 295 | } __packed; | 339 | } __packed; |
| 296 | 340 | ||
| 341 | struct pci_resources_assigned2 { | ||
| 342 | struct pci_message message_type; | ||
| 343 | union win_slot_encoding wslot; | ||
| 344 | u8 memory_range[0x14][6]; /* not used here */ | ||
| 345 | u32 msi_descriptor_count; | ||
| 346 | u8 reserved[70]; | ||
| 347 | } __packed; | ||
| 348 | |||
| 297 | struct pci_create_interrupt { | 349 | struct pci_create_interrupt { |
| 298 | struct pci_message message_type; | 350 | struct pci_message message_type; |
| 299 | union win_slot_encoding wslot; | 351 | union win_slot_encoding wslot; |
| @@ -306,6 +358,12 @@ struct pci_create_int_response { | |||
| 306 | struct tran_int_desc int_desc; | 358 | struct tran_int_desc int_desc; |
| 307 | } __packed; | 359 | } __packed; |
| 308 | 360 | ||
| 361 | struct pci_create_interrupt2 { | ||
| 362 | struct pci_message message_type; | ||
| 363 | union win_slot_encoding wslot; | ||
| 364 | struct hv_msi_desc2 int_desc; | ||
| 365 | } __packed; | ||
| 366 | |||
| 309 | struct pci_delete_interrupt { | 367 | struct pci_delete_interrupt { |
| 310 | struct pci_message message_type; | 368 | struct pci_message message_type; |
| 311 | union win_slot_encoding wslot; | 369 | union win_slot_encoding wslot; |
| @@ -331,17 +389,42 @@ static int pci_ring_size = (4 * PAGE_SIZE); | |||
| 331 | #define HV_PARTITION_ID_SELF ((u64)-1) | 389 | #define HV_PARTITION_ID_SELF ((u64)-1) |
| 332 | #define HVCALL_RETARGET_INTERRUPT 0x7e | 390 | #define HVCALL_RETARGET_INTERRUPT 0x7e |
| 333 | 391 | ||
| 334 | struct retarget_msi_interrupt { | 392 | struct hv_interrupt_entry { |
| 335 | u64 partition_id; /* use "self" */ | ||
| 336 | u64 device_id; | ||
| 337 | u32 source; /* 1 for MSI(-X) */ | 393 | u32 source; /* 1 for MSI(-X) */ |
| 338 | u32 reserved1; | 394 | u32 reserved1; |
| 339 | u32 address; | 395 | u32 address; |
| 340 | u32 data; | 396 | u32 data; |
| 341 | u64 reserved2; | 397 | }; |
| 398 | |||
| 399 | #define HV_VP_SET_BANK_COUNT_MAX 5 /* current implementation limit */ | ||
| 400 | |||
| 401 | struct hv_vp_set { | ||
| 402 | u64 format; /* 0 (HvGenericSetSparse4k) */ | ||
| 403 | u64 valid_banks; | ||
| 404 | u64 masks[HV_VP_SET_BANK_COUNT_MAX]; | ||
| 405 | }; | ||
| 406 | |||
| 407 | /* | ||
| 408 | * flags for hv_device_interrupt_target.flags | ||
| 409 | */ | ||
| 410 | #define HV_DEVICE_INTERRUPT_TARGET_MULTICAST 1 | ||
| 411 | #define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET 2 | ||
| 412 | |||
| 413 | struct hv_device_interrupt_target { | ||
| 342 | u32 vector; | 414 | u32 vector; |
| 343 | u32 flags; | 415 | u32 flags; |
| 344 | u64 vp_mask; | 416 | union { |
| 417 | u64 vp_mask; | ||
| 418 | struct hv_vp_set vp_set; | ||
| 419 | }; | ||
| 420 | }; | ||
| 421 | |||
| 422 | struct retarget_msi_interrupt { | ||
| 423 | u64 partition_id; /* use "self" */ | ||
| 424 | u64 device_id; | ||
| 425 | struct hv_interrupt_entry int_entry; | ||
| 426 | u64 reserved2; | ||
| 427 | struct hv_device_interrupt_target int_target; | ||
| 345 | } __packed; | 428 | } __packed; |
| 346 | 429 | ||
| 347 | /* | 430 | /* |
| @@ -382,7 +465,10 @@ struct hv_pcibus_device { | |||
| 382 | struct msi_domain_info msi_info; | 465 | struct msi_domain_info msi_info; |
| 383 | struct msi_controller msi_chip; | 466 | struct msi_controller msi_chip; |
| 384 | struct irq_domain *irq_domain; | 467 | struct irq_domain *irq_domain; |
| 468 | |||
| 469 | /* hypercall arg, must not cross page boundary */ | ||
| 385 | struct retarget_msi_interrupt retarget_msi_interrupt_params; | 470 | struct retarget_msi_interrupt retarget_msi_interrupt_params; |
| 471 | |||
| 386 | spinlock_t retarget_msi_interrupt_lock; | 472 | spinlock_t retarget_msi_interrupt_lock; |
| 387 | }; | 473 | }; |
| 388 | 474 | ||
| @@ -476,6 +562,52 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev, | |||
| 476 | static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); | 562 | static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus); |
| 477 | static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); | 563 | static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus); |
| 478 | 564 | ||
| 565 | |||
| 566 | /* | ||
| 567 | * Temporary CPU to vCPU mapping to address transitioning | ||
| 568 | * vmbus_cpu_number_to_vp_number() being migrated to | ||
| 569 | * hv_cpu_number_to_vp_number() in a separate patch. Once that patch | ||
| 570 | * has been picked up in the main line, remove this code here and use | ||
| 571 | * the official code. | ||
| 572 | */ | ||
| 573 | static struct hv_tmpcpumap | ||
| 574 | { | ||
| 575 | bool initialized; | ||
| 576 | u32 vp_index[NR_CPUS]; | ||
| 577 | } hv_tmpcpumap; | ||
| 578 | |||
| 579 | static void hv_tmpcpumap_init_cpu(void *_unused) | ||
| 580 | { | ||
| 581 | int cpu = smp_processor_id(); | ||
| 582 | u64 vp_index; | ||
| 583 | |||
| 584 | hv_get_vp_index(vp_index); | ||
| 585 | |||
| 586 | hv_tmpcpumap.vp_index[cpu] = vp_index; | ||
| 587 | } | ||
| 588 | |||
| 589 | static void hv_tmpcpumap_init(void) | ||
| 590 | { | ||
| 591 | if (hv_tmpcpumap.initialized) | ||
| 592 | return; | ||
| 593 | |||
| 594 | memset(hv_tmpcpumap.vp_index, -1, sizeof(hv_tmpcpumap.vp_index)); | ||
| 595 | on_each_cpu(hv_tmpcpumap_init_cpu, NULL, true); | ||
| 596 | hv_tmpcpumap.initialized = true; | ||
| 597 | } | ||
| 598 | |||
| 599 | /** | ||
| 600 | * hv_tmp_cpu_nr_to_vp_nr() - Convert Linux CPU nr to Hyper-V vCPU nr | ||
| 601 | * | ||
| 602 | * Remove once vmbus_cpu_number_to_vp_number() has been converted to | ||
| 603 | * hv_cpu_number_to_vp_number() and replace callers appropriately. | ||
| 604 | */ | ||
| 605 | static u32 hv_tmp_cpu_nr_to_vp_nr(int cpu) | ||
| 606 | { | ||
| 607 | return hv_tmpcpumap.vp_index[cpu]; | ||
| 608 | } | ||
| 609 | |||
| 610 | |||
| 479 | /** | 611 | /** |
| 480 | * devfn_to_wslot() - Convert from Linux PCI slot to Windows | 612 | * devfn_to_wslot() - Convert from Linux PCI slot to Windows |
| 481 | * @devfn: The Linux representation of PCI slot | 613 | * @devfn: The Linux representation of PCI slot |
| @@ -786,8 +918,11 @@ static void hv_irq_unmask(struct irq_data *data) | |||
| 786 | struct cpumask *dest; | 918 | struct cpumask *dest; |
| 787 | struct pci_bus *pbus; | 919 | struct pci_bus *pbus; |
| 788 | struct pci_dev *pdev; | 920 | struct pci_dev *pdev; |
| 789 | int cpu; | ||
| 790 | unsigned long flags; | 921 | unsigned long flags; |
| 922 | u32 var_size = 0; | ||
| 923 | int cpu_vmbus; | ||
| 924 | int cpu; | ||
| 925 | u64 res; | ||
| 791 | 926 | ||
| 792 | dest = irq_data_get_affinity_mask(data); | 927 | dest = irq_data_get_affinity_mask(data); |
| 793 | pdev = msi_desc_to_pci_dev(msi_desc); | 928 | pdev = msi_desc_to_pci_dev(msi_desc); |
| @@ -799,23 +934,74 @@ static void hv_irq_unmask(struct irq_data *data) | |||
| 799 | params = &hbus->retarget_msi_interrupt_params; | 934 | params = &hbus->retarget_msi_interrupt_params; |
| 800 | memset(params, 0, sizeof(*params)); | 935 | memset(params, 0, sizeof(*params)); |
| 801 | params->partition_id = HV_PARTITION_ID_SELF; | 936 | params->partition_id = HV_PARTITION_ID_SELF; |
| 802 | params->source = 1; /* MSI(-X) */ | 937 | params->int_entry.source = 1; /* MSI(-X) */ |
| 803 | params->address = msi_desc->msg.address_lo; | 938 | params->int_entry.address = msi_desc->msg.address_lo; |
| 804 | params->data = msi_desc->msg.data; | 939 | params->int_entry.data = msi_desc->msg.data; |
| 805 | params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | | 940 | params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | |
| 806 | (hbus->hdev->dev_instance.b[4] << 16) | | 941 | (hbus->hdev->dev_instance.b[4] << 16) | |
| 807 | (hbus->hdev->dev_instance.b[7] << 8) | | 942 | (hbus->hdev->dev_instance.b[7] << 8) | |
| 808 | (hbus->hdev->dev_instance.b[6] & 0xf8) | | 943 | (hbus->hdev->dev_instance.b[6] & 0xf8) | |
| 809 | PCI_FUNC(pdev->devfn); | 944 | PCI_FUNC(pdev->devfn); |
| 810 | params->vector = cfg->vector; | 945 | params->int_target.vector = cfg->vector; |
| 946 | |||
| 947 | /* | ||
| 948 | * Honoring apic->irq_delivery_mode set to dest_Fixed by | ||
| 949 | * setting the HV_DEVICE_INTERRUPT_TARGET_MULTICAST flag results in a | ||
| 950 | * spurious interrupt storm. Not doing so does not seem to have a | ||
| 951 | * negative effect (yet?). | ||
| 952 | */ | ||
| 953 | |||
| 954 | if (pci_protocol_version >= PCI_PROTOCOL_VERSION_1_2) { | ||
| 955 | /* | ||
| 956 | * PCI_PROTOCOL_VERSION_1_2 supports the VP_SET version of the | ||
| 957 | * HVCALL_RETARGET_INTERRUPT hypercall, which also coincides | ||
| 958 | * with >64 VP support. | ||
| 959 | * ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED | ||
| 960 | * is not sufficient for this hypercall. | ||
| 961 | */ | ||
| 962 | params->int_target.flags |= | ||
| 963 | HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET; | ||
| 964 | params->int_target.vp_set.valid_banks = | ||
| 965 | (1ull << HV_VP_SET_BANK_COUNT_MAX) - 1; | ||
| 966 | |||
| 967 | /* | ||
| 968 | * var-sized hypercall, var-size starts after vp_mask (thus | ||
| 969 | * vp_set.format does not count, but vp_set.valid_banks does). | ||
| 970 | */ | ||
| 971 | var_size = 1 + HV_VP_SET_BANK_COUNT_MAX; | ||
| 811 | 972 | ||
| 812 | for_each_cpu_and(cpu, dest, cpu_online_mask) | 973 | for_each_cpu_and(cpu, dest, cpu_online_mask) { |
| 813 | params->vp_mask |= (1ULL << vmbus_cpu_number_to_vp_number(cpu)); | 974 | cpu_vmbus = hv_tmp_cpu_nr_to_vp_nr(cpu); |
| 814 | 975 | ||
| 815 | hv_do_hypercall(HVCALL_RETARGET_INTERRUPT, params, NULL); | 976 | if (cpu_vmbus >= HV_VP_SET_BANK_COUNT_MAX * 64) { |
| 977 | dev_err(&hbus->hdev->device, | ||
| 978 | "too high CPU %d", cpu_vmbus); | ||
| 979 | res = 1; | ||
| 980 | goto exit_unlock; | ||
| 981 | } | ||
| 816 | 982 | ||
| 983 | params->int_target.vp_set.masks[cpu_vmbus / 64] |= | ||
| 984 | (1ULL << (cpu_vmbus & 63)); | ||
| 985 | } | ||
| 986 | } else { | ||
| 987 | for_each_cpu_and(cpu, dest, cpu_online_mask) { | ||
| 988 | params->int_target.vp_mask |= | ||
| 989 | (1ULL << hv_tmp_cpu_nr_to_vp_nr(cpu)); | ||
| 990 | } | ||
| 991 | } | ||
| 992 | |||
| 993 | res = hv_do_hypercall(HVCALL_RETARGET_INTERRUPT | (var_size << 17), | ||
| 994 | params, NULL); | ||
| 995 | |||
| 996 | exit_unlock: | ||
| 817 | spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); | 997 | spin_unlock_irqrestore(&hbus->retarget_msi_interrupt_lock, flags); |
| 818 | 998 | ||
| 999 | if (res) { | ||
| 1000 | dev_err(&hbus->hdev->device, | ||
| 1001 | "%s() failed: %#llx", __func__, res); | ||
| 1002 | return; | ||
| 1003 | } | ||
| 1004 | |||
| 819 | pci_msi_unmask_irq(data); | 1005 | pci_msi_unmask_irq(data); |
| 820 | } | 1006 | } |
| 821 | 1007 | ||
| @@ -836,6 +1022,53 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp, | |||
| 836 | complete(&comp_pkt->comp_pkt.host_event); | 1022 | complete(&comp_pkt->comp_pkt.host_event); |
| 837 | } | 1023 | } |
| 838 | 1024 | ||
| 1025 | static u32 hv_compose_msi_req_v1( | ||
| 1026 | struct pci_create_interrupt *int_pkt, struct cpumask *affinity, | ||
| 1027 | u32 slot, u8 vector) | ||
| 1028 | { | ||
| 1029 | int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; | ||
| 1030 | int_pkt->wslot.slot = slot; | ||
| 1031 | int_pkt->int_desc.vector = vector; | ||
| 1032 | int_pkt->int_desc.vector_count = 1; | ||
| 1033 | int_pkt->int_desc.delivery_mode = | ||
| 1034 | (apic->irq_delivery_mode == dest_LowestPrio) ? | ||
| 1035 | dest_LowestPrio : dest_Fixed; | ||
| 1036 | |||
| 1037 | /* | ||
| 1038 | * Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in | ||
| 1039 | * hv_irq_unmask(). | ||
| 1040 | */ | ||
| 1041 | int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; | ||
| 1042 | |||
| 1043 | return sizeof(*int_pkt); | ||
| 1044 | } | ||
| 1045 | |||
| 1046 | static u32 hv_compose_msi_req_v2( | ||
| 1047 | struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, | ||
| 1048 | u32 slot, u8 vector) | ||
| 1049 | { | ||
| 1050 | int cpu; | ||
| 1051 | |||
| 1052 | int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; | ||
| 1053 | int_pkt->wslot.slot = slot; | ||
| 1054 | int_pkt->int_desc.vector = vector; | ||
| 1055 | int_pkt->int_desc.vector_count = 1; | ||
| 1056 | int_pkt->int_desc.delivery_mode = | ||
| 1057 | (apic->irq_delivery_mode == dest_LowestPrio) ? | ||
| 1058 | dest_LowestPrio : dest_Fixed; | ||
| 1059 | |||
| 1060 | /* | ||
| 1061 | * Create MSI w/ dummy vCPU set targeting just one vCPU, overwritten | ||
| 1062 | * by subsequent retarget in hv_irq_unmask(). | ||
| 1063 | */ | ||
| 1064 | cpu = cpumask_first_and(affinity, cpu_online_mask); | ||
| 1065 | int_pkt->int_desc.processor_array[0] = | ||
| 1066 | hv_tmp_cpu_nr_to_vp_nr(cpu); | ||
| 1067 | int_pkt->int_desc.processor_count = 1; | ||
| 1068 | |||
| 1069 | return sizeof(*int_pkt); | ||
| 1070 | } | ||
| 1071 | |||
| 839 | /** | 1072 | /** |
| 840 | * hv_compose_msi_msg() - Supplies a valid MSI address/data | 1073 | * hv_compose_msi_msg() - Supplies a valid MSI address/data |
| 841 | * @data: Everything about this MSI | 1074 | * @data: Everything about this MSI |
| @@ -854,15 +1087,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 854 | struct hv_pci_dev *hpdev; | 1087 | struct hv_pci_dev *hpdev; |
| 855 | struct pci_bus *pbus; | 1088 | struct pci_bus *pbus; |
| 856 | struct pci_dev *pdev; | 1089 | struct pci_dev *pdev; |
| 857 | struct pci_create_interrupt *int_pkt; | ||
| 858 | struct compose_comp_ctxt comp; | 1090 | struct compose_comp_ctxt comp; |
| 859 | struct tran_int_desc *int_desc; | 1091 | struct tran_int_desc *int_desc; |
| 860 | struct cpumask *affinity; | ||
| 861 | struct { | 1092 | struct { |
| 862 | struct pci_packet pkt; | 1093 | struct pci_packet pci_pkt; |
| 863 | u8 buffer[sizeof(struct pci_create_interrupt)]; | 1094 | union { |
| 864 | } ctxt; | 1095 | struct pci_create_interrupt v1; |
| 865 | int cpu; | 1096 | struct pci_create_interrupt2 v2; |
| 1097 | } int_pkts; | ||
| 1098 | } __packed ctxt; | ||
| 1099 | |||
| 1100 | u32 size; | ||
| 866 | int ret; | 1101 | int ret; |
| 867 | 1102 | ||
| 868 | pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); | 1103 | pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); |
| @@ -885,36 +1120,44 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) | |||
| 885 | 1120 | ||
| 886 | memset(&ctxt, 0, sizeof(ctxt)); | 1121 | memset(&ctxt, 0, sizeof(ctxt)); |
| 887 | init_completion(&comp.comp_pkt.host_event); | 1122 | init_completion(&comp.comp_pkt.host_event); |
| 888 | ctxt.pkt.completion_func = hv_pci_compose_compl; | 1123 | ctxt.pci_pkt.completion_func = hv_pci_compose_compl; |
| 889 | ctxt.pkt.compl_ctxt = ∁ | 1124 | ctxt.pci_pkt.compl_ctxt = ∁ |
| 890 | int_pkt = (struct pci_create_interrupt *)&ctxt.pkt.message; | 1125 | |
| 891 | int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; | 1126 | switch (pci_protocol_version) { |
| 892 | int_pkt->wslot.slot = hpdev->desc.win_slot.slot; | 1127 | case PCI_PROTOCOL_VERSION_1_1: |
| 893 | int_pkt->int_desc.vector = cfg->vector; | 1128 | size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, |
| 894 | int_pkt->int_desc.vector_count = 1; | 1129 | irq_data_get_affinity_mask(data), |
| 895 | int_pkt->int_desc.delivery_mode = | 1130 | hpdev->desc.win_slot.slot, |
| 896 | (apic->irq_delivery_mode == dest_LowestPrio) ? 1 : 0; | 1131 | cfg->vector); |
| 1132 | break; | ||
| 897 | 1133 | ||
| 898 | /* | 1134 | case PCI_PROTOCOL_VERSION_1_2: |
| 899 | * This bit doesn't have to work on machines with more than 64 | 1135 | size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, |
| 900 | * processors because Hyper-V only supports 64 in a guest. | 1136 | irq_data_get_affinity_mask(data), |
| 901 | */ | 1137 | hpdev->desc.win_slot.slot, |
| 902 | affinity = irq_data_get_affinity_mask(data); | 1138 | cfg->vector); |
| 903 | if (cpumask_weight(affinity) >= 32) { | 1139 | break; |
| 904 | int_pkt->int_desc.cpu_mask = CPU_AFFINITY_ALL; | 1140 | |
| 905 | } else { | 1141 | default: |
| 906 | for_each_cpu_and(cpu, affinity, cpu_online_mask) { | 1142 | /* As we only negotiate protocol versions known to this driver, |
| 907 | int_pkt->int_desc.cpu_mask |= | 1143 | * this path should never hit. However, this is not a hot | 
| 908 | (1ULL << vmbus_cpu_number_to_vp_number(cpu)); | 1144 | * path so we print a message to aid future updates. |
| 909 | } | 1145 | */ |
| 1146 | dev_err(&hbus->hdev->device, | ||
| 1147 | "Unexpected vPCI protocol, update driver."); | ||
| 1148 | goto free_int_desc; | ||
| 910 | } | 1149 | } |
| 911 | 1150 | ||
| 912 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, int_pkt, | 1151 | ret = vmbus_sendpacket(hpdev->hbus->hdev->channel, &ctxt.int_pkts, |
| 913 | sizeof(*int_pkt), (unsigned long)&ctxt.pkt, | 1152 | size, (unsigned long)&ctxt.pci_pkt, |
| 914 | VM_PKT_DATA_INBAND, | 1153 | VM_PKT_DATA_INBAND, |
| 915 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 1154 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
| 916 | if (ret) | 1155 | if (ret) { |
| 1156 | dev_err(&hbus->hdev->device, | ||
| 1157 | "Sending request for interrupt failed: 0x%x", | ||
| 1158 | comp.comp_pkt.completion_status); | ||
| 917 | goto free_int_desc; | 1159 | goto free_int_desc; |
| 1160 | } | ||
| 918 | 1161 | ||
| 919 | wait_for_completion(&comp.comp_pkt.host_event); | 1162 | wait_for_completion(&comp.comp_pkt.host_event); |
| 920 | 1163 | ||
| @@ -1513,12 +1756,12 @@ static void pci_devices_present_work(struct work_struct *work) | |||
| 1513 | put_pcichild(hpdev, hv_pcidev_ref_initial); | 1756 | put_pcichild(hpdev, hv_pcidev_ref_initial); |
| 1514 | } | 1757 | } |
| 1515 | 1758 | ||
| 1516 | switch(hbus->state) { | 1759 | switch (hbus->state) { |
| 1517 | case hv_pcibus_installed: | 1760 | case hv_pcibus_installed: |
| 1518 | /* | 1761 | /* |
| 1519 | * Tell the core to rescan bus | 1762 | * Tell the core to rescan bus |
| 1520 | * because there may have been changes. | 1763 | * because there may have been changes. |
| 1521 | */ | 1764 | */ |
| 1522 | pci_lock_rescan_remove(); | 1765 | pci_lock_rescan_remove(); |
| 1523 | pci_scan_child_bus(hbus->pci_bus); | 1766 | pci_scan_child_bus(hbus->pci_bus); |
| 1524 | pci_unlock_rescan_remove(); | 1767 | pci_unlock_rescan_remove(); |
| @@ -1800,6 +2043,7 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) | |||
| 1800 | struct hv_pci_compl comp_pkt; | 2043 | struct hv_pci_compl comp_pkt; |
| 1801 | struct pci_packet *pkt; | 2044 | struct pci_packet *pkt; |
| 1802 | int ret; | 2045 | int ret; |
| 2046 | int i; | ||
| 1803 | 2047 | ||
| 1804 | /* | 2048 | /* |
| 1805 | * Initiate the handshake with the host and negotiate | 2049 | * Initiate the handshake with the host and negotiate |
| @@ -1816,26 +2060,44 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev) | |||
| 1816 | pkt->compl_ctxt = &comp_pkt; | 2060 | pkt->compl_ctxt = &comp_pkt; |
| 1817 | version_req = (struct pci_version_request *)&pkt->message; | 2061 | version_req = (struct pci_version_request *)&pkt->message; |
| 1818 | version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; | 2062 | version_req->message_type.type = PCI_QUERY_PROTOCOL_VERSION; |
| 1819 | version_req->protocol_version = PCI_PROTOCOL_VERSION_CURRENT; | ||
| 1820 | 2063 | ||
| 1821 | ret = vmbus_sendpacket(hdev->channel, version_req, | 2064 | for (i = 0; i < ARRAY_SIZE(pci_protocol_versions); i++) { |
| 1822 | sizeof(struct pci_version_request), | 2065 | version_req->protocol_version = pci_protocol_versions[i]; |
| 1823 | (unsigned long)pkt, VM_PKT_DATA_INBAND, | 2066 | ret = vmbus_sendpacket(hdev->channel, version_req, |
| 1824 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | 2067 | sizeof(struct pci_version_request), |
| 1825 | if (ret) | 2068 | (unsigned long)pkt, VM_PKT_DATA_INBAND, |
| 1826 | goto exit; | 2069 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
| 2070 | if (ret) { | ||
| 2071 | dev_err(&hdev->device, | ||
| 2072 | "PCI Pass-through VSP failed sending version request: %#x", | ||
| 2073 | ret); | ||
| 2074 | goto exit; | ||
| 2075 | } | ||
| 1827 | 2076 | ||
| 1828 | wait_for_completion(&comp_pkt.host_event); | 2077 | wait_for_completion(&comp_pkt.host_event); |
| 1829 | 2078 | ||
| 1830 | if (comp_pkt.completion_status < 0) { | 2079 | if (comp_pkt.completion_status >= 0) { |
| 1831 | dev_err(&hdev->device, | 2080 | pci_protocol_version = pci_protocol_versions[i]; |
| 1832 | "PCI Pass-through VSP failed version request %x\n", | 2081 | dev_info(&hdev->device, |
| 1833 | comp_pkt.completion_status); | 2082 | "PCI VMBus probing: Using version %#x\n", |
| 1834 | ret = -EPROTO; | 2083 | pci_protocol_version); |
| 1835 | goto exit; | 2084 | goto exit; |
| 2085 | } | ||
| 2086 | |||
| 2087 | if (comp_pkt.completion_status != STATUS_REVISION_MISMATCH) { | ||
| 2088 | dev_err(&hdev->device, | ||
| 2089 | "PCI Pass-through VSP failed version request: %#x", | ||
| 2090 | comp_pkt.completion_status); | ||
| 2091 | ret = -EPROTO; | ||
| 2092 | goto exit; | ||
| 2093 | } | ||
| 2094 | |||
| 2095 | reinit_completion(&comp_pkt.host_event); | ||
| 1836 | } | 2096 | } |
| 1837 | 2097 | ||
| 1838 | ret = 0; | 2098 | dev_err(&hdev->device, |
| 2099 | "PCI pass-through VSP failed to find supported version"); | ||
| 2100 | ret = -EPROTO; | ||
| 1839 | 2101 | ||
| 1840 | exit: | 2102 | exit: |
| 1841 | kfree(pkt); | 2103 | kfree(pkt); |
| @@ -2094,13 +2356,18 @@ static int hv_send_resources_allocated(struct hv_device *hdev) | |||
| 2094 | { | 2356 | { |
| 2095 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); | 2357 | struct hv_pcibus_device *hbus = hv_get_drvdata(hdev); |
| 2096 | struct pci_resources_assigned *res_assigned; | 2358 | struct pci_resources_assigned *res_assigned; |
| 2359 | struct pci_resources_assigned2 *res_assigned2; | ||
| 2097 | struct hv_pci_compl comp_pkt; | 2360 | struct hv_pci_compl comp_pkt; |
| 2098 | struct hv_pci_dev *hpdev; | 2361 | struct hv_pci_dev *hpdev; |
| 2099 | struct pci_packet *pkt; | 2362 | struct pci_packet *pkt; |
| 2363 | size_t size_res; | ||
| 2100 | u32 wslot; | 2364 | u32 wslot; |
| 2101 | int ret; | 2365 | int ret; |
| 2102 | 2366 | ||
| 2103 | pkt = kmalloc(sizeof(*pkt) + sizeof(*res_assigned), GFP_KERNEL); | 2367 | size_res = (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) |
| 2368 | ? sizeof(*res_assigned) : sizeof(*res_assigned2); | ||
| 2369 | |||
| 2370 | pkt = kmalloc(sizeof(*pkt) + size_res, GFP_KERNEL); | ||
| 2104 | if (!pkt) | 2371 | if (!pkt) |
| 2105 | return -ENOMEM; | 2372 | return -ENOMEM; |
| 2106 | 2373 | ||
| @@ -2111,22 +2378,30 @@ static int hv_send_resources_allocated(struct hv_device *hdev) | |||
| 2111 | if (!hpdev) | 2378 | if (!hpdev) |
| 2112 | continue; | 2379 | continue; |
| 2113 | 2380 | ||
| 2114 | memset(pkt, 0, sizeof(*pkt) + sizeof(*res_assigned)); | 2381 | memset(pkt, 0, sizeof(*pkt) + size_res); |
| 2115 | init_completion(&comp_pkt.host_event); | 2382 | init_completion(&comp_pkt.host_event); |
| 2116 | pkt->completion_func = hv_pci_generic_compl; | 2383 | pkt->completion_func = hv_pci_generic_compl; |
| 2117 | pkt->compl_ctxt = &comp_pkt; | 2384 | pkt->compl_ctxt = &comp_pkt; |
| 2118 | res_assigned = (struct pci_resources_assigned *)&pkt->message; | ||
| 2119 | res_assigned->message_type.type = PCI_RESOURCES_ASSIGNED; | ||
| 2120 | res_assigned->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2121 | 2385 | ||
| 2386 | if (pci_protocol_version < PCI_PROTOCOL_VERSION_1_2) { | ||
| 2387 | res_assigned = | ||
| 2388 | (struct pci_resources_assigned *)&pkt->message; | ||
| 2389 | res_assigned->message_type.type = | ||
| 2390 | PCI_RESOURCES_ASSIGNED; | ||
| 2391 | res_assigned->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2392 | } else { | ||
| 2393 | res_assigned2 = | ||
| 2394 | (struct pci_resources_assigned2 *)&pkt->message; | ||
| 2395 | res_assigned2->message_type.type = | ||
| 2396 | PCI_RESOURCES_ASSIGNED2; | ||
| 2397 | res_assigned2->wslot.slot = hpdev->desc.win_slot.slot; | ||
| 2398 | } | ||
| 2122 | put_pcichild(hpdev, hv_pcidev_ref_by_slot); | 2399 | put_pcichild(hpdev, hv_pcidev_ref_by_slot); |
| 2123 | 2400 | ||
| 2124 | ret = vmbus_sendpacket( | 2401 | ret = vmbus_sendpacket(hdev->channel, &pkt->message, |
| 2125 | hdev->channel, &pkt->message, | 2402 | size_res, (unsigned long)pkt, |
| 2126 | sizeof(*res_assigned), | 2403 | VM_PKT_DATA_INBAND, |
| 2127 | (unsigned long)pkt, | 2404 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); |
| 2128 | VM_PKT_DATA_INBAND, | ||
| 2129 | VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); | ||
| 2130 | if (ret) | 2405 | if (ret) |
| 2131 | break; | 2406 | break; |
| 2132 | 2407 | ||
| @@ -2204,11 +2479,19 @@ static int hv_pci_probe(struct hv_device *hdev, | |||
| 2204 | struct hv_pcibus_device *hbus; | 2479 | struct hv_pcibus_device *hbus; |
| 2205 | int ret; | 2480 | int ret; |
| 2206 | 2481 | ||
| 2207 | hbus = kzalloc(sizeof(*hbus), GFP_KERNEL); | 2482 | /* |
| 2483 | * hv_pcibus_device contains the hypercall arguments for retargeting in | ||
| 2484 | * hv_irq_unmask(). Those must not cross a page boundary. | ||
| 2485 | */ | ||
| 2486 | BUILD_BUG_ON(sizeof(*hbus) > PAGE_SIZE); | ||
| 2487 | |||
| 2488 | hbus = (struct hv_pcibus_device *)get_zeroed_page(GFP_KERNEL); | ||
| 2208 | if (!hbus) | 2489 | if (!hbus) |
| 2209 | return -ENOMEM; | 2490 | return -ENOMEM; |
| 2210 | hbus->state = hv_pcibus_init; | 2491 | hbus->state = hv_pcibus_init; |
| 2211 | 2492 | ||
| 2493 | hv_tmpcpumap_init(); | ||
| 2494 | |||
| 2212 | /* | 2495 | /* |
| 2213 | * The PCI bus "domain" is what is called "segment" in ACPI and | 2496 | * The PCI bus "domain" is what is called "segment" in ACPI and |
| 2214 | * other specs. Pull it from the instance ID, to get something | 2497 | * other specs. Pull it from the instance ID, to get something |
| @@ -2308,7 +2591,7 @@ free_config: | |||
| 2308 | close: | 2591 | close: |
| 2309 | vmbus_close(hdev->channel); | 2592 | vmbus_close(hdev->channel); |
| 2310 | free_bus: | 2593 | free_bus: |
| 2311 | kfree(hbus); | 2594 | free_page((unsigned long)hbus); |
| 2312 | return ret; | 2595 | return ret; |
| 2313 | } | 2596 | } |
| 2314 | 2597 | ||
| @@ -2386,7 +2669,7 @@ static int hv_pci_remove(struct hv_device *hdev) | |||
| 2386 | irq_domain_free_fwnode(hbus->sysdata.fwnode); | 2669 | irq_domain_free_fwnode(hbus->sysdata.fwnode); |
| 2387 | put_hvpcibus(hbus); | 2670 | put_hvpcibus(hbus); |
| 2388 | wait_for_completion(&hbus->remove_event); | 2671 | wait_for_completion(&hbus->remove_event); |
| 2389 | kfree(hbus); | 2672 | free_page((unsigned long)hbus); |
| 2390 | return 0; | 2673 | return 0; |
| 2391 | } | 2674 | } |
| 2392 | 2675 | ||
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c index 85348590848b..6f879685fedd 100644 --- a/drivers/pci/host/pci-rcar-gen2.c +++ b/drivers/pci/host/pci-rcar-gen2.c | |||
| @@ -429,7 +429,7 @@ static int rcar_pci_probe(struct platform_device *pdev) | |||
| 429 | return 0; | 429 | return 0; |
| 430 | } | 430 | } |
| 431 | 431 | ||
| 432 | static struct of_device_id rcar_pci_of_match[] = { | 432 | static const struct of_device_id rcar_pci_of_match[] = { |
| 433 | { .compatible = "renesas,pci-r8a7790", }, | 433 | { .compatible = "renesas,pci-r8a7790", }, |
| 434 | { .compatible = "renesas,pci-r8a7791", }, | 434 | { .compatible = "renesas,pci-r8a7791", }, |
| 435 | { .compatible = "renesas,pci-r8a7794", }, | 435 | { .compatible = "renesas,pci-r8a7794", }, |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 2618f875a600..b3722b7709df 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
| @@ -233,8 +233,8 @@ struct tegra_msi { | |||
| 233 | struct msi_controller chip; | 233 | struct msi_controller chip; |
| 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
| 235 | struct irq_domain *domain; | 235 | struct irq_domain *domain; |
| 236 | unsigned long pages; | ||
| 237 | struct mutex lock; | 236 | struct mutex lock; |
| 237 | u64 phys; | ||
| 238 | int irq; | 238 | int irq; |
| 239 | }; | 239 | }; |
| 240 | 240 | ||
| @@ -1448,9 +1448,8 @@ static int tegra_msi_setup_irq(struct msi_controller *chip, | |||
| 1448 | 1448 | ||
| 1449 | irq_set_msi_desc(irq, desc); | 1449 | irq_set_msi_desc(irq, desc); |
| 1450 | 1450 | ||
| 1451 | msg.address_lo = virt_to_phys((void *)msi->pages); | 1451 | msg.address_lo = lower_32_bits(msi->phys); |
| 1452 | /* 32 bit address only */ | 1452 | msg.address_hi = upper_32_bits(msi->phys); |
| 1453 | msg.address_hi = 0; | ||
| 1454 | msg.data = hwirq; | 1453 | msg.data = hwirq; |
| 1455 | 1454 | ||
| 1456 | pci_write_msi_msg(irq, &msg); | 1455 | pci_write_msi_msg(irq, &msg); |
| @@ -1499,7 +1498,6 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
| 1499 | const struct tegra_pcie_soc *soc = pcie->soc; | 1498 | const struct tegra_pcie_soc *soc = pcie->soc; |
| 1500 | struct tegra_msi *msi = &pcie->msi; | 1499 | struct tegra_msi *msi = &pcie->msi; |
| 1501 | struct device *dev = pcie->dev; | 1500 | struct device *dev = pcie->dev; |
| 1502 | unsigned long base; | ||
| 1503 | int err; | 1501 | int err; |
| 1504 | u32 reg; | 1502 | u32 reg; |
| 1505 | 1503 | ||
| @@ -1531,12 +1529,25 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
| 1531 | goto err; | 1529 | goto err; |
| 1532 | } | 1530 | } |
| 1533 | 1531 | ||
| 1534 | /* setup AFI/FPCI range */ | 1532 | /* |
| 1535 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | 1533 | * The PCI host bridge on Tegra contains some logic that intercepts |
| 1536 | base = virt_to_phys((void *)msi->pages); | 1534 | * MSI writes, which means that the MSI target address doesn't have |
| 1535 | * to point to actual physical memory. Rather than allocating one 4 | ||
| 1536 | * KiB page of system memory that's never used, we can simply pick | ||
| 1537 | * an arbitrary address within an area reserved for system memory | ||
| 1538 | * in the FPCI address map. | ||
| 1539 | * | ||
| 1540 | * However, in order to avoid confusion, we pick an address that | ||
| 1541 | * doesn't map to physical memory. The FPCI address map reserves a | ||
| 1542 | * 1012 GiB region for system memory and memory-mapped I/O. Since | ||
| 1543 | * none of the Tegra SoCs that contain this PCI host bridge can | ||
| 1544 | * address more than 16 GiB of system memory, the last 4 KiB of | ||
| 1545 | * these 1012 GiB is a good candidate. | ||
| 1546 | */ | ||
| 1547 | msi->phys = 0xfcfffff000; | ||
| 1537 | 1548 | ||
| 1538 | afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | 1549 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); |
| 1539 | afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST); | 1550 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); |
| 1540 | /* this register is in 4K increments */ | 1551 | /* this register is in 4K increments */ |
| 1541 | afi_writel(pcie, 1, AFI_MSI_BAR_SZ); | 1552 | afi_writel(pcie, 1, AFI_MSI_BAR_SZ); |
| 1542 | 1553 | ||
| @@ -1585,8 +1596,6 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | |||
| 1585 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | 1596 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); |
| 1586 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | 1597 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); |
| 1587 | 1598 | ||
| 1588 | free_pages(msi->pages, 0); | ||
| 1589 | |||
| 1590 | if (msi->irq > 0) | 1599 | if (msi->irq > 0) |
| 1591 | free_irq(msi->irq, pcie); | 1600 | free_irq(msi->irq, pcie); |
| 1592 | 1601 | ||
| @@ -2238,7 +2247,7 @@ static int tegra_pcie_probe(struct platform_device *pdev) | |||
| 2238 | struct pci_bus *child; | 2247 | struct pci_bus *child; |
| 2239 | int err; | 2248 | int err; |
| 2240 | 2249 | ||
| 2241 | host = pci_alloc_host_bridge(sizeof(*pcie)); | 2250 | host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 2242 | if (!host) | 2251 | if (!host) |
| 2243 | return -ENOMEM; | 2252 | return -ENOMEM; |
| 2244 | 2253 | ||
| @@ -2284,16 +2293,15 @@ static int tegra_pcie_probe(struct platform_device *pdev) | |||
| 2284 | host->busnr = pcie->busn.start; | 2293 | host->busnr = pcie->busn.start; |
| 2285 | host->dev.parent = &pdev->dev; | 2294 | host->dev.parent = &pdev->dev; |
| 2286 | host->ops = &tegra_pcie_ops; | 2295 | host->ops = &tegra_pcie_ops; |
| 2296 | host->map_irq = tegra_pcie_map_irq; | ||
| 2297 | host->swizzle_irq = pci_common_swizzle; | ||
| 2287 | 2298 | ||
| 2288 | err = pci_register_host_bridge(host); | 2299 | err = pci_scan_root_bus_bridge(host); |
| 2289 | if (err < 0) { | 2300 | if (err < 0) { |
| 2290 | dev_err(dev, "failed to register host: %d\n", err); | 2301 | dev_err(dev, "failed to register host: %d\n", err); |
| 2291 | goto disable_msi; | 2302 | goto disable_msi; |
| 2292 | } | 2303 | } |
| 2293 | 2304 | ||
| 2294 | pci_scan_child_bus(host->bus); | ||
| 2295 | |||
| 2296 | pci_fixup_irqs(pci_common_swizzle, tegra_pcie_map_irq); | ||
| 2297 | pci_bus_size_bridges(host->bus); | 2305 | pci_bus_size_bridges(host->bus); |
| 2298 | pci_bus_assign_resources(host->bus); | 2306 | pci_bus_assign_resources(host->bus); |
| 2299 | 2307 | ||
diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c index 9281eee2d000..d417acab0ecf 100644 --- a/drivers/pci/host/pci-versatile.c +++ b/drivers/pci/host/pci-versatile.c | |||
| @@ -120,30 +120,35 @@ out_release_res: | |||
| 120 | 120 | ||
| 121 | static int versatile_pci_probe(struct platform_device *pdev) | 121 | static int versatile_pci_probe(struct platform_device *pdev) |
| 122 | { | 122 | { |
| 123 | struct device *dev = &pdev->dev; | ||
| 123 | struct resource *res; | 124 | struct resource *res; |
| 124 | int ret, i, myslot = -1; | 125 | int ret, i, myslot = -1; |
| 125 | u32 val; | 126 | u32 val; |
| 126 | void __iomem *local_pci_cfg_base; | 127 | void __iomem *local_pci_cfg_base; |
| 127 | struct pci_bus *bus, *child; | 128 | struct pci_bus *bus, *child; |
| 129 | struct pci_host_bridge *bridge; | ||
| 128 | LIST_HEAD(pci_res); | 130 | LIST_HEAD(pci_res); |
| 129 | 131 | ||
| 132 | bridge = devm_pci_alloc_host_bridge(dev, 0); | ||
| 133 | if (!bridge) | ||
| 134 | return -ENOMEM; | ||
| 135 | |||
| 130 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 136 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
| 131 | versatile_pci_base = devm_ioremap_resource(&pdev->dev, res); | 137 | versatile_pci_base = devm_ioremap_resource(dev, res); |
| 132 | if (IS_ERR(versatile_pci_base)) | 138 | if (IS_ERR(versatile_pci_base)) |
| 133 | return PTR_ERR(versatile_pci_base); | 139 | return PTR_ERR(versatile_pci_base); |
| 134 | 140 | ||
| 135 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 141 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 136 | versatile_cfg_base[0] = devm_ioremap_resource(&pdev->dev, res); | 142 | versatile_cfg_base[0] = devm_ioremap_resource(dev, res); |
| 137 | if (IS_ERR(versatile_cfg_base[0])) | 143 | if (IS_ERR(versatile_cfg_base[0])) |
| 138 | return PTR_ERR(versatile_cfg_base[0]); | 144 | return PTR_ERR(versatile_cfg_base[0]); |
| 139 | 145 | ||
| 140 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); |
| 141 | versatile_cfg_base[1] = devm_pci_remap_cfg_resource(&pdev->dev, | 147 | versatile_cfg_base[1] = devm_pci_remap_cfg_resource(dev, res); |
| 142 | res); | ||
| 143 | if (IS_ERR(versatile_cfg_base[1])) | 148 | if (IS_ERR(versatile_cfg_base[1])) |
| 144 | return PTR_ERR(versatile_cfg_base[1]); | 149 | return PTR_ERR(versatile_cfg_base[1]); |
| 145 | 150 | ||
| 146 | ret = versatile_pci_parse_request_of_pci_ranges(&pdev->dev, &pci_res); | 151 | ret = versatile_pci_parse_request_of_pci_ranges(dev, &pci_res); |
| 147 | if (ret) | 152 | if (ret) |
| 148 | return ret; | 153 | return ret; |
| 149 | 154 | ||
| @@ -159,7 +164,7 @@ static int versatile_pci_probe(struct platform_device *pdev) | |||
| 159 | } | 164 | } |
| 160 | } | 165 | } |
| 161 | if (myslot == -1) { | 166 | if (myslot == -1) { |
| 162 | dev_err(&pdev->dev, "Cannot find PCI core!\n"); | 167 | dev_err(dev, "Cannot find PCI core!\n"); |
| 163 | return -EIO; | 168 | return -EIO; |
| 164 | } | 169 | } |
| 165 | /* | 170 | /* |
| @@ -167,7 +172,7 @@ static int versatile_pci_probe(struct platform_device *pdev) | |||
| 167 | */ | 172 | */ |
| 168 | pci_slot_ignore |= (1 << myslot); | 173 | pci_slot_ignore |= (1 << myslot); |
| 169 | 174 | ||
| 170 | dev_info(&pdev->dev, "PCI core found (slot %d)\n", myslot); | 175 | dev_info(dev, "PCI core found (slot %d)\n", myslot); |
| 171 | 176 | ||
| 172 | writel(myslot, PCI_SELFID); | 177 | writel(myslot, PCI_SELFID); |
| 173 | local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); | 178 | local_pci_cfg_base = versatile_cfg_base[1] + (myslot << 11); |
| @@ -199,11 +204,20 @@ static int versatile_pci_probe(struct platform_device *pdev) | |||
| 199 | pci_add_flags(PCI_ENABLE_PROC_DOMAINS); | 204 | pci_add_flags(PCI_ENABLE_PROC_DOMAINS); |
| 200 | pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); | 205 | pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC); |
| 201 | 206 | ||
| 202 | bus = pci_scan_root_bus(&pdev->dev, 0, &pci_versatile_ops, NULL, &pci_res); | 207 | list_splice_init(&pci_res, &bridge->windows); |
| 203 | if (!bus) | 208 | bridge->dev.parent = dev; |
| 204 | return -ENOMEM; | 209 | bridge->sysdata = NULL; |
| 210 | bridge->busnr = 0; | ||
| 211 | bridge->ops = &pci_versatile_ops; | ||
| 212 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 213 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 214 | |||
| 215 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 216 | if (ret < 0) | ||
| 217 | return ret; | ||
| 218 | |||
| 219 | bus = bridge->bus; | ||
| 205 | 220 | ||
| 206 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
| 207 | pci_assign_unassigned_bus_resources(bus); | 221 | pci_assign_unassigned_bus_resources(bus); |
| 208 | list_for_each_entry(child, &bus->children, node) | 222 | list_for_each_entry(child, &bus->children, node) |
| 209 | pcie_bus_configure_settings(child); | 223 | pcie_bus_configure_settings(child); |
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index 8cae013e7188..bd897479a215 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c | |||
| @@ -636,13 +636,16 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) | |||
| 636 | struct xgene_pcie_port *port; | 636 | struct xgene_pcie_port *port; |
| 637 | resource_size_t iobase = 0; | 637 | resource_size_t iobase = 0; |
| 638 | struct pci_bus *bus, *child; | 638 | struct pci_bus *bus, *child; |
| 639 | struct pci_host_bridge *bridge; | ||
| 639 | int ret; | 640 | int ret; |
| 640 | LIST_HEAD(res); | 641 | LIST_HEAD(res); |
| 641 | 642 | ||
| 642 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | 643 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); |
| 643 | if (!port) | 644 | if (!bridge) |
| 644 | return -ENOMEM; | 645 | return -ENOMEM; |
| 645 | 646 | ||
| 647 | port = pci_host_bridge_priv(bridge); | ||
| 648 | |||
| 646 | port->node = of_node_get(dn); | 649 | port->node = of_node_get(dn); |
| 647 | port->dev = dev; | 650 | port->dev = dev; |
| 648 | 651 | ||
| @@ -670,11 +673,19 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) | |||
| 670 | if (ret) | 673 | if (ret) |
| 671 | goto error; | 674 | goto error; |
| 672 | 675 | ||
| 673 | bus = pci_create_root_bus(dev, 0, &xgene_pcie_ops, port, &res); | 676 | list_splice_init(&res, &bridge->windows); |
| 674 | if (!bus) { | 677 | bridge->dev.parent = dev; |
| 675 | ret = -ENOMEM; | 678 | bridge->sysdata = port; |
| 679 | bridge->busnr = 0; | ||
| 680 | bridge->ops = &xgene_pcie_ops; | ||
| 681 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 682 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 683 | |||
| 684 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 685 | if (ret < 0) | ||
| 676 | goto error; | 686 | goto error; |
| 677 | } | 687 | |
| 688 | bus = bridge->bus; | ||
| 678 | 689 | ||
| 679 | pci_scan_child_bus(bus); | 690 | pci_scan_child_bus(bus); |
| 680 | pci_assign_unassigned_bus_resources(bus); | 691 | pci_assign_unassigned_bus_resources(bus); |
diff --git a/drivers/pci/host/pcie-altera.c b/drivers/pci/host/pcie-altera.c index 75ec5cea26f6..4ea4f8f5dc77 100644 --- a/drivers/pci/host/pcie-altera.c +++ b/drivers/pci/host/pcie-altera.c | |||
| @@ -579,12 +579,14 @@ static int altera_pcie_probe(struct platform_device *pdev) | |||
| 579 | struct altera_pcie *pcie; | 579 | struct altera_pcie *pcie; |
| 580 | struct pci_bus *bus; | 580 | struct pci_bus *bus; |
| 581 | struct pci_bus *child; | 581 | struct pci_bus *child; |
| 582 | struct pci_host_bridge *bridge; | ||
| 582 | int ret; | 583 | int ret; |
| 583 | 584 | ||
| 584 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | 585 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 585 | if (!pcie) | 586 | if (!bridge) |
| 586 | return -ENOMEM; | 587 | return -ENOMEM; |
| 587 | 588 | ||
| 589 | pcie = pci_host_bridge_priv(bridge); | ||
| 588 | pcie->pdev = pdev; | 590 | pcie->pdev = pdev; |
| 589 | 591 | ||
| 590 | ret = altera_pcie_parse_dt(pcie); | 592 | ret = altera_pcie_parse_dt(pcie); |
| @@ -613,12 +615,20 @@ static int altera_pcie_probe(struct platform_device *pdev) | |||
| 613 | cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); | 615 | cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); |
| 614 | altera_pcie_host_init(pcie); | 616 | altera_pcie_host_init(pcie); |
| 615 | 617 | ||
| 616 | bus = pci_scan_root_bus(dev, pcie->root_bus_nr, &altera_pcie_ops, | 618 | list_splice_init(&pcie->resources, &bridge->windows); |
| 617 | pcie, &pcie->resources); | 619 | bridge->dev.parent = dev; |
| 618 | if (!bus) | 620 | bridge->sysdata = pcie; |
| 619 | return -ENOMEM; | 621 | bridge->busnr = pcie->root_bus_nr; |
| 622 | bridge->ops = &altera_pcie_ops; | ||
| 623 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 624 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 625 | |||
| 626 | ret = pci_scan_root_bus_bridge(bridge); | ||
| 627 | if (ret < 0) | ||
| 628 | return ret; | ||
| 629 | |||
| 630 | bus = bridge->bus; | ||
| 620 | 631 | ||
| 621 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
| 622 | pci_assign_unassigned_bus_resources(bus); | 632 | pci_assign_unassigned_bus_resources(bus); |
| 623 | 633 | ||
| 624 | /* Configure PCI Express setting. */ | 634 | /* Configure PCI Express setting. */ |
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c index 384c27e664fe..f03d5e3612e9 100644 --- a/drivers/pci/host/pcie-iproc-bcma.c +++ b/drivers/pci/host/pcie-iproc-bcma.c | |||
| @@ -45,12 +45,15 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev) | |||
| 45 | struct device *dev = &bdev->dev; | 45 | struct device *dev = &bdev->dev; |
| 46 | struct iproc_pcie *pcie; | 46 | struct iproc_pcie *pcie; |
| 47 | LIST_HEAD(resources); | 47 | LIST_HEAD(resources); |
| 48 | struct pci_host_bridge *bridge; | ||
| 48 | int ret; | 49 | int ret; |
| 49 | 50 | ||
| 50 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | 51 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 51 | if (!pcie) | 52 | if (!bridge) |
| 52 | return -ENOMEM; | 53 | return -ENOMEM; |
| 53 | 54 | ||
| 55 | pcie = pci_host_bridge_priv(bridge); | ||
| 56 | |||
| 54 | pcie->dev = dev; | 57 | pcie->dev = dev; |
| 55 | 58 | ||
| 56 | pcie->type = IPROC_PCIE_PAXB_BCMA; | 59 | pcie->type = IPROC_PCIE_PAXB_BCMA; |
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c index 90d2bdd94e41..22531190bc40 100644 --- a/drivers/pci/host/pcie-iproc-platform.c +++ b/drivers/pci/host/pcie-iproc-platform.c | |||
| @@ -52,12 +52,15 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) | |||
| 52 | struct resource reg; | 52 | struct resource reg; |
| 53 | resource_size_t iobase = 0; | 53 | resource_size_t iobase = 0; |
| 54 | LIST_HEAD(resources); | 54 | LIST_HEAD(resources); |
| 55 | struct pci_host_bridge *bridge; | ||
| 55 | int ret; | 56 | int ret; |
| 56 | 57 | ||
| 57 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | 58 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 58 | if (!pcie) | 59 | if (!bridge) |
| 59 | return -ENOMEM; | 60 | return -ENOMEM; |
| 60 | 61 | ||
| 62 | pcie = pci_host_bridge_priv(bridge); | ||
| 63 | |||
| 61 | pcie->dev = dev; | 64 | pcie->dev = dev; |
| 62 | pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); | 65 | pcie->type = (enum iproc_pcie_type) of_device_get_match_data(dev); |
| 63 | 66 | ||
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c index 0f39bd2a04cb..c57486348856 100644 --- a/drivers/pci/host/pcie-iproc.c +++ b/drivers/pci/host/pcie-iproc.c | |||
| @@ -452,14 +452,13 @@ static inline void iproc_pcie_apb_err_disable(struct pci_bus *bus, | |||
| 452 | * Note access to the configuration registers are protected at the higher layer | 452 | * Note access to the configuration registers are protected at the higher layer |
| 453 | * by 'pci_lock' in drivers/pci/access.c | 453 | * by 'pci_lock' in drivers/pci/access.c |
| 454 | */ | 454 | */ |
| 455 | static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, | 455 | static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie, |
| 456 | int busno, | ||
| 456 | unsigned int devfn, | 457 | unsigned int devfn, |
| 457 | int where) | 458 | int where) |
| 458 | { | 459 | { |
| 459 | struct iproc_pcie *pcie = iproc_data(bus); | ||
| 460 | unsigned slot = PCI_SLOT(devfn); | 460 | unsigned slot = PCI_SLOT(devfn); |
| 461 | unsigned fn = PCI_FUNC(devfn); | 461 | unsigned fn = PCI_FUNC(devfn); |
| 462 | unsigned busno = bus->number; | ||
| 463 | u32 val; | 462 | u32 val; |
| 464 | u16 offset; | 463 | u16 offset; |
| 465 | 464 | ||
| @@ -499,6 +498,58 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, | |||
| 499 | return (pcie->base + offset); | 498 | return (pcie->base + offset); |
| 500 | } | 499 | } |
| 501 | 500 | ||
| 501 | static void __iomem *iproc_pcie_bus_map_cfg_bus(struct pci_bus *bus, | ||
| 502 | unsigned int devfn, | ||
| 503 | int where) | ||
| 504 | { | ||
| 505 | return iproc_pcie_map_cfg_bus(iproc_data(bus), bus->number, devfn, | ||
| 506 | where); | ||
| 507 | } | ||
| 508 | |||
| 509 | static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie, | ||
| 510 | unsigned int devfn, int where, | ||
| 511 | int size, u32 *val) | ||
| 512 | { | ||
| 513 | void __iomem *addr; | ||
| 514 | |||
| 515 | addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); | ||
| 516 | if (!addr) { | ||
| 517 | *val = ~0; | ||
| 518 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 519 | } | ||
| 520 | |||
| 521 | *val = readl(addr); | ||
| 522 | |||
| 523 | if (size <= 2) | ||
| 524 | *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); | ||
| 525 | |||
| 526 | return PCIBIOS_SUCCESSFUL; | ||
| 527 | } | ||
| 528 | |||
| 529 | static int iproc_pci_raw_config_write32(struct iproc_pcie *pcie, | ||
| 530 | unsigned int devfn, int where, | ||
| 531 | int size, u32 val) | ||
| 532 | { | ||
| 533 | void __iomem *addr; | ||
| 534 | u32 mask, tmp; | ||
| 535 | |||
| 536 | addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3); | ||
| 537 | if (!addr) | ||
| 538 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
| 539 | |||
| 540 | if (size == 4) { | ||
| 541 | writel(val, addr); | ||
| 542 | return PCIBIOS_SUCCESSFUL; | ||
| 543 | } | ||
| 544 | |||
| 545 | mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); | ||
| 546 | tmp = readl(addr) & mask; | ||
| 547 | tmp |= val << ((where & 0x3) * 8); | ||
| 548 | writel(tmp, addr); | ||
| 549 | |||
| 550 | return PCIBIOS_SUCCESSFUL; | ||
| 551 | } | ||
| 552 | |||
| 502 | static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, | 553 | static int iproc_pcie_config_read32(struct pci_bus *bus, unsigned int devfn, |
| 503 | int where, int size, u32 *val) | 554 | int where, int size, u32 *val) |
| 504 | { | 555 | { |
| @@ -524,7 +575,7 @@ static int iproc_pcie_config_write32(struct pci_bus *bus, unsigned int devfn, | |||
| 524 | } | 575 | } |
| 525 | 576 | ||
| 526 | static struct pci_ops iproc_pcie_ops = { | 577 | static struct pci_ops iproc_pcie_ops = { |
| 527 | .map_bus = iproc_pcie_map_cfg_bus, | 578 | .map_bus = iproc_pcie_bus_map_cfg_bus, |
| 528 | .read = iproc_pcie_config_read32, | 579 | .read = iproc_pcie_config_read32, |
| 529 | .write = iproc_pcie_config_write32, | 580 | .write = iproc_pcie_config_write32, |
| 530 | }; | 581 | }; |
| @@ -556,12 +607,11 @@ static void iproc_pcie_reset(struct iproc_pcie *pcie) | |||
| 556 | msleep(100); | 607 | msleep(100); |
| 557 | } | 608 | } |
| 558 | 609 | ||
| 559 | static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) | 610 | static int iproc_pcie_check_link(struct iproc_pcie *pcie) |
| 560 | { | 611 | { |
| 561 | struct device *dev = pcie->dev; | 612 | struct device *dev = pcie->dev; |
| 562 | u8 hdr_type; | 613 | u32 hdr_type, link_ctrl, link_status, class, val; |
| 563 | u32 link_ctrl, class, val; | 614 | u16 pos = PCI_EXP_CAP; |
| 564 | u16 pos = PCI_EXP_CAP, link_status; | ||
| 565 | bool link_is_active = false; | 615 | bool link_is_active = false; |
| 566 | 616 | ||
| 567 | /* | 617 | /* |
| @@ -578,7 +628,7 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) | |||
| 578 | } | 628 | } |
| 579 | 629 | ||
| 580 | /* make sure we are not in EP mode */ | 630 | /* make sure we are not in EP mode */ |
| 581 | pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type); | 631 | iproc_pci_raw_config_read32(pcie, 0, PCI_HEADER_TYPE, 1, &hdr_type); |
| 582 | if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { | 632 | if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { |
| 583 | dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); | 633 | dev_err(dev, "in EP mode, hdr=%#02x\n", hdr_type); |
| 584 | return -EFAULT; | 634 | return -EFAULT; |
| @@ -588,13 +638,16 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) | |||
| 588 | #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c | 638 | #define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c |
| 589 | #define PCI_CLASS_BRIDGE_MASK 0xffff00 | 639 | #define PCI_CLASS_BRIDGE_MASK 0xffff00 |
| 590 | #define PCI_CLASS_BRIDGE_SHIFT 8 | 640 | #define PCI_CLASS_BRIDGE_SHIFT 8 |
| 591 | pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, &class); | 641 | iproc_pci_raw_config_read32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, |
| 642 | 4, &class); | ||
| 592 | class &= ~PCI_CLASS_BRIDGE_MASK; | 643 | class &= ~PCI_CLASS_BRIDGE_MASK; |
| 593 | class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); | 644 | class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); |
| 594 | pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class); | 645 | iproc_pci_raw_config_write32(pcie, 0, PCI_BRIDGE_CTRL_REG_OFFSET, |
| 646 | 4, class); | ||
| 595 | 647 | ||
| 596 | /* check link status to see if link is active */ | 648 | /* check link status to see if link is active */ |
| 597 | pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status); | 649 | iproc_pci_raw_config_read32(pcie, 0, pos + PCI_EXP_LNKSTA, |
| 650 | 2, &link_status); | ||
| 598 | if (link_status & PCI_EXP_LNKSTA_NLW) | 651 | if (link_status & PCI_EXP_LNKSTA_NLW) |
| 599 | link_is_active = true; | 652 | link_is_active = true; |
| 600 | 653 | ||
| @@ -603,20 +656,21 @@ static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) | |||
| 603 | #define PCI_TARGET_LINK_SPEED_MASK 0xf | 656 | #define PCI_TARGET_LINK_SPEED_MASK 0xf |
| 604 | #define PCI_TARGET_LINK_SPEED_GEN2 0x2 | 657 | #define PCI_TARGET_LINK_SPEED_GEN2 0x2 |
| 605 | #define PCI_TARGET_LINK_SPEED_GEN1 0x1 | 658 | #define PCI_TARGET_LINK_SPEED_GEN1 0x1 |
| 606 | pci_bus_read_config_dword(bus, 0, | 659 | iproc_pci_raw_config_read32(pcie, 0, |
| 607 | pos + PCI_EXP_LNKCTL2, | 660 | pos + PCI_EXP_LNKCTL2, 4, |
| 608 | &link_ctrl); | 661 | &link_ctrl); |
| 609 | if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == | 662 | if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == |
| 610 | PCI_TARGET_LINK_SPEED_GEN2) { | 663 | PCI_TARGET_LINK_SPEED_GEN2) { |
| 611 | link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; | 664 | link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; |
| 612 | link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; | 665 | link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; |
| 613 | pci_bus_write_config_dword(bus, 0, | 666 | iproc_pci_raw_config_write32(pcie, 0, |
| 614 | pos + PCI_EXP_LNKCTL2, | 667 | pos + PCI_EXP_LNKCTL2, |
| 615 | link_ctrl); | 668 | 4, link_ctrl); |
| 616 | msleep(100); | 669 | msleep(100); |
| 617 | 670 | ||
| 618 | pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, | 671 | iproc_pci_raw_config_read32(pcie, 0, |
| 619 | &link_status); | 672 | pos + PCI_EXP_LNKSTA, |
| 673 | 2, &link_status); | ||
| 620 | if (link_status & PCI_EXP_LNKSTA_NLW) | 674 | if (link_status & PCI_EXP_LNKSTA_NLW) |
| 621 | link_is_active = true; | 675 | link_is_active = true; |
| 622 | } | 676 | } |
| @@ -1205,7 +1259,8 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) | |||
| 1205 | struct device *dev; | 1259 | struct device *dev; |
| 1206 | int ret; | 1260 | int ret; |
| 1207 | void *sysdata; | 1261 | void *sysdata; |
| 1208 | struct pci_bus *bus, *child; | 1262 | struct pci_bus *child; |
| 1263 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 1209 | 1264 | ||
| 1210 | dev = pcie->dev; | 1265 | dev = pcie->dev; |
| 1211 | 1266 | ||
| @@ -1252,18 +1307,10 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) | |||
| 1252 | sysdata = pcie; | 1307 | sysdata = pcie; |
| 1253 | #endif | 1308 | #endif |
| 1254 | 1309 | ||
| 1255 | bus = pci_create_root_bus(dev, 0, &iproc_pcie_ops, sysdata, res); | 1310 | ret = iproc_pcie_check_link(pcie); |
| 1256 | if (!bus) { | ||
| 1257 | dev_err(dev, "unable to create PCI root bus\n"); | ||
| 1258 | ret = -ENOMEM; | ||
| 1259 | goto err_power_off_phy; | ||
| 1260 | } | ||
| 1261 | pcie->root_bus = bus; | ||
| 1262 | |||
| 1263 | ret = iproc_pcie_check_link(pcie, bus); | ||
| 1264 | if (ret) { | 1311 | if (ret) { |
| 1265 | dev_err(dev, "no PCIe EP device detected\n"); | 1312 | dev_err(dev, "no PCIe EP device detected\n"); |
| 1266 | goto err_rm_root_bus; | 1313 | goto err_power_off_phy; |
| 1267 | } | 1314 | } |
| 1268 | 1315 | ||
| 1269 | iproc_pcie_enable(pcie); | 1316 | iproc_pcie_enable(pcie); |
| @@ -1272,23 +1319,31 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) | |||
| 1272 | if (iproc_pcie_msi_enable(pcie)) | 1319 | if (iproc_pcie_msi_enable(pcie)) |
| 1273 | dev_info(dev, "not using iProc MSI\n"); | 1320 | dev_info(dev, "not using iProc MSI\n"); |
| 1274 | 1321 | ||
| 1275 | pci_scan_child_bus(bus); | 1322 | list_splice_init(res, &host->windows); |
| 1276 | pci_assign_unassigned_bus_resources(bus); | 1323 | host->busnr = 0; |
| 1324 | host->dev.parent = dev; | ||
| 1325 | host->ops = &iproc_pcie_ops; | ||
| 1326 | host->sysdata = sysdata; | ||
| 1327 | host->map_irq = pcie->map_irq; | ||
| 1328 | host->swizzle_irq = pci_common_swizzle; | ||
| 1277 | 1329 | ||
| 1278 | if (pcie->map_irq) | 1330 | ret = pci_scan_root_bus_bridge(host); |
| 1279 | pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); | 1331 | if (ret < 0) { |
| 1332 | dev_err(dev, "failed to scan host: %d\n", ret); | ||
| 1333 | goto err_power_off_phy; | ||
| 1334 | } | ||
| 1280 | 1335 | ||
| 1281 | list_for_each_entry(child, &bus->children, node) | 1336 | pci_assign_unassigned_bus_resources(host->bus); |
| 1337 | |||
| 1338 | pcie->root_bus = host->bus; | ||
| 1339 | |||
| 1340 | list_for_each_entry(child, &host->bus->children, node) | ||
| 1282 | pcie_bus_configure_settings(child); | 1341 | pcie_bus_configure_settings(child); |
| 1283 | 1342 | ||
| 1284 | pci_bus_add_devices(bus); | 1343 | pci_bus_add_devices(host->bus); |
| 1285 | 1344 | ||
| 1286 | return 0; | 1345 | return 0; |
| 1287 | 1346 | ||
| 1288 | err_rm_root_bus: | ||
| 1289 | pci_stop_root_bus(bus); | ||
| 1290 | pci_remove_root_bus(bus); | ||
| 1291 | |||
| 1292 | err_power_off_phy: | 1347 | err_power_off_phy: |
| 1293 | phy_power_off(pcie->phy); | 1348 | phy_power_off(pcie->phy); |
| 1294 | err_exit_phy: | 1349 | err_exit_phy: |
diff --git a/drivers/pci/host/pcie-mediatek.c b/drivers/pci/host/pcie-mediatek.c new file mode 100644 index 000000000000..5a9d8589ea0b --- /dev/null +++ b/drivers/pci/host/pcie-mediatek.c | |||
| @@ -0,0 +1,554 @@ | |||
| 1 | /* | ||
| 2 | * MediaTek PCIe host controller driver. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2017 MediaTek Inc. | ||
| 5 | * Author: Ryder Lee <ryder.lee@mediatek.com> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | */ | ||
| 16 | |||
| 17 | #include <linux/clk.h> | ||
| 18 | #include <linux/delay.h> | ||
| 19 | #include <linux/kernel.h> | ||
| 20 | #include <linux/of_address.h> | ||
| 21 | #include <linux/of_pci.h> | ||
| 22 | #include <linux/of_platform.h> | ||
| 23 | #include <linux/pci.h> | ||
| 24 | #include <linux/phy/phy.h> | ||
| 25 | #include <linux/platform_device.h> | ||
| 26 | #include <linux/pm_runtime.h> | ||
| 27 | #include <linux/reset.h> | ||
| 28 | |||
| 29 | /* PCIe shared registers */ | ||
| 30 | #define PCIE_SYS_CFG 0x00 | ||
| 31 | #define PCIE_INT_ENABLE 0x0c | ||
| 32 | #define PCIE_CFG_ADDR 0x20 | ||
| 33 | #define PCIE_CFG_DATA 0x24 | ||
| 34 | |||
| 35 | /* PCIe per port registers */ | ||
| 36 | #define PCIE_BAR0_SETUP 0x10 | ||
| 37 | #define PCIE_CLASS 0x34 | ||
| 38 | #define PCIE_LINK_STATUS 0x50 | ||
| 39 | |||
| 40 | #define PCIE_PORT_INT_EN(x) BIT(20 + (x)) | ||
| 41 | #define PCIE_PORT_PERST(x) BIT(1 + (x)) | ||
| 42 | #define PCIE_PORT_LINKUP BIT(0) | ||
| 43 | #define PCIE_BAR_MAP_MAX GENMASK(31, 16) | ||
| 44 | |||
| 45 | #define PCIE_BAR_ENABLE BIT(0) | ||
| 46 | #define PCIE_REVISION_ID BIT(0) | ||
| 47 | #define PCIE_CLASS_CODE (0x60400 << 8) | ||
| 48 | #define PCIE_CONF_REG(regn) (((regn) & GENMASK(7, 2)) | \ | ||
| 49 | ((((regn) >> 8) & GENMASK(3, 0)) << 24)) | ||
| 50 | #define PCIE_CONF_FUN(fun) (((fun) << 8) & GENMASK(10, 8)) | ||
| 51 | #define PCIE_CONF_DEV(dev) (((dev) << 11) & GENMASK(15, 11)) | ||
| 52 | #define PCIE_CONF_BUS(bus) (((bus) << 16) & GENMASK(23, 16)) | ||
| 53 | #define PCIE_CONF_ADDR(regn, fun, dev, bus) \ | ||
| 54 | (PCIE_CONF_REG(regn) | PCIE_CONF_FUN(fun) | \ | ||
| 55 | PCIE_CONF_DEV(dev) | PCIE_CONF_BUS(bus)) | ||
| 56 | |||
| 57 | /* MediaTek specific configuration registers */ | ||
| 58 | #define PCIE_FTS_NUM 0x70c | ||
| 59 | #define PCIE_FTS_NUM_MASK GENMASK(15, 8) | ||
| 60 | #define PCIE_FTS_NUM_L0(x) ((x) & 0xff << 8) | ||
| 61 | |||
| 62 | #define PCIE_FC_CREDIT 0x73c | ||
| 63 | #define PCIE_FC_CREDIT_MASK (GENMASK(31, 31) | GENMASK(28, 16)) | ||
| 64 | #define PCIE_FC_CREDIT_VAL(x) ((x) << 16) | ||
| 65 | |||
| 66 | /** | ||
| 67 | * struct mtk_pcie_port - PCIe port information | ||
| 68 | * @base: IO mapped register base | ||
| 69 | * @list: port list | ||
| 70 | * @pcie: pointer to PCIe host info | ||
| 71 | * @reset: pointer to port reset control | ||
| 72 | * @sys_ck: pointer to bus clock | ||
| 73 | * @phy: pointer to phy control block | ||
| 74 | * @lane: lane count | ||
| 75 | * @index: port index | ||
| 76 | */ | ||
| 77 | struct mtk_pcie_port { | ||
| 78 | void __iomem *base; | ||
| 79 | struct list_head list; | ||
| 80 | struct mtk_pcie *pcie; | ||
| 81 | struct reset_control *reset; | ||
| 82 | struct clk *sys_ck; | ||
| 83 | struct phy *phy; | ||
| 84 | u32 lane; | ||
| 85 | u32 index; | ||
| 86 | }; | ||
| 87 | |||
| 88 | /** | ||
| 89 | * struct mtk_pcie - PCIe host information | ||
| 90 | * @dev: pointer to PCIe device | ||
| 91 | * @base: IO mapped register base | ||
| 92 | * @free_ck: free-run reference clock | ||
| 93 | * @io: IO resource | ||
| 94 | * @pio: PIO resource | ||
| 95 | * @mem: non-prefetchable memory resource | ||
| 96 | * @busn: bus range | ||
| 97 | * @offset: IO / Memory offset | ||
| 98 | * @ports: pointer to PCIe port information | ||
| 99 | */ | ||
| 100 | struct mtk_pcie { | ||
| 101 | struct device *dev; | ||
| 102 | void __iomem *base; | ||
| 103 | struct clk *free_ck; | ||
| 104 | |||
| 105 | struct resource io; | ||
| 106 | struct resource pio; | ||
| 107 | struct resource mem; | ||
| 108 | struct resource busn; | ||
| 109 | struct { | ||
| 110 | resource_size_t mem; | ||
| 111 | resource_size_t io; | ||
| 112 | } offset; | ||
| 113 | struct list_head ports; | ||
| 114 | }; | ||
| 115 | |||
| 116 | static inline bool mtk_pcie_link_up(struct mtk_pcie_port *port) | ||
| 117 | { | ||
| 118 | return !!(readl(port->base + PCIE_LINK_STATUS) & PCIE_PORT_LINKUP); | ||
| 119 | } | ||
| 120 | |||
| 121 | static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) | ||
| 122 | { | ||
| 123 | struct device *dev = pcie->dev; | ||
| 124 | |||
| 125 | clk_disable_unprepare(pcie->free_ck); | ||
| 126 | |||
| 127 | if (dev->pm_domain) { | ||
| 128 | pm_runtime_put_sync(dev); | ||
| 129 | pm_runtime_disable(dev); | ||
| 130 | } | ||
| 131 | } | ||
| 132 | |||
| 133 | static void mtk_pcie_port_free(struct mtk_pcie_port *port) | ||
| 134 | { | ||
| 135 | struct mtk_pcie *pcie = port->pcie; | ||
| 136 | struct device *dev = pcie->dev; | ||
| 137 | |||
| 138 | devm_iounmap(dev, port->base); | ||
| 139 | list_del(&port->list); | ||
| 140 | devm_kfree(dev, port); | ||
| 141 | } | ||
| 142 | |||
| 143 | static void mtk_pcie_put_resources(struct mtk_pcie *pcie) | ||
| 144 | { | ||
| 145 | struct mtk_pcie_port *port, *tmp; | ||
| 146 | |||
| 147 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) { | ||
| 148 | phy_power_off(port->phy); | ||
| 149 | clk_disable_unprepare(port->sys_ck); | ||
| 150 | mtk_pcie_port_free(port); | ||
| 151 | } | ||
| 152 | |||
| 153 | mtk_pcie_subsys_powerdown(pcie); | ||
| 154 | } | ||
| 155 | |||
| 156 | static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, | ||
| 157 | unsigned int devfn, int where) | ||
| 158 | { | ||
| 159 | struct pci_host_bridge *host = pci_find_host_bridge(bus); | ||
| 160 | struct mtk_pcie *pcie = pci_host_bridge_priv(host); | ||
| 161 | |||
| 162 | writel(PCIE_CONF_ADDR(where, PCI_FUNC(devfn), PCI_SLOT(devfn), | ||
| 163 | bus->number), pcie->base + PCIE_CFG_ADDR); | ||
| 164 | |||
| 165 | return pcie->base + PCIE_CFG_DATA + (where & 3); | ||
| 166 | } | ||
| 167 | |||
| 168 | static struct pci_ops mtk_pcie_ops = { | ||
| 169 | .map_bus = mtk_pcie_map_bus, | ||
| 170 | .read = pci_generic_config_read, | ||
| 171 | .write = pci_generic_config_write, | ||
| 172 | }; | ||
| 173 | |||
| 174 | static void mtk_pcie_configure_rc(struct mtk_pcie_port *port) | ||
| 175 | { | ||
| 176 | struct mtk_pcie *pcie = port->pcie; | ||
| 177 | u32 func = PCI_FUNC(port->index << 3); | ||
| 178 | u32 slot = PCI_SLOT(port->index << 3); | ||
| 179 | u32 val; | ||
| 180 | |||
| 181 | /* enable interrupt */ | ||
| 182 | val = readl(pcie->base + PCIE_INT_ENABLE); | ||
| 183 | val |= PCIE_PORT_INT_EN(port->index); | ||
| 184 | writel(val, pcie->base + PCIE_INT_ENABLE); | ||
| 185 | |||
| 186 | /* map to all DDR region. We need to set it before cfg operation. */ | ||
| 187 | writel(PCIE_BAR_MAP_MAX | PCIE_BAR_ENABLE, | ||
| 188 | port->base + PCIE_BAR0_SETUP); | ||
| 189 | |||
| 190 | /* configure class code and revision ID */ | ||
| 191 | writel(PCIE_CLASS_CODE | PCIE_REVISION_ID, port->base + PCIE_CLASS); | ||
| 192 | |||
| 193 | /* configure FC credit */ | ||
| 194 | writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), | ||
| 195 | pcie->base + PCIE_CFG_ADDR); | ||
| 196 | val = readl(pcie->base + PCIE_CFG_DATA); | ||
| 197 | val &= ~PCIE_FC_CREDIT_MASK; | ||
| 198 | val |= PCIE_FC_CREDIT_VAL(0x806c); | ||
| 199 | writel(PCIE_CONF_ADDR(PCIE_FC_CREDIT, func, slot, 0), | ||
| 200 | pcie->base + PCIE_CFG_ADDR); | ||
| 201 | writel(val, pcie->base + PCIE_CFG_DATA); | ||
| 202 | |||
| 203 | /* configure RC FTS number to 250 when it leaves L0s */ | ||
| 204 | writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), | ||
| 205 | pcie->base + PCIE_CFG_ADDR); | ||
| 206 | val = readl(pcie->base + PCIE_CFG_DATA); | ||
| 207 | val &= ~PCIE_FTS_NUM_MASK; | ||
| 208 | val |= PCIE_FTS_NUM_L0(0x50); | ||
| 209 | writel(PCIE_CONF_ADDR(PCIE_FTS_NUM, func, slot, 0), | ||
| 210 | pcie->base + PCIE_CFG_ADDR); | ||
| 211 | writel(val, pcie->base + PCIE_CFG_DATA); | ||
| 212 | } | ||
| 213 | |||
| 214 | static void mtk_pcie_assert_ports(struct mtk_pcie_port *port) | ||
| 215 | { | ||
| 216 | struct mtk_pcie *pcie = port->pcie; | ||
| 217 | u32 val; | ||
| 218 | |||
| 219 | /* assert port PERST_N */ | ||
| 220 | val = readl(pcie->base + PCIE_SYS_CFG); | ||
| 221 | val |= PCIE_PORT_PERST(port->index); | ||
| 222 | writel(val, pcie->base + PCIE_SYS_CFG); | ||
| 223 | |||
| 224 | /* de-assert port PERST_N */ | ||
| 225 | val = readl(pcie->base + PCIE_SYS_CFG); | ||
| 226 | val &= ~PCIE_PORT_PERST(port->index); | ||
| 227 | writel(val, pcie->base + PCIE_SYS_CFG); | ||
| 228 | |||
| 229 | /* PCIe v2.0 need at least 100ms delay to train from Gen1 to Gen2 */ | ||
| 230 | msleep(100); | ||
| 231 | } | ||
| 232 | |||
| 233 | static void mtk_pcie_enable_ports(struct mtk_pcie_port *port) | ||
| 234 | { | ||
| 235 | struct device *dev = port->pcie->dev; | ||
| 236 | int err; | ||
| 237 | |||
| 238 | err = clk_prepare_enable(port->sys_ck); | ||
| 239 | if (err) { | ||
| 240 | dev_err(dev, "failed to enable port%d clock\n", port->index); | ||
| 241 | goto err_sys_clk; | ||
| 242 | } | ||
| 243 | |||
| 244 | reset_control_assert(port->reset); | ||
| 245 | reset_control_deassert(port->reset); | ||
| 246 | |||
| 247 | err = phy_power_on(port->phy); | ||
| 248 | if (err) { | ||
| 249 | dev_err(dev, "failed to power on port%d phy\n", port->index); | ||
| 250 | goto err_phy_on; | ||
| 251 | } | ||
| 252 | |||
| 253 | mtk_pcie_assert_ports(port); | ||
| 254 | |||
| 255 | /* if link up, then setup root port configuration space */ | ||
| 256 | if (mtk_pcie_link_up(port)) { | ||
| 257 | mtk_pcie_configure_rc(port); | ||
| 258 | return; | ||
| 259 | } | ||
| 260 | |||
| 261 | dev_info(dev, "Port%d link down\n", port->index); | ||
| 262 | |||
| 263 | phy_power_off(port->phy); | ||
| 264 | err_phy_on: | ||
| 265 | clk_disable_unprepare(port->sys_ck); | ||
| 266 | err_sys_clk: | ||
| 267 | mtk_pcie_port_free(port); | ||
| 268 | } | ||
| 269 | |||
| 270 | static int mtk_pcie_parse_ports(struct mtk_pcie *pcie, | ||
| 271 | struct device_node *node, | ||
| 272 | int index) | ||
| 273 | { | ||
| 274 | struct mtk_pcie_port *port; | ||
| 275 | struct resource *regs; | ||
| 276 | struct device *dev = pcie->dev; | ||
| 277 | struct platform_device *pdev = to_platform_device(dev); | ||
| 278 | char name[10]; | ||
| 279 | int err; | ||
| 280 | |||
| 281 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | ||
| 282 | if (!port) | ||
| 283 | return -ENOMEM; | ||
| 284 | |||
| 285 | err = of_property_read_u32(node, "num-lanes", &port->lane); | ||
| 286 | if (err) { | ||
| 287 | dev_err(dev, "missing num-lanes property\n"); | ||
| 288 | return err; | ||
| 289 | } | ||
| 290 | |||
| 291 | regs = platform_get_resource(pdev, IORESOURCE_MEM, index + 1); | ||
| 292 | port->base = devm_ioremap_resource(dev, regs); | ||
| 293 | if (IS_ERR(port->base)) { | ||
| 294 | dev_err(dev, "failed to map port%d base\n", index); | ||
| 295 | return PTR_ERR(port->base); | ||
| 296 | } | ||
| 297 | |||
| 298 | snprintf(name, sizeof(name), "sys_ck%d", index); | ||
| 299 | port->sys_ck = devm_clk_get(dev, name); | ||
| 300 | if (IS_ERR(port->sys_ck)) { | ||
| 301 | dev_err(dev, "failed to get port%d clock\n", index); | ||
| 302 | return PTR_ERR(port->sys_ck); | ||
| 303 | } | ||
| 304 | |||
| 305 | snprintf(name, sizeof(name), "pcie-rst%d", index); | ||
| 306 | port->reset = devm_reset_control_get_optional(dev, name); | ||
| 307 | if (PTR_ERR(port->reset) == -EPROBE_DEFER) | ||
| 308 | return PTR_ERR(port->reset); | ||
| 309 | |||
| 310 | /* some platforms may use default PHY setting */ | ||
| 311 | snprintf(name, sizeof(name), "pcie-phy%d", index); | ||
| 312 | port->phy = devm_phy_optional_get(dev, name); | ||
| 313 | if (IS_ERR(port->phy)) | ||
| 314 | return PTR_ERR(port->phy); | ||
| 315 | |||
| 316 | port->index = index; | ||
| 317 | port->pcie = pcie; | ||
| 318 | |||
| 319 | INIT_LIST_HEAD(&port->list); | ||
| 320 | list_add_tail(&port->list, &pcie->ports); | ||
| 321 | |||
| 322 | return 0; | ||
| 323 | } | ||
| 324 | |||
| 325 | static int mtk_pcie_subsys_powerup(struct mtk_pcie *pcie) | ||
| 326 | { | ||
| 327 | struct device *dev = pcie->dev; | ||
| 328 | struct platform_device *pdev = to_platform_device(dev); | ||
| 329 | struct resource *regs; | ||
| 330 | int err; | ||
| 331 | |||
| 332 | /* get shared registers */ | ||
| 333 | regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 334 | pcie->base = devm_ioremap_resource(dev, regs); | ||
| 335 | if (IS_ERR(pcie->base)) { | ||
| 336 | dev_err(dev, "failed to map shared register\n"); | ||
| 337 | return PTR_ERR(pcie->base); | ||
| 338 | } | ||
| 339 | |||
| 340 | pcie->free_ck = devm_clk_get(dev, "free_ck"); | ||
| 341 | if (IS_ERR(pcie->free_ck)) { | ||
| 342 | if (PTR_ERR(pcie->free_ck) == -EPROBE_DEFER) | ||
| 343 | return -EPROBE_DEFER; | ||
| 344 | |||
| 345 | pcie->free_ck = NULL; | ||
| 346 | } | ||
| 347 | |||
| 348 | if (dev->pm_domain) { | ||
| 349 | pm_runtime_enable(dev); | ||
| 350 | pm_runtime_get_sync(dev); | ||
| 351 | } | ||
| 352 | |||
| 353 | /* enable top level clock */ | ||
| 354 | err = clk_prepare_enable(pcie->free_ck); | ||
| 355 | if (err) { | ||
| 356 | dev_err(dev, "failed to enable free_ck\n"); | ||
| 357 | goto err_free_ck; | ||
| 358 | } | ||
| 359 | |||
| 360 | return 0; | ||
| 361 | |||
| 362 | err_free_ck: | ||
| 363 | if (dev->pm_domain) { | ||
| 364 | pm_runtime_put_sync(dev); | ||
| 365 | pm_runtime_disable(dev); | ||
| 366 | } | ||
| 367 | |||
| 368 | return err; | ||
| 369 | } | ||
| 370 | |||
| 371 | static int mtk_pcie_setup(struct mtk_pcie *pcie) | ||
| 372 | { | ||
| 373 | struct device *dev = pcie->dev; | ||
| 374 | struct device_node *node = dev->of_node, *child; | ||
| 375 | struct of_pci_range_parser parser; | ||
| 376 | struct of_pci_range range; | ||
| 377 | struct resource res; | ||
| 378 | struct mtk_pcie_port *port, *tmp; | ||
| 379 | int err; | ||
| 380 | |||
| 381 | if (of_pci_range_parser_init(&parser, node)) { | ||
| 382 | dev_err(dev, "missing \"ranges\" property\n"); | ||
| 383 | return -EINVAL; | ||
| 384 | } | ||
| 385 | |||
| 386 | for_each_of_pci_range(&parser, &range) { | ||
| 387 | err = of_pci_range_to_resource(&range, node, &res); | ||
| 388 | if (err < 0) | ||
| 389 | return err; | ||
| 390 | |||
| 391 | switch (res.flags & IORESOURCE_TYPE_BITS) { | ||
| 392 | case IORESOURCE_IO: | ||
| 393 | pcie->offset.io = res.start - range.pci_addr; | ||
| 394 | |||
| 395 | memcpy(&pcie->pio, &res, sizeof(res)); | ||
| 396 | pcie->pio.name = node->full_name; | ||
| 397 | |||
| 398 | pcie->io.start = range.cpu_addr; | ||
| 399 | pcie->io.end = range.cpu_addr + range.size - 1; | ||
| 400 | pcie->io.flags = IORESOURCE_MEM; | ||
| 401 | pcie->io.name = "I/O"; | ||
| 402 | |||
| 403 | memcpy(&res, &pcie->io, sizeof(res)); | ||
| 404 | break; | ||
| 405 | |||
| 406 | case IORESOURCE_MEM: | ||
| 407 | pcie->offset.mem = res.start - range.pci_addr; | ||
| 408 | |||
| 409 | memcpy(&pcie->mem, &res, sizeof(res)); | ||
| 410 | pcie->mem.name = "non-prefetchable"; | ||
| 411 | break; | ||
| 412 | } | ||
| 413 | } | ||
| 414 | |||
| 415 | err = of_pci_parse_bus_range(node, &pcie->busn); | ||
| 416 | if (err < 0) { | ||
| 417 | dev_err(dev, "failed to parse bus ranges property: %d\n", err); | ||
| 418 | pcie->busn.name = node->name; | ||
| 419 | pcie->busn.start = 0; | ||
| 420 | pcie->busn.end = 0xff; | ||
| 421 | pcie->busn.flags = IORESOURCE_BUS; | ||
| 422 | } | ||
| 423 | |||
| 424 | for_each_available_child_of_node(node, child) { | ||
| 425 | int index; | ||
| 426 | |||
| 427 | err = of_pci_get_devfn(child); | ||
| 428 | if (err < 0) { | ||
| 429 | dev_err(dev, "failed to parse devfn: %d\n", err); | ||
| 430 | return err; | ||
| 431 | } | ||
| 432 | |||
| 433 | index = PCI_SLOT(err); | ||
| 434 | |||
| 435 | err = mtk_pcie_parse_ports(pcie, child, index); | ||
| 436 | if (err) | ||
| 437 | return err; | ||
| 438 | } | ||
| 439 | |||
| 440 | err = mtk_pcie_subsys_powerup(pcie); | ||
| 441 | if (err) | ||
| 442 | return err; | ||
| 443 | |||
| 444 | /* enable each port, and then check link status */ | ||
| 445 | list_for_each_entry_safe(port, tmp, &pcie->ports, list) | ||
| 446 | mtk_pcie_enable_ports(port); | ||
| 447 | |||
| 448 | /* power down PCIe subsys if slots are all empty (link down) */ | ||
| 449 | if (list_empty(&pcie->ports)) | ||
| 450 | mtk_pcie_subsys_powerdown(pcie); | ||
| 451 | |||
| 452 | return 0; | ||
| 453 | } | ||
| 454 | |||
| 455 | static int mtk_pcie_request_resources(struct mtk_pcie *pcie) | ||
| 456 | { | ||
| 457 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 458 | struct list_head *windows = &host->windows; | ||
| 459 | struct device *dev = pcie->dev; | ||
| 460 | int err; | ||
| 461 | |||
| 462 | pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io); | ||
| 463 | pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem); | ||
| 464 | pci_add_resource(windows, &pcie->busn); | ||
| 465 | |||
| 466 | err = devm_request_pci_bus_resources(dev, windows); | ||
| 467 | if (err < 0) | ||
| 468 | return err; | ||
| 469 | |||
| 470 | pci_remap_iospace(&pcie->pio, pcie->io.start); | ||
| 471 | |||
| 472 | return 0; | ||
| 473 | } | ||
| 474 | |||
| 475 | static int mtk_pcie_register_host(struct pci_host_bridge *host) | ||
| 476 | { | ||
| 477 | struct mtk_pcie *pcie = pci_host_bridge_priv(host); | ||
| 478 | struct pci_bus *child; | ||
| 479 | int err; | ||
| 480 | |||
| 481 | host->busnr = pcie->busn.start; | ||
| 482 | host->dev.parent = pcie->dev; | ||
| 483 | host->ops = &mtk_pcie_ops; | ||
| 484 | host->map_irq = of_irq_parse_and_map_pci; | ||
| 485 | host->swizzle_irq = pci_common_swizzle; | ||
| 486 | |||
| 487 | err = pci_scan_root_bus_bridge(host); | ||
| 488 | if (err < 0) | ||
| 489 | return err; | ||
| 490 | |||
| 491 | pci_bus_size_bridges(host->bus); | ||
| 492 | pci_bus_assign_resources(host->bus); | ||
| 493 | |||
| 494 | list_for_each_entry(child, &host->bus->children, node) | ||
| 495 | pcie_bus_configure_settings(child); | ||
| 496 | |||
| 497 | pci_bus_add_devices(host->bus); | ||
| 498 | |||
| 499 | return 0; | ||
| 500 | } | ||
| 501 | |||
| 502 | static int mtk_pcie_probe(struct platform_device *pdev) | ||
| 503 | { | ||
| 504 | struct device *dev = &pdev->dev; | ||
| 505 | struct mtk_pcie *pcie; | ||
| 506 | struct pci_host_bridge *host; | ||
| 507 | int err; | ||
| 508 | |||
| 509 | host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); | ||
| 510 | if (!host) | ||
| 511 | return -ENOMEM; | ||
| 512 | |||
| 513 | pcie = pci_host_bridge_priv(host); | ||
| 514 | |||
| 515 | pcie->dev = dev; | ||
| 516 | platform_set_drvdata(pdev, pcie); | ||
| 517 | INIT_LIST_HEAD(&pcie->ports); | ||
| 518 | |||
| 519 | err = mtk_pcie_setup(pcie); | ||
| 520 | if (err) | ||
| 521 | return err; | ||
| 522 | |||
| 523 | err = mtk_pcie_request_resources(pcie); | ||
| 524 | if (err) | ||
| 525 | goto put_resources; | ||
| 526 | |||
| 527 | err = mtk_pcie_register_host(host); | ||
| 528 | if (err) | ||
| 529 | goto put_resources; | ||
| 530 | |||
| 531 | return 0; | ||
| 532 | |||
| 533 | put_resources: | ||
| 534 | if (!list_empty(&pcie->ports)) | ||
| 535 | mtk_pcie_put_resources(pcie); | ||
| 536 | |||
| 537 | return err; | ||
| 538 | } | ||
| 539 | |||
| 540 | static const struct of_device_id mtk_pcie_ids[] = { | ||
| 541 | { .compatible = "mediatek,mt7623-pcie"}, | ||
| 542 | { .compatible = "mediatek,mt2701-pcie"}, | ||
| 543 | {}, | ||
| 544 | }; | ||
| 545 | |||
| 546 | static struct platform_driver mtk_pcie_driver = { | ||
| 547 | .probe = mtk_pcie_probe, | ||
| 548 | .driver = { | ||
| 549 | .name = "mtk-pcie", | ||
| 550 | .of_match_table = mtk_pcie_ids, | ||
| 551 | .suppress_bind_attrs = true, | ||
| 552 | }, | ||
| 553 | }; | ||
| 554 | builtin_platform_driver(mtk_pcie_driver); | ||
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index cb07c45c1858..246d485b24c6 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c | |||
| @@ -450,29 +450,33 @@ done: | |||
| 450 | static int rcar_pcie_enable(struct rcar_pcie *pcie) | 450 | static int rcar_pcie_enable(struct rcar_pcie *pcie) |
| 451 | { | 451 | { |
| 452 | struct device *dev = pcie->dev; | 452 | struct device *dev = pcie->dev; |
| 453 | struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); | ||
| 453 | struct pci_bus *bus, *child; | 454 | struct pci_bus *bus, *child; |
| 454 | LIST_HEAD(res); | 455 | int ret; |
| 455 | 456 | ||
| 456 | /* Try setting 5 GT/s link speed */ | 457 | /* Try setting 5 GT/s link speed */ |
| 457 | rcar_pcie_force_speedup(pcie); | 458 | rcar_pcie_force_speedup(pcie); |
| 458 | 459 | ||
| 459 | rcar_pcie_setup(&res, pcie); | 460 | rcar_pcie_setup(&bridge->windows, pcie); |
| 460 | 461 | ||
| 461 | pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); | 462 | pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); |
| 462 | 463 | ||
| 464 | bridge->dev.parent = dev; | ||
| 465 | bridge->sysdata = pcie; | ||
| 466 | bridge->busnr = pcie->root_bus_nr; | ||
| 467 | bridge->ops = &rcar_pcie_ops; | ||
| 468 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 469 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 463 | if (IS_ENABLED(CONFIG_PCI_MSI)) | 470 | if (IS_ENABLED(CONFIG_PCI_MSI)) |
| 464 | bus = pci_scan_root_bus_msi(dev, pcie->root_bus_nr, | 471 | bridge->msi = &pcie->msi.chip; |
| 465 | &rcar_pcie_ops, pcie, &res, &pcie->msi.chip); | ||
| 466 | else | ||
| 467 | bus = pci_scan_root_bus(dev, pcie->root_bus_nr, | ||
| 468 | &rcar_pcie_ops, pcie, &res); | ||
| 469 | 472 | ||
| 470 | if (!bus) { | 473 | ret = pci_scan_root_bus_bridge(bridge); |
| 471 | dev_err(dev, "Scanning rootbus failed"); | 474 | if (ret < 0) { |
| 472 | return -ENODEV; | 475 | kfree(bridge); |
| 476 | return ret; | ||
| 473 | } | 477 | } |
| 474 | 478 | ||
| 475 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | 479 | bus = bridge->bus; |
| 476 | 480 | ||
| 477 | pci_bus_size_bridges(bus); | 481 | pci_bus_size_bridges(bus); |
| 478 | pci_bus_assign_resources(bus); | 482 | pci_bus_assign_resources(bus); |
| @@ -1127,11 +1131,14 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1127 | unsigned int data; | 1131 | unsigned int data; |
| 1128 | int err; | 1132 | int err; |
| 1129 | int (*hw_init_fn)(struct rcar_pcie *); | 1133 | int (*hw_init_fn)(struct rcar_pcie *); |
| 1134 | struct pci_host_bridge *bridge; | ||
| 1130 | 1135 | ||
| 1131 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | 1136 | bridge = pci_alloc_host_bridge(sizeof(*pcie)); |
| 1132 | if (!pcie) | 1137 | if (!bridge) |
| 1133 | return -ENOMEM; | 1138 | return -ENOMEM; |
| 1134 | 1139 | ||
| 1140 | pcie = pci_host_bridge_priv(bridge); | ||
| 1141 | |||
| 1135 | pcie->dev = dev; | 1142 | pcie->dev = dev; |
| 1136 | 1143 | ||
| 1137 | INIT_LIST_HEAD(&pcie->resources); | 1144 | INIT_LIST_HEAD(&pcie->resources); |
| @@ -1141,12 +1148,12 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1141 | err = rcar_pcie_get_resources(pcie); | 1148 | err = rcar_pcie_get_resources(pcie); |
| 1142 | if (err < 0) { | 1149 | if (err < 0) { |
| 1143 | dev_err(dev, "failed to request resources: %d\n", err); | 1150 | dev_err(dev, "failed to request resources: %d\n", err); |
| 1144 | return err; | 1151 | goto err_free_bridge; |
| 1145 | } | 1152 | } |
| 1146 | 1153 | ||
| 1147 | err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); | 1154 | err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); |
| 1148 | if (err) | 1155 | if (err) |
| 1149 | return err; | 1156 | goto err_free_bridge; |
| 1150 | 1157 | ||
| 1151 | pm_runtime_enable(dev); | 1158 | pm_runtime_enable(dev); |
| 1152 | err = pm_runtime_get_sync(dev); | 1159 | err = pm_runtime_get_sync(dev); |
| @@ -1183,6 +1190,9 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1183 | 1190 | ||
| 1184 | return 0; | 1191 | return 0; |
| 1185 | 1192 | ||
| 1193 | err_free_bridge: | ||
| 1194 | pci_free_host_bridge(bridge); | ||
| 1195 | |||
| 1186 | err_pm_put: | 1196 | err_pm_put: |
| 1187 | pm_runtime_put(dev); | 1197 | pm_runtime_put(dev); |
| 1188 | 1198 | ||
diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 0e020b6e0943..5acf8694fb23 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c | |||
| @@ -139,6 +139,7 @@ | |||
| 139 | PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ | 139 | PCIE_CORE_INT_CT | PCIE_CORE_INT_UTC | \ |
| 140 | PCIE_CORE_INT_MMVC) | 140 | PCIE_CORE_INT_MMVC) |
| 141 | 141 | ||
| 142 | #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 | ||
| 142 | #define PCIE_RC_CONFIG_BASE 0xa00000 | 143 | #define PCIE_RC_CONFIG_BASE 0xa00000 |
| 143 | #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) | 144 | #define PCIE_RC_CONFIG_RID_CCR (PCIE_RC_CONFIG_BASE + 0x08) |
| 144 | #define PCIE_RC_CONFIG_SCC_SHIFT 16 | 145 | #define PCIE_RC_CONFIG_SCC_SHIFT 16 |
| @@ -146,6 +147,9 @@ | |||
| 146 | #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 | 147 | #define PCIE_RC_CONFIG_DCR_CSPL_SHIFT 18 |
| 147 | #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff | 148 | #define PCIE_RC_CONFIG_DCR_CSPL_LIMIT 0xff |
| 148 | #define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 | 149 | #define PCIE_RC_CONFIG_DCR_CPLS_SHIFT 26 |
| 150 | #define PCIE_RC_CONFIG_DCSR (PCIE_RC_CONFIG_BASE + 0xc8) | ||
| 151 | #define PCIE_RC_CONFIG_DCSR_MPS_MASK GENMASK(7, 5) | ||
| 152 | #define PCIE_RC_CONFIG_DCSR_MPS_256 (0x1 << 5) | ||
| 149 | #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) | 153 | #define PCIE_RC_CONFIG_LINK_CAP (PCIE_RC_CONFIG_BASE + 0xcc) |
| 150 | #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) | 154 | #define PCIE_RC_CONFIG_LINK_CAP_L0S BIT(10) |
| 151 | #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) | 155 | #define PCIE_RC_CONFIG_LCS (PCIE_RC_CONFIG_BASE + 0xd0) |
| @@ -175,6 +179,8 @@ | |||
| 175 | #define IB_ROOT_PORT_REG_SIZE_SHIFT 3 | 179 | #define IB_ROOT_PORT_REG_SIZE_SHIFT 3 |
| 176 | #define AXI_WRAPPER_IO_WRITE 0x6 | 180 | #define AXI_WRAPPER_IO_WRITE 0x6 |
| 177 | #define AXI_WRAPPER_MEM_WRITE 0x2 | 181 | #define AXI_WRAPPER_MEM_WRITE 0x2 |
| 182 | #define AXI_WRAPPER_TYPE0_CFG 0xa | ||
| 183 | #define AXI_WRAPPER_TYPE1_CFG 0xb | ||
| 178 | #define AXI_WRAPPER_NOR_MSG 0xc | 184 | #define AXI_WRAPPER_NOR_MSG 0xc |
| 179 | 185 | ||
| 180 | #define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 | 186 | #define MAX_AXI_IB_ROOTPORT_REGION_NUM 3 |
| @@ -198,6 +204,7 @@ | |||
| 198 | #define RC_REGION_0_ADDR_TRANS_H 0x00000000 | 204 | #define RC_REGION_0_ADDR_TRANS_H 0x00000000 |
| 199 | #define RC_REGION_0_ADDR_TRANS_L 0x00000000 | 205 | #define RC_REGION_0_ADDR_TRANS_L 0x00000000 |
| 200 | #define RC_REGION_0_PASS_BITS (25 - 1) | 206 | #define RC_REGION_0_PASS_BITS (25 - 1) |
| 207 | #define RC_REGION_0_TYPE_MASK GENMASK(3, 0) | ||
| 201 | #define MAX_AXI_WRAPPER_REGION_NUM 33 | 208 | #define MAX_AXI_WRAPPER_REGION_NUM 33 |
| 202 | 209 | ||
| 203 | struct rockchip_pcie { | 210 | struct rockchip_pcie { |
| @@ -295,7 +302,9 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, | |||
| 295 | static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, | 302 | static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, |
| 296 | int where, int size, u32 *val) | 303 | int where, int size, u32 *val) |
| 297 | { | 304 | { |
| 298 | void __iomem *addr = rockchip->apb_base + PCIE_RC_CONFIG_BASE + where; | 305 | void __iomem *addr; |
| 306 | |||
| 307 | addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; | ||
| 299 | 308 | ||
| 300 | if (!IS_ALIGNED((uintptr_t)addr, size)) { | 309 | if (!IS_ALIGNED((uintptr_t)addr, size)) { |
| 301 | *val = 0; | 310 | *val = 0; |
| @@ -319,11 +328,13 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, | |||
| 319 | int where, int size, u32 val) | 328 | int where, int size, u32 val) |
| 320 | { | 329 | { |
| 321 | u32 mask, tmp, offset; | 330 | u32 mask, tmp, offset; |
| 331 | void __iomem *addr; | ||
| 322 | 332 | ||
| 323 | offset = where & ~0x3; | 333 | offset = where & ~0x3; |
| 334 | addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; | ||
| 324 | 335 | ||
| 325 | if (size == 4) { | 336 | if (size == 4) { |
| 326 | writel(val, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); | 337 | writel(val, addr); |
| 327 | return PCIBIOS_SUCCESSFUL; | 338 | return PCIBIOS_SUCCESSFUL; |
| 328 | } | 339 | } |
| 329 | 340 | ||
| @@ -334,13 +345,33 @@ static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, | |||
| 334 | * corrupt RW1C bits in adjacent registers. But the hardware | 345 | * corrupt RW1C bits in adjacent registers. But the hardware |
| 335 | * doesn't support smaller writes. | 346 | * doesn't support smaller writes. |
| 336 | */ | 347 | */ |
| 337 | tmp = readl(rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset) & mask; | 348 | tmp = readl(addr) & mask; |
| 338 | tmp |= val << ((where & 0x3) * 8); | 349 | tmp |= val << ((where & 0x3) * 8); |
| 339 | writel(tmp, rockchip->apb_base + PCIE_RC_CONFIG_BASE + offset); | 350 | writel(tmp, addr); |
| 340 | 351 | ||
| 341 | return PCIBIOS_SUCCESSFUL; | 352 | return PCIBIOS_SUCCESSFUL; |
| 342 | } | 353 | } |
| 343 | 354 | ||
| 355 | static void rockchip_pcie_cfg_configuration_accesses( | ||
| 356 | struct rockchip_pcie *rockchip, u32 type) | ||
| 357 | { | ||
| 358 | u32 ob_desc_0; | ||
| 359 | |||
| 360 | /* Configuration Accesses for region 0 */ | ||
| 361 | rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); | ||
| 362 | |||
| 363 | rockchip_pcie_write(rockchip, | ||
| 364 | (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), | ||
| 365 | PCIE_CORE_OB_REGION_ADDR0); | ||
| 366 | rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, | ||
| 367 | PCIE_CORE_OB_REGION_ADDR1); | ||
| 368 | ob_desc_0 = rockchip_pcie_read(rockchip, PCIE_CORE_OB_REGION_DESC0); | ||
| 369 | ob_desc_0 &= ~(RC_REGION_0_TYPE_MASK); | ||
| 370 | ob_desc_0 |= (type | (0x1 << 23)); | ||
| 371 | rockchip_pcie_write(rockchip, ob_desc_0, PCIE_CORE_OB_REGION_DESC0); | ||
| 372 | rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); | ||
| 373 | } | ||
| 374 | |||
| 344 | static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, | 375 | static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, |
| 345 | struct pci_bus *bus, u32 devfn, | 376 | struct pci_bus *bus, u32 devfn, |
| 346 | int where, int size, u32 *val) | 377 | int where, int size, u32 *val) |
| @@ -355,6 +386,13 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, | |||
| 355 | return PCIBIOS_BAD_REGISTER_NUMBER; | 386 | return PCIBIOS_BAD_REGISTER_NUMBER; |
| 356 | } | 387 | } |
| 357 | 388 | ||
| 389 | if (bus->parent->number == rockchip->root_bus_nr) | ||
| 390 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 391 | AXI_WRAPPER_TYPE0_CFG); | ||
| 392 | else | ||
| 393 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 394 | AXI_WRAPPER_TYPE1_CFG); | ||
| 395 | |||
| 358 | if (size == 4) { | 396 | if (size == 4) { |
| 359 | *val = readl(rockchip->reg_base + busdev); | 397 | *val = readl(rockchip->reg_base + busdev); |
| 360 | } else if (size == 2) { | 398 | } else if (size == 2) { |
| @@ -379,6 +417,13 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, | |||
| 379 | if (!IS_ALIGNED(busdev, size)) | 417 | if (!IS_ALIGNED(busdev, size)) |
| 380 | return PCIBIOS_BAD_REGISTER_NUMBER; | 418 | return PCIBIOS_BAD_REGISTER_NUMBER; |
| 381 | 419 | ||
| 420 | if (bus->parent->number == rockchip->root_bus_nr) | ||
| 421 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 422 | AXI_WRAPPER_TYPE0_CFG); | ||
| 423 | else | ||
| 424 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 425 | AXI_WRAPPER_TYPE1_CFG); | ||
| 426 | |||
| 382 | if (size == 4) | 427 | if (size == 4) |
| 383 | writel(val, rockchip->reg_base + busdev); | 428 | writel(val, rockchip->reg_base + busdev); |
| 384 | else if (size == 2) | 429 | else if (size == 2) |
| @@ -664,15 +709,10 @@ static int rockchip_pcie_init_port(struct rockchip_pcie *rockchip) | |||
| 664 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); | 709 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP); |
| 665 | } | 710 | } |
| 666 | 711 | ||
| 667 | rockchip_pcie_write(rockchip, 0x0, PCIE_RC_BAR_CONF); | 712 | status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR); |
| 668 | 713 | status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK; | |
| 669 | rockchip_pcie_write(rockchip, | 714 | status |= PCIE_RC_CONFIG_DCSR_MPS_256; |
| 670 | (RC_REGION_0_ADDR_TRANS_L + RC_REGION_0_PASS_BITS), | 715 | rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR); |
| 671 | PCIE_CORE_OB_REGION_ADDR0); | ||
| 672 | rockchip_pcie_write(rockchip, RC_REGION_0_ADDR_TRANS_H, | ||
| 673 | PCIE_CORE_OB_REGION_ADDR1); | ||
| 674 | rockchip_pcie_write(rockchip, 0x0080000a, PCIE_CORE_OB_REGION_DESC0); | ||
| 675 | rockchip_pcie_write(rockchip, 0x0, PCIE_CORE_OB_REGION_DESC1); | ||
| 676 | 716 | ||
| 677 | return 0; | 717 | return 0; |
| 678 | } | 718 | } |
| @@ -1156,13 +1196,16 @@ static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip, | |||
| 1156 | return 0; | 1196 | return 0; |
| 1157 | } | 1197 | } |
| 1158 | 1198 | ||
| 1159 | static int rockchip_cfg_atu(struct rockchip_pcie *rockchip) | 1199 | static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip) |
| 1160 | { | 1200 | { |
| 1161 | struct device *dev = rockchip->dev; | 1201 | struct device *dev = rockchip->dev; |
| 1162 | int offset; | 1202 | int offset; |
| 1163 | int err; | 1203 | int err; |
| 1164 | int reg_no; | 1204 | int reg_no; |
| 1165 | 1205 | ||
| 1206 | rockchip_pcie_cfg_configuration_accesses(rockchip, | ||
| 1207 | AXI_WRAPPER_TYPE0_CFG); | ||
| 1208 | |||
| 1166 | for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { | 1209 | for (reg_no = 0; reg_no < (rockchip->mem_size >> 20); reg_no++) { |
| 1167 | err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, | 1210 | err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1, |
| 1168 | AXI_WRAPPER_MEM_WRITE, | 1211 | AXI_WRAPPER_MEM_WRITE, |
| @@ -1251,6 +1294,9 @@ static int __maybe_unused rockchip_pcie_suspend_noirq(struct device *dev) | |||
| 1251 | clk_disable_unprepare(rockchip->aclk_perf_pcie); | 1294 | clk_disable_unprepare(rockchip->aclk_perf_pcie); |
| 1252 | clk_disable_unprepare(rockchip->aclk_pcie); | 1295 | clk_disable_unprepare(rockchip->aclk_pcie); |
| 1253 | 1296 | ||
| 1297 | if (!IS_ERR(rockchip->vpcie0v9)) | ||
| 1298 | regulator_disable(rockchip->vpcie0v9); | ||
| 1299 | |||
| 1254 | return ret; | 1300 | return ret; |
| 1255 | } | 1301 | } |
| 1256 | 1302 | ||
| @@ -1259,24 +1305,54 @@ static int __maybe_unused rockchip_pcie_resume_noirq(struct device *dev) | |||
| 1259 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); | 1305 | struct rockchip_pcie *rockchip = dev_get_drvdata(dev); |
| 1260 | int err; | 1306 | int err; |
| 1261 | 1307 | ||
| 1262 | clk_prepare_enable(rockchip->clk_pcie_pm); | 1308 | if (!IS_ERR(rockchip->vpcie0v9)) { |
| 1263 | clk_prepare_enable(rockchip->hclk_pcie); | 1309 | err = regulator_enable(rockchip->vpcie0v9); |
| 1264 | clk_prepare_enable(rockchip->aclk_perf_pcie); | 1310 | if (err) { |
| 1265 | clk_prepare_enable(rockchip->aclk_pcie); | 1311 | dev_err(dev, "fail to enable vpcie0v9 regulator\n"); |
| 1312 | return err; | ||
| 1313 | } | ||
| 1314 | } | ||
| 1315 | |||
| 1316 | err = clk_prepare_enable(rockchip->clk_pcie_pm); | ||
| 1317 | if (err) | ||
| 1318 | goto err_pcie_pm; | ||
| 1319 | |||
| 1320 | err = clk_prepare_enable(rockchip->hclk_pcie); | ||
| 1321 | if (err) | ||
| 1322 | goto err_hclk_pcie; | ||
| 1323 | |||
| 1324 | err = clk_prepare_enable(rockchip->aclk_perf_pcie); | ||
| 1325 | if (err) | ||
| 1326 | goto err_aclk_perf_pcie; | ||
| 1327 | |||
| 1328 | err = clk_prepare_enable(rockchip->aclk_pcie); | ||
| 1329 | if (err) | ||
| 1330 | goto err_aclk_pcie; | ||
| 1266 | 1331 | ||
| 1267 | err = rockchip_pcie_init_port(rockchip); | 1332 | err = rockchip_pcie_init_port(rockchip); |
| 1268 | if (err) | 1333 | if (err) |
| 1269 | return err; | 1334 | goto err_pcie_resume; |
| 1270 | 1335 | ||
| 1271 | err = rockchip_cfg_atu(rockchip); | 1336 | err = rockchip_pcie_cfg_atu(rockchip); |
| 1272 | if (err) | 1337 | if (err) |
| 1273 | return err; | 1338 | goto err_pcie_resume; |
| 1274 | 1339 | ||
| 1275 | /* Need this to enter L1 again */ | 1340 | /* Need this to enter L1 again */ |
| 1276 | rockchip_pcie_update_txcredit_mui(rockchip); | 1341 | rockchip_pcie_update_txcredit_mui(rockchip); |
| 1277 | rockchip_pcie_enable_interrupts(rockchip); | 1342 | rockchip_pcie_enable_interrupts(rockchip); |
| 1278 | 1343 | ||
| 1279 | return 0; | 1344 | return 0; |
| 1345 | |||
| 1346 | err_pcie_resume: | ||
| 1347 | clk_disable_unprepare(rockchip->aclk_pcie); | ||
| 1348 | err_aclk_pcie: | ||
| 1349 | clk_disable_unprepare(rockchip->aclk_perf_pcie); | ||
| 1350 | err_aclk_perf_pcie: | ||
| 1351 | clk_disable_unprepare(rockchip->hclk_pcie); | ||
| 1352 | err_hclk_pcie: | ||
| 1353 | clk_disable_unprepare(rockchip->clk_pcie_pm); | ||
| 1354 | err_pcie_pm: | ||
| 1355 | return err; | ||
| 1280 | } | 1356 | } |
| 1281 | 1357 | ||
| 1282 | static int rockchip_pcie_probe(struct platform_device *pdev) | 1358 | static int rockchip_pcie_probe(struct platform_device *pdev) |
| @@ -1284,6 +1360,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
| 1284 | struct rockchip_pcie *rockchip; | 1360 | struct rockchip_pcie *rockchip; |
| 1285 | struct device *dev = &pdev->dev; | 1361 | struct device *dev = &pdev->dev; |
| 1286 | struct pci_bus *bus, *child; | 1362 | struct pci_bus *bus, *child; |
| 1363 | struct pci_host_bridge *bridge; | ||
| 1287 | struct resource_entry *win; | 1364 | struct resource_entry *win; |
| 1288 | resource_size_t io_base; | 1365 | resource_size_t io_base; |
| 1289 | struct resource *mem; | 1366 | struct resource *mem; |
| @@ -1295,10 +1372,12 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
| 1295 | if (!dev->of_node) | 1372 | if (!dev->of_node) |
| 1296 | return -ENODEV; | 1373 | return -ENODEV; |
| 1297 | 1374 | ||
| 1298 | rockchip = devm_kzalloc(dev, sizeof(*rockchip), GFP_KERNEL); | 1375 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip)); |
| 1299 | if (!rockchip) | 1376 | if (!bridge) |
| 1300 | return -ENOMEM; | 1377 | return -ENOMEM; |
| 1301 | 1378 | ||
| 1379 | rockchip = pci_host_bridge_priv(bridge); | ||
| 1380 | |||
| 1302 | platform_set_drvdata(pdev, rockchip); | 1381 | platform_set_drvdata(pdev, rockchip); |
| 1303 | rockchip->dev = dev; | 1382 | rockchip->dev = dev; |
| 1304 | 1383 | ||
| @@ -1385,22 +1464,30 @@ static int rockchip_pcie_probe(struct platform_device *pdev) | |||
| 1385 | } | 1464 | } |
| 1386 | } | 1465 | } |
| 1387 | 1466 | ||
| 1388 | err = rockchip_cfg_atu(rockchip); | 1467 | err = rockchip_pcie_cfg_atu(rockchip); |
| 1389 | if (err) | 1468 | if (err) |
| 1390 | goto err_free_res; | 1469 | goto err_free_res; |
| 1391 | 1470 | ||
| 1392 | rockchip->msg_region = devm_ioremap(rockchip->dev, | 1471 | rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M); |
| 1393 | rockchip->msg_bus_addr, SZ_1M); | ||
| 1394 | if (!rockchip->msg_region) { | 1472 | if (!rockchip->msg_region) { |
| 1395 | err = -ENOMEM; | 1473 | err = -ENOMEM; |
| 1396 | goto err_free_res; | 1474 | goto err_free_res; |
| 1397 | } | 1475 | } |
| 1398 | 1476 | ||
| 1399 | bus = pci_scan_root_bus(&pdev->dev, 0, &rockchip_pcie_ops, rockchip, &res); | 1477 | list_splice_init(&res, &bridge->windows); |
| 1400 | if (!bus) { | 1478 | bridge->dev.parent = dev; |
| 1401 | err = -ENOMEM; | 1479 | bridge->sysdata = rockchip; |
| 1480 | bridge->busnr = 0; | ||
| 1481 | bridge->ops = &rockchip_pcie_ops; | ||
| 1482 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 1483 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 1484 | |||
| 1485 | err = pci_scan_root_bus_bridge(bridge); | ||
| 1486 | if (!err) | ||
| 1402 | goto err_free_res; | 1487 | goto err_free_res; |
| 1403 | } | 1488 | |
| 1489 | bus = bridge->bus; | ||
| 1490 | |||
| 1404 | rockchip->root_bus = bus; | 1491 | rockchip->root_bus = bus; |
| 1405 | 1492 | ||
| 1406 | pci_bus_size_bridges(bus); | 1493 | pci_bus_size_bridges(bus); |
diff --git a/drivers/pci/host/pcie-tango.c b/drivers/pci/host/pcie-tango.c new file mode 100644 index 000000000000..6bbb81f06a53 --- /dev/null +++ b/drivers/pci/host/pcie-tango.c | |||
| @@ -0,0 +1,141 @@ | |||
| 1 | #include <linux/pci-ecam.h> | ||
| 2 | #include <linux/delay.h> | ||
| 3 | #include <linux/of.h> | ||
| 4 | |||
| 5 | #define SMP8759_MUX 0x48 | ||
| 6 | #define SMP8759_TEST_OUT 0x74 | ||
| 7 | |||
| 8 | struct tango_pcie { | ||
| 9 | void __iomem *base; | ||
| 10 | }; | ||
| 11 | |||
| 12 | static int smp8759_config_read(struct pci_bus *bus, unsigned int devfn, | ||
| 13 | int where, int size, u32 *val) | ||
| 14 | { | ||
| 15 | struct pci_config_window *cfg = bus->sysdata; | ||
| 16 | struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); | ||
| 17 | int ret; | ||
| 18 | |||
| 19 | /* Reads in configuration space outside devfn 0 return garbage */ | ||
| 20 | if (devfn != 0) | ||
| 21 | return PCIBIOS_FUNC_NOT_SUPPORTED; | ||
| 22 | |||
| 23 | /* | ||
| 24 | * PCI config and MMIO accesses are muxed. Linux doesn't have a | ||
| 25 | * mutual exclusion mechanism for config vs. MMIO accesses, so | ||
| 26 | * concurrent accesses may cause corruption. | ||
| 27 | */ | ||
| 28 | writel_relaxed(1, pcie->base + SMP8759_MUX); | ||
| 29 | ret = pci_generic_config_read(bus, devfn, where, size, val); | ||
| 30 | writel_relaxed(0, pcie->base + SMP8759_MUX); | ||
| 31 | |||
| 32 | return ret; | ||
| 33 | } | ||
| 34 | |||
| 35 | static int smp8759_config_write(struct pci_bus *bus, unsigned int devfn, | ||
| 36 | int where, int size, u32 val) | ||
| 37 | { | ||
| 38 | struct pci_config_window *cfg = bus->sysdata; | ||
| 39 | struct tango_pcie *pcie = dev_get_drvdata(cfg->parent); | ||
| 40 | int ret; | ||
| 41 | |||
| 42 | writel_relaxed(1, pcie->base + SMP8759_MUX); | ||
| 43 | ret = pci_generic_config_write(bus, devfn, where, size, val); | ||
| 44 | writel_relaxed(0, pcie->base + SMP8759_MUX); | ||
| 45 | |||
| 46 | return ret; | ||
| 47 | } | ||
| 48 | |||
| 49 | static struct pci_ecam_ops smp8759_ecam_ops = { | ||
| 50 | .bus_shift = 20, | ||
| 51 | .pci_ops = { | ||
| 52 | .map_bus = pci_ecam_map_bus, | ||
| 53 | .read = smp8759_config_read, | ||
| 54 | .write = smp8759_config_write, | ||
| 55 | } | ||
| 56 | }; | ||
| 57 | |||
| 58 | static int tango_pcie_link_up(struct tango_pcie *pcie) | ||
| 59 | { | ||
| 60 | void __iomem *test_out = pcie->base + SMP8759_TEST_OUT; | ||
| 61 | int i; | ||
| 62 | |||
| 63 | writel_relaxed(16, test_out); | ||
| 64 | for (i = 0; i < 10; ++i) { | ||
| 65 | u32 ltssm_state = readl_relaxed(test_out) >> 8; | ||
| 66 | if ((ltssm_state & 0x1f) == 0xf) /* L0 */ | ||
| 67 | return 1; | ||
| 68 | usleep_range(3000, 4000); | ||
| 69 | } | ||
| 70 | |||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | |||
| 74 | static int tango_pcie_probe(struct platform_device *pdev) | ||
| 75 | { | ||
| 76 | struct device *dev = &pdev->dev; | ||
| 77 | struct tango_pcie *pcie; | ||
| 78 | struct resource *res; | ||
| 79 | int ret; | ||
| 80 | |||
| 81 | dev_warn(dev, "simultaneous PCI config and MMIO accesses may cause data corruption\n"); | ||
| 82 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); | ||
| 83 | |||
| 84 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | ||
| 85 | if (!pcie) | ||
| 86 | return -ENOMEM; | ||
| 87 | |||
| 88 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 89 | pcie->base = devm_ioremap_resource(dev, res); | ||
| 90 | if (IS_ERR(pcie->base)) | ||
| 91 | return PTR_ERR(pcie->base); | ||
| 92 | |||
| 93 | platform_set_drvdata(pdev, pcie); | ||
| 94 | |||
| 95 | if (!tango_pcie_link_up(pcie)) | ||
| 96 | return -ENODEV; | ||
| 97 | |||
| 98 | return pci_host_common_probe(pdev, &smp8759_ecam_ops); | ||
| 99 | } | ||
| 100 | |||
| 101 | static const struct of_device_id tango_pcie_ids[] = { | ||
| 102 | { .compatible = "sigma,smp8759-pcie" }, | ||
| 103 | { }, | ||
| 104 | }; | ||
| 105 | |||
| 106 | static struct platform_driver tango_pcie_driver = { | ||
| 107 | .probe = tango_pcie_probe, | ||
| 108 | .driver = { | ||
| 109 | .name = KBUILD_MODNAME, | ||
| 110 | .of_match_table = tango_pcie_ids, | ||
| 111 | .suppress_bind_attrs = true, | ||
| 112 | }, | ||
| 113 | }; | ||
| 114 | builtin_platform_driver(tango_pcie_driver); | ||
| 115 | |||
| 116 | /* | ||
| 117 | * The root complex advertises the wrong device class. | ||
| 118 | * Header Type 1 is for PCI-to-PCI bridges. | ||
| 119 | */ | ||
| 120 | static void tango_fixup_class(struct pci_dev *dev) | ||
| 121 | { | ||
| 122 | dev->class = PCI_CLASS_BRIDGE_PCI << 8; | ||
| 123 | } | ||
| 124 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_class); | ||
| 125 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_class); | ||
| 126 | |||
| 127 | /* | ||
| 128 | * The root complex exposes a "fake" BAR, which is used to filter | ||
| 129 | * bus-to-system accesses. Only accesses within the range defined by this | ||
| 130 | * BAR are forwarded to the host, others are ignored. | ||
| 131 | * | ||
| 132 | * By default, the DMA framework expects an identity mapping, and DRAM0 is | ||
| 133 | * mapped at 0x80000000. | ||
| 134 | */ | ||
| 135 | static void tango_fixup_bar(struct pci_dev *dev) | ||
| 136 | { | ||
| 137 | dev->non_compliant_bars = true; | ||
| 138 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, 0x80000000); | ||
| 139 | } | ||
| 140 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0024, tango_fixup_bar); | ||
| 141 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SIGMA, 0x0028, tango_fixup_bar); | ||
diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c index 4b16b26ae909..eec641a34fc5 100644 --- a/drivers/pci/host/pcie-xilinx-nwl.c +++ b/drivers/pci/host/pcie-xilinx-nwl.c | |||
| @@ -172,6 +172,7 @@ struct nwl_pcie { | |||
| 172 | u8 root_busno; | 172 | u8 root_busno; |
| 173 | struct nwl_msi msi; | 173 | struct nwl_msi msi; |
| 174 | struct irq_domain *legacy_irq_domain; | 174 | struct irq_domain *legacy_irq_domain; |
| 175 | raw_spinlock_t leg_mask_lock; | ||
| 175 | }; | 176 | }; |
| 176 | 177 | ||
| 177 | static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) | 178 | static inline u32 nwl_bridge_readl(struct nwl_pcie *pcie, u32 off) |
| @@ -383,11 +384,52 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc) | |||
| 383 | chained_irq_exit(chip, desc); | 384 | chained_irq_exit(chip, desc); |
| 384 | } | 385 | } |
| 385 | 386 | ||
| 387 | static void nwl_mask_leg_irq(struct irq_data *data) | ||
| 388 | { | ||
| 389 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 390 | struct nwl_pcie *pcie; | ||
| 391 | unsigned long flags; | ||
| 392 | u32 mask; | ||
| 393 | u32 val; | ||
| 394 | |||
| 395 | pcie = irq_desc_get_chip_data(desc); | ||
| 396 | mask = 1 << (data->hwirq - 1); | ||
| 397 | raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); | ||
| 398 | val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); | ||
| 399 | nwl_bridge_writel(pcie, (val & (~mask)), MSGF_LEG_MASK); | ||
| 400 | raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); | ||
| 401 | } | ||
| 402 | |||
| 403 | static void nwl_unmask_leg_irq(struct irq_data *data) | ||
| 404 | { | ||
| 405 | struct irq_desc *desc = irq_to_desc(data->irq); | ||
| 406 | struct nwl_pcie *pcie; | ||
| 407 | unsigned long flags; | ||
| 408 | u32 mask; | ||
| 409 | u32 val; | ||
| 410 | |||
| 411 | pcie = irq_desc_get_chip_data(desc); | ||
| 412 | mask = 1 << (data->hwirq - 1); | ||
| 413 | raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags); | ||
| 414 | val = nwl_bridge_readl(pcie, MSGF_LEG_MASK); | ||
| 415 | nwl_bridge_writel(pcie, (val | mask), MSGF_LEG_MASK); | ||
| 416 | raw_spin_unlock_irqrestore(&pcie->leg_mask_lock, flags); | ||
| 417 | } | ||
| 418 | |||
| 419 | static struct irq_chip nwl_leg_irq_chip = { | ||
| 420 | .name = "nwl_pcie:legacy", | ||
| 421 | .irq_enable = nwl_unmask_leg_irq, | ||
| 422 | .irq_disable = nwl_mask_leg_irq, | ||
| 423 | .irq_mask = nwl_mask_leg_irq, | ||
| 424 | .irq_unmask = nwl_unmask_leg_irq, | ||
| 425 | }; | ||
| 426 | |||
| 386 | static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, | 427 | static int nwl_legacy_map(struct irq_domain *domain, unsigned int irq, |
| 387 | irq_hw_number_t hwirq) | 428 | irq_hw_number_t hwirq) |
| 388 | { | 429 | { |
| 389 | irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); | 430 | irq_set_chip_and_handler(irq, &nwl_leg_irq_chip, handle_level_irq); |
| 390 | irq_set_chip_data(irq, domain->host_data); | 431 | irq_set_chip_data(irq, domain->host_data); |
| 432 | irq_set_status_flags(irq, IRQ_LEVEL); | ||
| 391 | 433 | ||
| 392 | return 0; | 434 | return 0; |
| 393 | } | 435 | } |
| @@ -526,11 +568,12 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie) | |||
| 526 | return -ENOMEM; | 568 | return -ENOMEM; |
| 527 | } | 569 | } |
| 528 | 570 | ||
| 571 | raw_spin_lock_init(&pcie->leg_mask_lock); | ||
| 529 | nwl_pcie_init_msi_irq_domain(pcie); | 572 | nwl_pcie_init_msi_irq_domain(pcie); |
| 530 | return 0; | 573 | return 0; |
| 531 | } | 574 | } |
| 532 | 575 | ||
| 533 | static int nwl_pcie_enable_msi(struct nwl_pcie *pcie, struct pci_bus *bus) | 576 | static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) |
| 534 | { | 577 | { |
| 535 | struct device *dev = pcie->dev; | 578 | struct device *dev = pcie->dev; |
| 536 | struct platform_device *pdev = to_platform_device(dev); | 579 | struct platform_device *pdev = to_platform_device(dev); |
| @@ -791,13 +834,16 @@ static int nwl_pcie_probe(struct platform_device *pdev) | |||
| 791 | struct nwl_pcie *pcie; | 834 | struct nwl_pcie *pcie; |
| 792 | struct pci_bus *bus; | 835 | struct pci_bus *bus; |
| 793 | struct pci_bus *child; | 836 | struct pci_bus *child; |
| 837 | struct pci_host_bridge *bridge; | ||
| 794 | int err; | 838 | int err; |
| 795 | resource_size_t iobase = 0; | 839 | resource_size_t iobase = 0; |
| 796 | LIST_HEAD(res); | 840 | LIST_HEAD(res); |
| 797 | 841 | ||
| 798 | pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); | 842 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); |
| 799 | if (!pcie) | 843 | if (!bridge) |
| 800 | return -ENOMEM; | 844 | return -ENODEV; |
| 845 | |||
| 846 | pcie = pci_host_bridge_priv(bridge); | ||
| 801 | 847 | ||
| 802 | pcie->dev = dev; | 848 | pcie->dev = dev; |
| 803 | pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; | 849 | pcie->ecam_value = NWL_ECAM_VALUE_DEFAULT; |
| @@ -830,21 +876,28 @@ static int nwl_pcie_probe(struct platform_device *pdev) | |||
| 830 | goto error; | 876 | goto error; |
| 831 | } | 877 | } |
| 832 | 878 | ||
| 833 | bus = pci_create_root_bus(dev, pcie->root_busno, | 879 | list_splice_init(&res, &bridge->windows); |
| 834 | &nwl_pcie_ops, pcie, &res); | 880 | bridge->dev.parent = dev; |
| 835 | if (!bus) { | 881 | bridge->sysdata = pcie; |
| 836 | err = -ENOMEM; | 882 | bridge->busnr = pcie->root_busno; |
| 837 | goto error; | 883 | bridge->ops = &nwl_pcie_ops; |
| 838 | } | 884 | bridge->map_irq = of_irq_parse_and_map_pci; |
| 885 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 839 | 886 | ||
| 840 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | 887 | if (IS_ENABLED(CONFIG_PCI_MSI)) { |
| 841 | err = nwl_pcie_enable_msi(pcie, bus); | 888 | err = nwl_pcie_enable_msi(pcie); |
| 842 | if (err < 0) { | 889 | if (err < 0) { |
| 843 | dev_err(dev, "failed to enable MSI support: %d\n", err); | 890 | dev_err(dev, "failed to enable MSI support: %d\n", err); |
| 844 | goto error; | 891 | goto error; |
| 845 | } | 892 | } |
| 846 | } | 893 | } |
| 847 | pci_scan_child_bus(bus); | 894 | |
| 895 | err = pci_scan_root_bus_bridge(bridge); | ||
| 896 | if (err) | ||
| 897 | goto error; | ||
| 898 | |||
| 899 | bus = bridge->bus; | ||
| 900 | |||
| 848 | pci_assign_unassigned_bus_resources(bus); | 901 | pci_assign_unassigned_bus_resources(bus); |
| 849 | list_for_each_entry(child, &bus->children, node) | 902 | list_for_each_entry(child, &bus->children, node) |
| 850 | pcie_bus_configure_settings(child); | 903 | pcie_bus_configure_settings(child); |
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c index 2fe2df51f9f8..f63fa5e0278c 100644 --- a/drivers/pci/host/pcie-xilinx.c +++ b/drivers/pci/host/pcie-xilinx.c | |||
| @@ -633,6 +633,7 @@ static int xilinx_pcie_probe(struct platform_device *pdev) | |||
| 633 | struct device *dev = &pdev->dev; | 633 | struct device *dev = &pdev->dev; |
| 634 | struct xilinx_pcie_port *port; | 634 | struct xilinx_pcie_port *port; |
| 635 | struct pci_bus *bus, *child; | 635 | struct pci_bus *bus, *child; |
| 636 | struct pci_host_bridge *bridge; | ||
| 636 | int err; | 637 | int err; |
| 637 | resource_size_t iobase = 0; | 638 | resource_size_t iobase = 0; |
| 638 | LIST_HEAD(res); | 639 | LIST_HEAD(res); |
| @@ -640,9 +641,11 @@ static int xilinx_pcie_probe(struct platform_device *pdev) | |||
| 640 | if (!dev->of_node) | 641 | if (!dev->of_node) |
| 641 | return -ENODEV; | 642 | return -ENODEV; |
| 642 | 643 | ||
| 643 | port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); | 644 | bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); |
| 644 | if (!port) | 645 | if (!bridge) |
| 645 | return -ENOMEM; | 646 | return -ENODEV; |
| 647 | |||
| 648 | port = pci_host_bridge_priv(bridge); | ||
| 646 | 649 | ||
| 647 | port->dev = dev; | 650 | port->dev = dev; |
| 648 | 651 | ||
| @@ -671,21 +674,26 @@ static int xilinx_pcie_probe(struct platform_device *pdev) | |||
| 671 | if (err) | 674 | if (err) |
| 672 | goto error; | 675 | goto error; |
| 673 | 676 | ||
| 674 | bus = pci_create_root_bus(dev, 0, &xilinx_pcie_ops, port, &res); | 677 | |
| 675 | if (!bus) { | 678 | list_splice_init(&res, &bridge->windows); |
| 676 | err = -ENOMEM; | 679 | bridge->dev.parent = dev; |
| 677 | goto error; | 680 | bridge->sysdata = port; |
| 678 | } | 681 | bridge->busnr = 0; |
| 682 | bridge->ops = &xilinx_pcie_ops; | ||
| 683 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 684 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 679 | 685 | ||
| 680 | #ifdef CONFIG_PCI_MSI | 686 | #ifdef CONFIG_PCI_MSI |
| 681 | xilinx_pcie_msi_chip.dev = dev; | 687 | xilinx_pcie_msi_chip.dev = dev; |
| 682 | bus->msi = &xilinx_pcie_msi_chip; | 688 | bridge->msi = &xilinx_pcie_msi_chip; |
| 683 | #endif | 689 | #endif |
| 684 | pci_scan_child_bus(bus); | 690 | err = pci_scan_root_bus_bridge(bridge); |
| 691 | if (err < 0) | ||
| 692 | goto error; | ||
| 693 | |||
| 694 | bus = bridge->bus; | ||
| 695 | |||
| 685 | pci_assign_unassigned_bus_resources(bus); | 696 | pci_assign_unassigned_bus_resources(bus); |
| 686 | #ifndef CONFIG_MICROBLAZE | ||
| 687 | pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); | ||
| 688 | #endif | ||
| 689 | list_for_each_entry(child, &bus->children, node) | 697 | list_for_each_entry(child, &bus->children, node) |
| 690 | pcie_bus_configure_settings(child); | 698 | pcie_bus_configure_settings(child); |
| 691 | pci_bus_add_devices(bus); | 699 | pci_bus_add_devices(bus); |
| @@ -696,7 +704,7 @@ error: | |||
| 696 | return err; | 704 | return err; |
| 697 | } | 705 | } |
| 698 | 706 | ||
| 699 | static struct of_device_id xilinx_pcie_of_match[] = { | 707 | static const struct of_device_id xilinx_pcie_of_match[] = { |
| 700 | { .compatible = "xlnx,axi-pcie-host-1.00.a", }, | 708 | { .compatible = "xlnx,axi-pcie-host-1.00.a", }, |
| 701 | {} | 709 | {} |
| 702 | }; | 710 | }; |
diff --git a/drivers/pci/host/vmd.c b/drivers/pci/host/vmd.c index e27ad2a3bd33..6088c3083194 100644 --- a/drivers/pci/host/vmd.c +++ b/drivers/pci/host/vmd.c | |||
| @@ -539,7 +539,10 @@ static void vmd_detach_resources(struct vmd_dev *vmd) | |||
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | /* | 541 | /* |
| 542 | * VMD domains start at 0x1000 to not clash with ACPI _SEG domains. | 542 | * VMD domains start at 0x10000 to not clash with ACPI _SEG domains. |
| 543 | * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower | ||
| 544 | * 16 bits are the PCI Segment Group (domain) number. Other bits are | ||
| 545 | * currently reserved. | ||
| 543 | */ | 546 | */ |
| 544 | static int vmd_find_free_domain(void) | 547 | static int vmd_find_free_domain(void) |
| 545 | { | 548 | { |
| @@ -554,6 +557,7 @@ static int vmd_find_free_domain(void) | |||
| 554 | static int vmd_enable_domain(struct vmd_dev *vmd) | 557 | static int vmd_enable_domain(struct vmd_dev *vmd) |
| 555 | { | 558 | { |
| 556 | struct pci_sysdata *sd = &vmd->sysdata; | 559 | struct pci_sysdata *sd = &vmd->sysdata; |
| 560 | struct fwnode_handle *fn; | ||
| 557 | struct resource *res; | 561 | struct resource *res; |
| 558 | u32 upper_bits; | 562 | u32 upper_bits; |
| 559 | unsigned long flags; | 563 | unsigned long flags; |
| @@ -617,8 +621,13 @@ static int vmd_enable_domain(struct vmd_dev *vmd) | |||
| 617 | 621 | ||
| 618 | sd->node = pcibus_to_node(vmd->dev->bus); | 622 | sd->node = pcibus_to_node(vmd->dev->bus); |
| 619 | 623 | ||
| 620 | vmd->irq_domain = pci_msi_create_irq_domain(NULL, &vmd_msi_domain_info, | 624 | fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain); |
| 625 | if (!fn) | ||
| 626 | return -ENODEV; | ||
| 627 | |||
| 628 | vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, | ||
| 621 | x86_vector_domain); | 629 | x86_vector_domain); |
| 630 | irq_domain_free_fwnode(fn); | ||
| 622 | if (!vmd->irq_domain) | 631 | if (!vmd->irq_domain) |
| 623 | return -ENODEV; | 632 | return -ENODEV; |
| 624 | 633 | ||
| @@ -704,7 +713,8 @@ static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 704 | 713 | ||
| 705 | INIT_LIST_HEAD(&vmd->irqs[i].irq_list); | 714 | INIT_LIST_HEAD(&vmd->irqs[i].irq_list); |
| 706 | err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), | 715 | err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i), |
| 707 | vmd_irq, 0, "vmd", &vmd->irqs[i]); | 716 | vmd_irq, IRQF_NO_THREAD, |
| 717 | "vmd", &vmd->irqs[i]); | ||
| 708 | if (err) | 718 | if (err) |
| 709 | return err; | 719 | return err; |
| 710 | } | 720 | } |
| @@ -733,10 +743,10 @@ static void vmd_remove(struct pci_dev *dev) | |||
| 733 | struct vmd_dev *vmd = pci_get_drvdata(dev); | 743 | struct vmd_dev *vmd = pci_get_drvdata(dev); |
| 734 | 744 | ||
| 735 | vmd_detach_resources(vmd); | 745 | vmd_detach_resources(vmd); |
| 736 | vmd_cleanup_srcu(vmd); | ||
| 737 | sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); | 746 | sysfs_remove_link(&vmd->dev->dev.kobj, "domain"); |
| 738 | pci_stop_root_bus(vmd->bus); | 747 | pci_stop_root_bus(vmd->bus); |
| 739 | pci_remove_root_bus(vmd->bus); | 748 | pci_remove_root_bus(vmd->bus); |
| 749 | vmd_cleanup_srcu(vmd); | ||
| 740 | vmd_teardown_dma_ops(vmd); | 750 | vmd_teardown_dma_ops(vmd); |
| 741 | irq_domain_remove(vmd->irq_domain); | 751 | irq_domain_remove(vmd->irq_domain); |
| 742 | } | 752 | } |
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index d9dc7363ac77..120485d6f352 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
| @@ -461,8 +461,6 @@ found: | |||
| 461 | else | 461 | else |
| 462 | iov->dev = dev; | 462 | iov->dev = dev; |
| 463 | 463 | ||
| 464 | mutex_init(&iov->lock); | ||
| 465 | |||
| 466 | dev->sriov = iov; | 464 | dev->sriov = iov; |
| 467 | dev->is_physfn = 1; | 465 | dev->is_physfn = 1; |
| 468 | rc = compute_max_vf_buses(dev); | 466 | rc = compute_max_vf_buses(dev); |
| @@ -491,8 +489,6 @@ static void sriov_release(struct pci_dev *dev) | |||
| 491 | if (dev != dev->sriov->dev) | 489 | if (dev != dev->sriov->dev) |
| 492 | pci_dev_put(dev->sriov->dev); | 490 | pci_dev_put(dev->sriov->dev); |
| 493 | 491 | ||
| 494 | mutex_destroy(&dev->sriov->lock); | ||
| 495 | |||
| 496 | kfree(dev->sriov); | 492 | kfree(dev->sriov); |
| 497 | dev->sriov = NULL; | 493 | dev->sriov = NULL; |
| 498 | } | 494 | } |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index ba44fdfda66b..253d92409bb3 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -1058,7 +1058,7 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
| 1058 | 1058 | ||
| 1059 | for (;;) { | 1059 | for (;;) { |
| 1060 | if (affd) { | 1060 | if (affd) { |
| 1061 | nvec = irq_calc_affinity_vectors(nvec, affd); | 1061 | nvec = irq_calc_affinity_vectors(minvec, nvec, affd); |
| 1062 | if (nvec < minvec) | 1062 | if (nvec < minvec) |
| 1063 | return -ENOSPC; | 1063 | return -ENOSPC; |
| 1064 | } | 1064 | } |
| @@ -1097,7 +1097,7 @@ static int __pci_enable_msix_range(struct pci_dev *dev, | |||
| 1097 | 1097 | ||
| 1098 | for (;;) { | 1098 | for (;;) { |
| 1099 | if (affd) { | 1099 | if (affd) { |
| 1100 | nvec = irq_calc_affinity_vectors(nvec, affd); | 1100 | nvec = irq_calc_affinity_vectors(minvec, nvec, affd); |
| 1101 | if (nvec < minvec) | 1101 | if (nvec < minvec) |
| 1102 | return -ENOSPC; | 1102 | return -ENOSPC; |
| 1103 | } | 1103 | } |
| @@ -1165,16 +1165,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1165 | if (flags & PCI_IRQ_AFFINITY) { | 1165 | if (flags & PCI_IRQ_AFFINITY) { |
| 1166 | if (!affd) | 1166 | if (!affd) |
| 1167 | affd = &msi_default_affd; | 1167 | affd = &msi_default_affd; |
| 1168 | |||
| 1169 | if (affd->pre_vectors + affd->post_vectors > min_vecs) | ||
| 1170 | return -EINVAL; | ||
| 1171 | |||
| 1172 | /* | ||
| 1173 | * If there aren't any vectors left after applying the pre/post | ||
| 1174 | * vectors don't bother with assigning affinity. | ||
| 1175 | */ | ||
| 1176 | if (affd->pre_vectors + affd->post_vectors == min_vecs) | ||
| 1177 | affd = NULL; | ||
| 1178 | } else { | 1168 | } else { |
| 1179 | if (WARN_ON(affd)) | 1169 | if (WARN_ON(affd)) |
| 1180 | affd = NULL; | 1170 | affd = NULL; |
| @@ -1463,7 +1453,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, | |||
| 1463 | if (!domain) | 1453 | if (!domain) |
| 1464 | return NULL; | 1454 | return NULL; |
| 1465 | 1455 | ||
| 1466 | domain->bus_token = DOMAIN_BUS_PCI_MSI; | 1456 | irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI); |
| 1467 | return domain; | 1457 | return domain; |
| 1468 | } | 1458 | } |
| 1469 | EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); | 1459 | EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain); |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 001860361434..e70c1c7ba1bf 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -21,13 +21,12 @@ | |||
| 21 | #include "pci.h" | 21 | #include "pci.h" |
| 22 | 22 | ||
| 23 | /* | 23 | /* |
| 24 | * The UUID is defined in the PCI Firmware Specification available here: | 24 | * The GUID is defined in the PCI Firmware Specification available here: |
| 25 | * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf | 25 | * https://www.pcisig.com/members/downloads/pcifw_r3_1_13Dec10.pdf |
| 26 | */ | 26 | */ |
| 27 | const u8 pci_acpi_dsm_uuid[] = { | 27 | const guid_t pci_acpi_dsm_guid = |
| 28 | 0xd0, 0x37, 0xc9, 0xe5, 0x53, 0x35, 0x7a, 0x4d, | 28 | GUID_INIT(0xe5c937d0, 0x3553, 0x4d7a, |
| 29 | 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d | 29 | 0x91, 0x17, 0xea, 0x4d, 0x19, 0xc3, 0x43, 0x4d); |
| 30 | }; | ||
| 31 | 30 | ||
| 32 | #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) | 31 | #if defined(CONFIG_PCI_QUIRKS) && defined(CONFIG_ARM64) |
| 33 | static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) | 32 | static int acpi_get_rc_addr(struct acpi_device *adev, struct resource *res) |
| @@ -395,29 +394,26 @@ bool pciehp_is_native(struct pci_dev *pdev) | |||
| 395 | 394 | ||
| 396 | /** | 395 | /** |
| 397 | * pci_acpi_wake_bus - Root bus wakeup notification fork function. | 396 | * pci_acpi_wake_bus - Root bus wakeup notification fork function. |
| 398 | * @work: Work item to handle. | 397 | * @context: Device wakeup context. |
| 399 | */ | 398 | */ |
| 400 | static void pci_acpi_wake_bus(struct work_struct *work) | 399 | static void pci_acpi_wake_bus(struct acpi_device_wakeup_context *context) |
| 401 | { | 400 | { |
| 402 | struct acpi_device *adev; | 401 | struct acpi_device *adev; |
| 403 | struct acpi_pci_root *root; | 402 | struct acpi_pci_root *root; |
| 404 | 403 | ||
| 405 | adev = container_of(work, struct acpi_device, wakeup.context.work); | 404 | adev = container_of(context, struct acpi_device, wakeup.context); |
| 406 | root = acpi_driver_data(adev); | 405 | root = acpi_driver_data(adev); |
| 407 | pci_pme_wakeup_bus(root->bus); | 406 | pci_pme_wakeup_bus(root->bus); |
| 408 | } | 407 | } |
| 409 | 408 | ||
| 410 | /** | 409 | /** |
| 411 | * pci_acpi_wake_dev - PCI device wakeup notification work function. | 410 | * pci_acpi_wake_dev - PCI device wakeup notification work function. |
| 412 | * @handle: ACPI handle of a device the notification is for. | 411 | * @context: Device wakeup context. |
| 413 | * @work: Work item to handle. | ||
| 414 | */ | 412 | */ |
| 415 | static void pci_acpi_wake_dev(struct work_struct *work) | 413 | static void pci_acpi_wake_dev(struct acpi_device_wakeup_context *context) |
| 416 | { | 414 | { |
| 417 | struct acpi_device_wakeup_context *context; | ||
| 418 | struct pci_dev *pci_dev; | 415 | struct pci_dev *pci_dev; |
| 419 | 416 | ||
| 420 | context = container_of(work, struct acpi_device_wakeup_context, work); | ||
| 421 | pci_dev = to_pci_dev(context->dev); | 417 | pci_dev = to_pci_dev(context->dev); |
| 422 | 418 | ||
| 423 | if (pci_dev->pme_poll) | 419 | if (pci_dev->pme_poll) |
| @@ -425,7 +421,7 @@ static void pci_acpi_wake_dev(struct work_struct *work) | |||
| 425 | 421 | ||
| 426 | if (pci_dev->current_state == PCI_D3cold) { | 422 | if (pci_dev->current_state == PCI_D3cold) { |
| 427 | pci_wakeup_event(pci_dev); | 423 | pci_wakeup_event(pci_dev); |
| 428 | pm_runtime_resume(&pci_dev->dev); | 424 | pm_request_resume(&pci_dev->dev); |
| 429 | return; | 425 | return; |
| 430 | } | 426 | } |
| 431 | 427 | ||
| @@ -434,7 +430,7 @@ static void pci_acpi_wake_dev(struct work_struct *work) | |||
| 434 | pci_check_pme_status(pci_dev); | 430 | pci_check_pme_status(pci_dev); |
| 435 | 431 | ||
| 436 | pci_wakeup_event(pci_dev); | 432 | pci_wakeup_event(pci_dev); |
| 437 | pm_runtime_resume(&pci_dev->dev); | 433 | pm_request_resume(&pci_dev->dev); |
| 438 | 434 | ||
| 439 | pci_pme_wakeup_bus(pci_dev->subordinate); | 435 | pci_pme_wakeup_bus(pci_dev->subordinate); |
| 440 | } | 436 | } |
| @@ -573,67 +569,29 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev) | |||
| 573 | return state_conv[state]; | 569 | return state_conv[state]; |
| 574 | } | 570 | } |
| 575 | 571 | ||
| 576 | static bool acpi_pci_can_wakeup(struct pci_dev *dev) | 572 | static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable) |
| 577 | { | ||
| 578 | struct acpi_device *adev = ACPI_COMPANION(&dev->dev); | ||
| 579 | return adev ? acpi_device_can_wakeup(adev) : false; | ||
| 580 | } | ||
| 581 | |||
| 582 | static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable) | ||
| 583 | { | 573 | { |
| 584 | while (bus->parent) { | 574 | while (bus->parent) { |
| 585 | if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable)) | 575 | if (acpi_pm_device_can_wakeup(&bus->self->dev)) |
| 586 | return; | 576 | return acpi_pm_set_device_wakeup(&bus->self->dev, enable); |
| 587 | bus = bus->parent; | ||
| 588 | } | ||
| 589 | 577 | ||
| 590 | /* We have reached the root bus. */ | ||
| 591 | if (bus->bridge) | ||
| 592 | acpi_pm_device_sleep_wake(bus->bridge, enable); | ||
| 593 | } | ||
| 594 | |||
| 595 | static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable) | ||
| 596 | { | ||
| 597 | if (acpi_pci_can_wakeup(dev)) | ||
| 598 | return acpi_pm_device_sleep_wake(&dev->dev, enable); | ||
| 599 | |||
| 600 | acpi_pci_propagate_wakeup_enable(dev->bus, enable); | ||
| 601 | return 0; | ||
| 602 | } | ||
| 603 | |||
| 604 | static void acpi_pci_propagate_run_wake(struct pci_bus *bus, bool enable) | ||
| 605 | { | ||
| 606 | while (bus->parent) { | ||
| 607 | struct pci_dev *bridge = bus->self; | ||
| 608 | |||
| 609 | if (bridge->pme_interrupt) | ||
| 610 | return; | ||
| 611 | if (!acpi_pm_device_run_wake(&bridge->dev, enable)) | ||
| 612 | return; | ||
| 613 | bus = bus->parent; | 578 | bus = bus->parent; |
| 614 | } | 579 | } |
| 615 | 580 | ||
| 616 | /* We have reached the root bus. */ | 581 | /* We have reached the root bus. */ |
| 617 | if (bus->bridge) | 582 | if (bus->bridge) { |
| 618 | acpi_pm_device_run_wake(bus->bridge, enable); | 583 | if (acpi_pm_device_can_wakeup(bus->bridge)) |
| 584 | return acpi_pm_set_device_wakeup(bus->bridge, enable); | ||
| 585 | } | ||
| 586 | return 0; | ||
| 619 | } | 587 | } |
| 620 | 588 | ||
| 621 | static int acpi_pci_run_wake(struct pci_dev *dev, bool enable) | 589 | static int acpi_pci_wakeup(struct pci_dev *dev, bool enable) |
| 622 | { | 590 | { |
| 623 | /* | 591 | if (acpi_pm_device_can_wakeup(&dev->dev)) |
| 624 | * Per PCI Express Base Specification Revision 2.0 section | 592 | return acpi_pm_set_device_wakeup(&dev->dev, enable); |
| 625 | * 5.3.3.2 Link Wakeup, platform support is needed for D3cold | ||
| 626 | * waking up to power on the main link even if there is PME | ||
| 627 | * support for D3cold | ||
| 628 | */ | ||
| 629 | if (dev->pme_interrupt && !dev->runtime_d3cold) | ||
| 630 | return 0; | ||
| 631 | |||
| 632 | if (!acpi_pm_device_run_wake(&dev->dev, enable)) | ||
| 633 | return 0; | ||
| 634 | 593 | ||
| 635 | acpi_pci_propagate_run_wake(dev->bus, enable); | 594 | return acpi_pci_propagate_wakeup(dev->bus, enable); |
| 636 | return 0; | ||
| 637 | } | 595 | } |
| 638 | 596 | ||
| 639 | static bool acpi_pci_need_resume(struct pci_dev *dev) | 597 | static bool acpi_pci_need_resume(struct pci_dev *dev) |
| @@ -657,8 +615,7 @@ static const struct pci_platform_pm_ops acpi_pci_platform_pm = { | |||
| 657 | .set_state = acpi_pci_set_power_state, | 615 | .set_state = acpi_pci_set_power_state, |
| 658 | .get_state = acpi_pci_get_power_state, | 616 | .get_state = acpi_pci_get_power_state, |
| 659 | .choose_state = acpi_pci_choose_state, | 617 | .choose_state = acpi_pci_choose_state, |
| 660 | .sleep_wake = acpi_pci_sleep_wake, | 618 | .set_wakeup = acpi_pci_wakeup, |
| 661 | .run_wake = acpi_pci_run_wake, | ||
| 662 | .need_resume = acpi_pci_need_resume, | 619 | .need_resume = acpi_pci_need_resume, |
| 663 | }; | 620 | }; |
| 664 | 621 | ||
| @@ -680,7 +637,7 @@ void acpi_pci_add_bus(struct pci_bus *bus) | |||
| 680 | if (!pci_is_root_bus(bus)) | 637 | if (!pci_is_root_bus(bus)) |
| 681 | return; | 638 | return; |
| 682 | 639 | ||
| 683 | obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), pci_acpi_dsm_uuid, 3, | 640 | obj = acpi_evaluate_dsm(ACPI_HANDLE(bus->bridge), &pci_acpi_dsm_guid, 3, |
| 684 | RESET_DELAY_DSM, NULL); | 641 | RESET_DELAY_DSM, NULL); |
| 685 | if (!obj) | 642 | if (!obj) |
| 686 | return; | 643 | return; |
| @@ -745,7 +702,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev, | |||
| 745 | if (bridge->ignore_reset_delay) | 702 | if (bridge->ignore_reset_delay) |
| 746 | pdev->d3cold_delay = 0; | 703 | pdev->d3cold_delay = 0; |
| 747 | 704 | ||
| 748 | obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 3, | 705 | obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 3, |
| 749 | FUNCTION_DELAY_DSM, NULL); | 706 | FUNCTION_DELAY_DSM, NULL); |
| 750 | if (!obj) | 707 | if (!obj) |
| 751 | return; | 708 | return; |
| @@ -781,9 +738,7 @@ static void pci_acpi_setup(struct device *dev) | |||
| 781 | return; | 738 | return; |
| 782 | 739 | ||
| 783 | device_set_wakeup_capable(dev, true); | 740 | device_set_wakeup_capable(dev, true); |
| 784 | acpi_pci_sleep_wake(pci_dev, false); | 741 | acpi_pci_wakeup(pci_dev, false); |
| 785 | if (adev->wakeup.flags.run_wake) | ||
| 786 | device_set_run_wake(dev, true); | ||
| 787 | } | 742 | } |
| 788 | 743 | ||
| 789 | static void pci_acpi_cleanup(struct device *dev) | 744 | static void pci_acpi_cleanup(struct device *dev) |
| @@ -794,10 +749,8 @@ static void pci_acpi_cleanup(struct device *dev) | |||
| 794 | return; | 749 | return; |
| 795 | 750 | ||
| 796 | pci_acpi_remove_pm_notifier(adev); | 751 | pci_acpi_remove_pm_notifier(adev); |
| 797 | if (adev->wakeup.flags.valid) { | 752 | if (adev->wakeup.flags.valid) |
| 798 | device_set_wakeup_capable(dev, false); | 753 | device_set_wakeup_capable(dev, false); |
| 799 | device_set_run_wake(dev, false); | ||
| 800 | } | ||
| 801 | } | 754 | } |
| 802 | 755 | ||
| 803 | static bool pci_acpi_bus_match(struct device *dev) | 756 | static bool pci_acpi_bus_match(struct device *dev) |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 192e7b681b96..d51e8738f9c2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -96,7 +96,7 @@ static void pci_free_dynids(struct pci_driver *drv) | |||
| 96 | * | 96 | * |
| 97 | * Allow PCI IDs to be added to an existing driver via sysfs. | 97 | * Allow PCI IDs to be added to an existing driver via sysfs. |
| 98 | */ | 98 | */ |
| 99 | static ssize_t store_new_id(struct device_driver *driver, const char *buf, | 99 | static ssize_t new_id_store(struct device_driver *driver, const char *buf, |
| 100 | size_t count) | 100 | size_t count) |
| 101 | { | 101 | { |
| 102 | struct pci_driver *pdrv = to_pci_driver(driver); | 102 | struct pci_driver *pdrv = to_pci_driver(driver); |
| @@ -154,7 +154,7 @@ static ssize_t store_new_id(struct device_driver *driver, const char *buf, | |||
| 154 | return retval; | 154 | return retval; |
| 155 | return count; | 155 | return count; |
| 156 | } | 156 | } |
| 157 | static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); | 157 | static DRIVER_ATTR_WO(new_id); |
| 158 | 158 | ||
| 159 | /** | 159 | /** |
| 160 | * store_remove_id - remove a PCI device ID from this driver | 160 | * store_remove_id - remove a PCI device ID from this driver |
| @@ -164,7 +164,7 @@ static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); | |||
| 164 | * | 164 | * |
| 165 | * Removes a dynamic pci device ID to this driver. | 165 | * Removes a dynamic pci device ID to this driver. |
| 166 | */ | 166 | */ |
| 167 | static ssize_t store_remove_id(struct device_driver *driver, const char *buf, | 167 | static ssize_t remove_id_store(struct device_driver *driver, const char *buf, |
| 168 | size_t count) | 168 | size_t count) |
| 169 | { | 169 | { |
| 170 | struct pci_dynid *dynid, *n; | 170 | struct pci_dynid *dynid, *n; |
| @@ -198,7 +198,7 @@ static ssize_t store_remove_id(struct device_driver *driver, const char *buf, | |||
| 198 | 198 | ||
| 199 | return retval; | 199 | return retval; |
| 200 | } | 200 | } |
| 201 | static DRIVER_ATTR(remove_id, S_IWUSR, NULL, store_remove_id); | 201 | static DRIVER_ATTR_WO(remove_id); |
| 202 | 202 | ||
| 203 | static struct attribute *pci_drv_attrs[] = { | 203 | static struct attribute *pci_drv_attrs[] = { |
| 204 | &driver_attr_new_id.attr, | 204 | &driver_attr_new_id.attr, |
| @@ -320,10 +320,19 @@ static long local_pci_probe(void *_ddi) | |||
| 320 | return 0; | 320 | return 0; |
| 321 | } | 321 | } |
| 322 | 322 | ||
| 323 | static bool pci_physfn_is_probed(struct pci_dev *dev) | ||
| 324 | { | ||
| 325 | #ifdef CONFIG_PCI_IOV | ||
| 326 | return dev->is_virtfn && dev->physfn->is_probed; | ||
| 327 | #else | ||
| 328 | return false; | ||
| 329 | #endif | ||
| 330 | } | ||
| 331 | |||
| 323 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | 332 | static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, |
| 324 | const struct pci_device_id *id) | 333 | const struct pci_device_id *id) |
| 325 | { | 334 | { |
| 326 | int error, node; | 335 | int error, node, cpu; |
| 327 | struct drv_dev_and_id ddi = { drv, dev, id }; | 336 | struct drv_dev_and_id ddi = { drv, dev, id }; |
| 328 | 337 | ||
| 329 | /* | 338 | /* |
| @@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev, | |||
| 332 | * on the right node. | 341 | * on the right node. |
| 333 | */ | 342 | */ |
| 334 | node = dev_to_node(&dev->dev); | 343 | node = dev_to_node(&dev->dev); |
| 344 | dev->is_probed = 1; | ||
| 345 | |||
| 346 | cpu_hotplug_disable(); | ||
| 335 | 347 | ||
| 336 | /* | 348 | /* |
| 337 | * On NUMA systems, we are likely to call a PF probe function using | 349 | * Prevent nesting work_on_cpu() for the case where a Virtual Function |
| 338 | * work_on_cpu(). If that probe calls pci_enable_sriov() (which | 350 | * device is probed from work_on_cpu() of the Physical device. |
| 339 | * adds the VF devices via pci_bus_add_device()), we may re-enter | ||
| 340 | * this function to call the VF probe function. Calling | ||
| 341 | * work_on_cpu() again will cause a lockdep warning. Since VFs are | ||
| 342 | * always on the same node as the PF, we can work around this by | ||
| 343 | * avoiding work_on_cpu() when we're already on the correct node. | ||
| 344 | * | ||
| 345 | * Preemption is enabled, so it's theoretically unsafe to use | ||
| 346 | * numa_node_id(), but even if we run the probe function on the | ||
| 347 | * wrong node, it should be functionally correct. | ||
| 348 | */ | 351 | */ |
| 349 | if (node >= 0 && node != numa_node_id()) { | 352 | if (node < 0 || node >= MAX_NUMNODES || !node_online(node) || |
| 350 | int cpu; | 353 | pci_physfn_is_probed(dev)) |
| 351 | 354 | cpu = nr_cpu_ids; | |
| 352 | get_online_cpus(); | 355 | else |
| 353 | cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); | 356 | cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask); |
| 354 | if (cpu < nr_cpu_ids) | 357 | |
| 355 | error = work_on_cpu(cpu, local_pci_probe, &ddi); | 358 | if (cpu < nr_cpu_ids) |
| 356 | else | 359 | error = work_on_cpu(cpu, local_pci_probe, &ddi); |
| 357 | error = local_pci_probe(&ddi); | 360 | else |
| 358 | put_online_cpus(); | ||
| 359 | } else | ||
| 360 | error = local_pci_probe(&ddi); | 361 | error = local_pci_probe(&ddi); |
| 361 | 362 | ||
| 363 | dev->is_probed = 0; | ||
| 364 | cpu_hotplug_enable(); | ||
| 362 | return error; | 365 | return error; |
| 363 | } | 366 | } |
| 364 | 367 | ||
| @@ -412,6 +415,8 @@ static int pci_device_probe(struct device *dev) | |||
| 412 | struct pci_dev *pci_dev = to_pci_dev(dev); | 415 | struct pci_dev *pci_dev = to_pci_dev(dev); |
| 413 | struct pci_driver *drv = to_pci_driver(dev->driver); | 416 | struct pci_driver *drv = to_pci_driver(dev->driver); |
| 414 | 417 | ||
| 418 | pci_assign_irq(pci_dev); | ||
| 419 | |||
| 415 | error = pcibios_alloc_irq(pci_dev); | 420 | error = pcibios_alloc_irq(pci_dev); |
| 416 | if (error < 0) | 421 | if (error < 0) |
| 417 | return error; | 422 | return error; |
| @@ -506,6 +511,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) | |||
| 506 | } | 511 | } |
| 507 | 512 | ||
| 508 | pci_restore_state(pci_dev); | 513 | pci_restore_state(pci_dev); |
| 514 | pci_pme_restore(pci_dev); | ||
| 509 | return 0; | 515 | return 0; |
| 510 | } | 516 | } |
| 511 | 517 | ||
| @@ -517,6 +523,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) | |||
| 517 | { | 523 | { |
| 518 | pci_power_up(pci_dev); | 524 | pci_power_up(pci_dev); |
| 519 | pci_restore_state(pci_dev); | 525 | pci_restore_state(pci_dev); |
| 526 | pci_pme_restore(pci_dev); | ||
| 520 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 527 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
| 521 | } | 528 | } |
| 522 | 529 | ||
| @@ -964,6 +971,7 @@ static int pci_pm_thaw_noirq(struct device *dev) | |||
| 964 | return pci_legacy_resume_early(dev); | 971 | return pci_legacy_resume_early(dev); |
| 965 | 972 | ||
| 966 | pci_update_current_state(pci_dev, PCI_D0); | 973 | pci_update_current_state(pci_dev, PCI_D0); |
| 974 | pci_restore_state(pci_dev); | ||
| 967 | 975 | ||
| 968 | if (drv && drv->pm && drv->pm->thaw_noirq) | 976 | if (drv && drv->pm && drv->pm->thaw_noirq) |
| 969 | error = drv->pm->thaw_noirq(dev); | 977 | error = drv->pm->thaw_noirq(dev); |
| @@ -1216,7 +1224,7 @@ static int pci_pm_runtime_resume(struct device *dev) | |||
| 1216 | 1224 | ||
| 1217 | pci_restore_standard_config(pci_dev); | 1225 | pci_restore_standard_config(pci_dev); |
| 1218 | pci_fixup_device(pci_fixup_resume_early, pci_dev); | 1226 | pci_fixup_device(pci_fixup_resume_early, pci_dev); |
| 1219 | __pci_enable_wake(pci_dev, PCI_D0, true, false); | 1227 | pci_enable_wake(pci_dev, PCI_D0, false); |
| 1220 | pci_fixup_device(pci_fixup_resume, pci_dev); | 1228 | pci_fixup_device(pci_fixup_resume, pci_dev); |
| 1221 | 1229 | ||
| 1222 | rc = pm->runtime_resume(dev); | 1230 | rc = pm->runtime_resume(dev); |
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 51357377efbc..a7a41d9c29df 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c | |||
| @@ -43,9 +43,11 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, | |||
| 43 | { | 43 | { |
| 44 | const struct dmi_device *dmi; | 44 | const struct dmi_device *dmi; |
| 45 | struct dmi_dev_onboard *donboard; | 45 | struct dmi_dev_onboard *donboard; |
| 46 | int domain_nr; | ||
| 46 | int bus; | 47 | int bus; |
| 47 | int devfn; | 48 | int devfn; |
| 48 | 49 | ||
| 50 | domain_nr = pci_domain_nr(pdev->bus); | ||
| 49 | bus = pdev->bus->number; | 51 | bus = pdev->bus->number; |
| 50 | devfn = pdev->devfn; | 52 | devfn = pdev->devfn; |
| 51 | 53 | ||
| @@ -53,8 +55,9 @@ static size_t find_smbios_instance_string(struct pci_dev *pdev, char *buf, | |||
| 53 | while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, | 55 | while ((dmi = dmi_find_device(DMI_DEV_TYPE_DEV_ONBOARD, |
| 54 | NULL, dmi)) != NULL) { | 56 | NULL, dmi)) != NULL) { |
| 55 | donboard = dmi->device_data; | 57 | donboard = dmi->device_data; |
| 56 | if (donboard && donboard->bus == bus && | 58 | if (donboard && donboard->segment == domain_nr && |
| 57 | donboard->devfn == devfn) { | 59 | donboard->bus == bus && |
| 60 | donboard->devfn == devfn) { | ||
| 58 | if (buf) { | 61 | if (buf) { |
| 59 | if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) | 62 | if (attribute == SMBIOS_ATTR_INSTANCE_SHOW) |
| 60 | return scnprintf(buf, PAGE_SIZE, | 63 | return scnprintf(buf, PAGE_SIZE, |
| @@ -172,7 +175,7 @@ static int dsm_get_label(struct device *dev, char *buf, | |||
| 172 | if (!handle) | 175 | if (!handle) |
| 173 | return -1; | 176 | return -1; |
| 174 | 177 | ||
| 175 | obj = acpi_evaluate_dsm(handle, pci_acpi_dsm_uuid, 0x2, | 178 | obj = acpi_evaluate_dsm(handle, &pci_acpi_dsm_guid, 0x2, |
| 176 | DEVICE_LABEL_DSM, NULL); | 179 | DEVICE_LABEL_DSM, NULL); |
| 177 | if (!obj) | 180 | if (!obj) |
| 178 | return -1; | 181 | return -1; |
| @@ -212,7 +215,7 @@ static bool device_has_dsm(struct device *dev) | |||
| 212 | if (!handle) | 215 | if (!handle) |
| 213 | return false; | 216 | return false; |
| 214 | 217 | ||
| 215 | return !!acpi_check_dsm(handle, pci_acpi_dsm_uuid, 0x2, | 218 | return !!acpi_check_dsm(handle, &pci_acpi_dsm_guid, 0x2, |
| 216 | 1 << DEVICE_LABEL_DSM); | 219 | 1 << DEVICE_LABEL_DSM); |
| 217 | } | 220 | } |
| 218 | 221 | ||
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 1c4af7227bca..a4ac940c7696 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c | |||
| @@ -39,12 +39,7 @@ static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) | |||
| 39 | return PCI_D3hot; | 39 | return PCI_D3hot; |
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | static int mid_pci_sleep_wake(struct pci_dev *dev, bool enable) | 42 | static int mid_pci_wakeup(struct pci_dev *dev, bool enable) |
| 43 | { | ||
| 44 | return 0; | ||
| 45 | } | ||
| 46 | |||
| 47 | static int mid_pci_run_wake(struct pci_dev *dev, bool enable) | ||
| 48 | { | 43 | { |
| 49 | return 0; | 44 | return 0; |
| 50 | } | 45 | } |
| @@ -59,8 +54,7 @@ static const struct pci_platform_pm_ops mid_pci_platform_pm = { | |||
| 59 | .set_state = mid_pci_set_power_state, | 54 | .set_state = mid_pci_set_power_state, |
| 60 | .get_state = mid_pci_get_power_state, | 55 | .get_state = mid_pci_get_power_state, |
| 61 | .choose_state = mid_pci_choose_state, | 56 | .choose_state = mid_pci_choose_state, |
| 62 | .sleep_wake = mid_pci_sleep_wake, | 57 | .set_wakeup = mid_pci_wakeup, |
| 63 | .run_wake = mid_pci_run_wake, | ||
| 64 | .need_resume = mid_pci_need_resume, | 58 | .need_resume = mid_pci_need_resume, |
| 65 | }; | 59 | }; |
| 66 | 60 | ||
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 31e99613a12e..2f3780b50723 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -154,6 +154,129 @@ static ssize_t resource_show(struct device *dev, struct device_attribute *attr, | |||
| 154 | } | 154 | } |
| 155 | static DEVICE_ATTR_RO(resource); | 155 | static DEVICE_ATTR_RO(resource); |
| 156 | 156 | ||
| 157 | static ssize_t max_link_speed_show(struct device *dev, | ||
| 158 | struct device_attribute *attr, char *buf) | ||
| 159 | { | ||
| 160 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 161 | u32 linkcap; | ||
| 162 | int err; | ||
| 163 | const char *speed; | ||
| 164 | |||
| 165 | err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); | ||
| 166 | if (err) | ||
| 167 | return -EINVAL; | ||
| 168 | |||
| 169 | switch (linkcap & PCI_EXP_LNKCAP_SLS) { | ||
| 170 | case PCI_EXP_LNKCAP_SLS_8_0GB: | ||
| 171 | speed = "8 GT/s"; | ||
| 172 | break; | ||
| 173 | case PCI_EXP_LNKCAP_SLS_5_0GB: | ||
| 174 | speed = "5 GT/s"; | ||
| 175 | break; | ||
| 176 | case PCI_EXP_LNKCAP_SLS_2_5GB: | ||
| 177 | speed = "2.5 GT/s"; | ||
| 178 | break; | ||
| 179 | default: | ||
| 180 | speed = "Unknown speed"; | ||
| 181 | } | ||
| 182 | |||
| 183 | return sprintf(buf, "%s\n", speed); | ||
| 184 | } | ||
| 185 | static DEVICE_ATTR_RO(max_link_speed); | ||
| 186 | |||
| 187 | static ssize_t max_link_width_show(struct device *dev, | ||
| 188 | struct device_attribute *attr, char *buf) | ||
| 189 | { | ||
| 190 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 191 | u32 linkcap; | ||
| 192 | int err; | ||
| 193 | |||
| 194 | err = pcie_capability_read_dword(pci_dev, PCI_EXP_LNKCAP, &linkcap); | ||
| 195 | if (err) | ||
| 196 | return -EINVAL; | ||
| 197 | |||
| 198 | return sprintf(buf, "%u\n", (linkcap & PCI_EXP_LNKCAP_MLW) >> 4); | ||
| 199 | } | ||
| 200 | static DEVICE_ATTR_RO(max_link_width); | ||
| 201 | |||
| 202 | static ssize_t current_link_speed_show(struct device *dev, | ||
| 203 | struct device_attribute *attr, char *buf) | ||
| 204 | { | ||
| 205 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 206 | u16 linkstat; | ||
| 207 | int err; | ||
| 208 | const char *speed; | ||
| 209 | |||
| 210 | err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); | ||
| 211 | if (err) | ||
| 212 | return -EINVAL; | ||
| 213 | |||
| 214 | switch (linkstat & PCI_EXP_LNKSTA_CLS) { | ||
| 215 | case PCI_EXP_LNKSTA_CLS_8_0GB: | ||
| 216 | speed = "8 GT/s"; | ||
| 217 | break; | ||
| 218 | case PCI_EXP_LNKSTA_CLS_5_0GB: | ||
| 219 | speed = "5 GT/s"; | ||
| 220 | break; | ||
| 221 | case PCI_EXP_LNKSTA_CLS_2_5GB: | ||
| 222 | speed = "2.5 GT/s"; | ||
| 223 | break; | ||
| 224 | default: | ||
| 225 | speed = "Unknown speed"; | ||
| 226 | } | ||
| 227 | |||
| 228 | return sprintf(buf, "%s\n", speed); | ||
| 229 | } | ||
| 230 | static DEVICE_ATTR_RO(current_link_speed); | ||
| 231 | |||
| 232 | static ssize_t current_link_width_show(struct device *dev, | ||
| 233 | struct device_attribute *attr, char *buf) | ||
| 234 | { | ||
| 235 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 236 | u16 linkstat; | ||
| 237 | int err; | ||
| 238 | |||
| 239 | err = pcie_capability_read_word(pci_dev, PCI_EXP_LNKSTA, &linkstat); | ||
| 240 | if (err) | ||
| 241 | return -EINVAL; | ||
| 242 | |||
| 243 | return sprintf(buf, "%u\n", | ||
| 244 | (linkstat & PCI_EXP_LNKSTA_NLW) >> PCI_EXP_LNKSTA_NLW_SHIFT); | ||
| 245 | } | ||
| 246 | static DEVICE_ATTR_RO(current_link_width); | ||
| 247 | |||
| 248 | static ssize_t secondary_bus_number_show(struct device *dev, | ||
| 249 | struct device_attribute *attr, | ||
| 250 | char *buf) | ||
| 251 | { | ||
| 252 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 253 | u8 sec_bus; | ||
| 254 | int err; | ||
| 255 | |||
| 256 | err = pci_read_config_byte(pci_dev, PCI_SECONDARY_BUS, &sec_bus); | ||
| 257 | if (err) | ||
| 258 | return -EINVAL; | ||
| 259 | |||
| 260 | return sprintf(buf, "%u\n", sec_bus); | ||
| 261 | } | ||
| 262 | static DEVICE_ATTR_RO(secondary_bus_number); | ||
| 263 | |||
| 264 | static ssize_t subordinate_bus_number_show(struct device *dev, | ||
| 265 | struct device_attribute *attr, | ||
| 266 | char *buf) | ||
| 267 | { | ||
| 268 | struct pci_dev *pci_dev = to_pci_dev(dev); | ||
| 269 | u8 sub_bus; | ||
| 270 | int err; | ||
| 271 | |||
| 272 | err = pci_read_config_byte(pci_dev, PCI_SUBORDINATE_BUS, &sub_bus); | ||
| 273 | if (err) | ||
| 274 | return -EINVAL; | ||
| 275 | |||
| 276 | return sprintf(buf, "%u\n", sub_bus); | ||
| 277 | } | ||
| 278 | static DEVICE_ATTR_RO(subordinate_bus_number); | ||
| 279 | |||
| 157 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, | 280 | static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, |
| 158 | char *buf) | 281 | char *buf) |
| 159 | { | 282 | { |
| @@ -472,7 +595,6 @@ static ssize_t sriov_numvfs_store(struct device *dev, | |||
| 472 | const char *buf, size_t count) | 595 | const char *buf, size_t count) |
| 473 | { | 596 | { |
| 474 | struct pci_dev *pdev = to_pci_dev(dev); | 597 | struct pci_dev *pdev = to_pci_dev(dev); |
| 475 | struct pci_sriov *iov = pdev->sriov; | ||
| 476 | int ret; | 598 | int ret; |
| 477 | u16 num_vfs; | 599 | u16 num_vfs; |
| 478 | 600 | ||
| @@ -483,7 +605,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, | |||
| 483 | if (num_vfs > pci_sriov_get_totalvfs(pdev)) | 605 | if (num_vfs > pci_sriov_get_totalvfs(pdev)) |
| 484 | return -ERANGE; | 606 | return -ERANGE; |
| 485 | 607 | ||
| 486 | mutex_lock(&iov->dev->sriov->lock); | 608 | device_lock(&pdev->dev); |
| 487 | 609 | ||
| 488 | if (num_vfs == pdev->sriov->num_VFs) | 610 | if (num_vfs == pdev->sriov->num_VFs) |
| 489 | goto exit; | 611 | goto exit; |
| @@ -518,7 +640,7 @@ static ssize_t sriov_numvfs_store(struct device *dev, | |||
| 518 | num_vfs, ret); | 640 | num_vfs, ret); |
| 519 | 641 | ||
| 520 | exit: | 642 | exit: |
| 521 | mutex_unlock(&iov->dev->sriov->lock); | 643 | device_unlock(&pdev->dev); |
| 522 | 644 | ||
| 523 | if (ret < 0) | 645 | if (ret < 0) |
| 524 | return ret; | 646 | return ret; |
| @@ -629,12 +751,17 @@ static struct attribute *pci_dev_attrs[] = { | |||
| 629 | NULL, | 751 | NULL, |
| 630 | }; | 752 | }; |
| 631 | 753 | ||
| 632 | static const struct attribute_group pci_dev_group = { | 754 | static struct attribute *pci_bridge_attrs[] = { |
| 633 | .attrs = pci_dev_attrs, | 755 | &dev_attr_subordinate_bus_number.attr, |
| 756 | &dev_attr_secondary_bus_number.attr, | ||
| 757 | NULL, | ||
| 634 | }; | 758 | }; |
| 635 | 759 | ||
| 636 | const struct attribute_group *pci_dev_groups[] = { | 760 | static struct attribute *pcie_dev_attrs[] = { |
| 637 | &pci_dev_group, | 761 | &dev_attr_current_link_speed.attr, |
| 762 | &dev_attr_current_link_width.attr, | ||
| 763 | &dev_attr_max_link_width.attr, | ||
| 764 | &dev_attr_max_link_speed.attr, | ||
| 638 | NULL, | 765 | NULL, |
| 639 | }; | 766 | }; |
| 640 | 767 | ||
| @@ -1557,6 +1684,57 @@ static umode_t pci_dev_hp_attrs_are_visible(struct kobject *kobj, | |||
| 1557 | return a->mode; | 1684 | return a->mode; |
| 1558 | } | 1685 | } |
| 1559 | 1686 | ||
| 1687 | static umode_t pci_bridge_attrs_are_visible(struct kobject *kobj, | ||
| 1688 | struct attribute *a, int n) | ||
| 1689 | { | ||
| 1690 | struct device *dev = kobj_to_dev(kobj); | ||
| 1691 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 1692 | |||
| 1693 | if (pci_is_bridge(pdev)) | ||
| 1694 | return a->mode; | ||
| 1695 | |||
| 1696 | return 0; | ||
| 1697 | } | ||
| 1698 | |||
| 1699 | static umode_t pcie_dev_attrs_are_visible(struct kobject *kobj, | ||
| 1700 | struct attribute *a, int n) | ||
| 1701 | { | ||
| 1702 | struct device *dev = kobj_to_dev(kobj); | ||
| 1703 | struct pci_dev *pdev = to_pci_dev(dev); | ||
| 1704 | |||
| 1705 | if (pci_is_pcie(pdev)) | ||
| 1706 | return a->mode; | ||
| 1707 | |||
| 1708 | return 0; | ||
| 1709 | } | ||
| 1710 | |||
| 1711 | static const struct attribute_group pci_dev_group = { | ||
| 1712 | .attrs = pci_dev_attrs, | ||
| 1713 | }; | ||
| 1714 | |||
| 1715 | const struct attribute_group *pci_dev_groups[] = { | ||
| 1716 | &pci_dev_group, | ||
| 1717 | NULL, | ||
| 1718 | }; | ||
| 1719 | |||
| 1720 | static const struct attribute_group pci_bridge_group = { | ||
| 1721 | .attrs = pci_bridge_attrs, | ||
| 1722 | }; | ||
| 1723 | |||
| 1724 | const struct attribute_group *pci_bridge_groups[] = { | ||
| 1725 | &pci_bridge_group, | ||
| 1726 | NULL, | ||
| 1727 | }; | ||
| 1728 | |||
| 1729 | static const struct attribute_group pcie_dev_group = { | ||
| 1730 | .attrs = pcie_dev_attrs, | ||
| 1731 | }; | ||
| 1732 | |||
| 1733 | const struct attribute_group *pcie_dev_groups[] = { | ||
| 1734 | &pcie_dev_group, | ||
| 1735 | NULL, | ||
| 1736 | }; | ||
| 1737 | |||
| 1560 | static struct attribute_group pci_dev_hp_attr_group = { | 1738 | static struct attribute_group pci_dev_hp_attr_group = { |
| 1561 | .attrs = pci_dev_hp_attrs, | 1739 | .attrs = pci_dev_hp_attrs, |
| 1562 | .is_visible = pci_dev_hp_attrs_are_visible, | 1740 | .is_visible = pci_dev_hp_attrs_are_visible, |
| @@ -1592,12 +1770,24 @@ static struct attribute_group pci_dev_attr_group = { | |||
| 1592 | .is_visible = pci_dev_attrs_are_visible, | 1770 | .is_visible = pci_dev_attrs_are_visible, |
| 1593 | }; | 1771 | }; |
| 1594 | 1772 | ||
| 1773 | static struct attribute_group pci_bridge_attr_group = { | ||
| 1774 | .attrs = pci_bridge_attrs, | ||
| 1775 | .is_visible = pci_bridge_attrs_are_visible, | ||
| 1776 | }; | ||
| 1777 | |||
| 1778 | static struct attribute_group pcie_dev_attr_group = { | ||
| 1779 | .attrs = pcie_dev_attrs, | ||
| 1780 | .is_visible = pcie_dev_attrs_are_visible, | ||
| 1781 | }; | ||
| 1782 | |||
| 1595 | static const struct attribute_group *pci_dev_attr_groups[] = { | 1783 | static const struct attribute_group *pci_dev_attr_groups[] = { |
| 1596 | &pci_dev_attr_group, | 1784 | &pci_dev_attr_group, |
| 1597 | &pci_dev_hp_attr_group, | 1785 | &pci_dev_hp_attr_group, |
| 1598 | #ifdef CONFIG_PCI_IOV | 1786 | #ifdef CONFIG_PCI_IOV |
| 1599 | &sriov_dev_attr_group, | 1787 | &sriov_dev_attr_group, |
| 1600 | #endif | 1788 | #endif |
| 1789 | &pci_bridge_attr_group, | ||
| 1790 | &pcie_dev_attr_group, | ||
| 1601 | NULL, | 1791 | NULL, |
| 1602 | }; | 1792 | }; |
| 1603 | 1793 | ||
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 563901cd9c06..af0cc3456dc1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/pm_runtime.h> | 28 | #include <linux/pm_runtime.h> |
| 29 | #include <linux/pci_hotplug.h> | 29 | #include <linux/pci_hotplug.h> |
| 30 | #include <linux/vmalloc.h> | 30 | #include <linux/vmalloc.h> |
| 31 | #include <linux/pci-ats.h> | ||
| 31 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
| 32 | #include <asm/dma.h> | 33 | #include <asm/dma.h> |
| 33 | #include <linux/aer.h> | 34 | #include <linux/aer.h> |
| @@ -455,7 +456,7 @@ struct resource *pci_find_parent_resource(const struct pci_dev *dev, | |||
| 455 | pci_bus_for_each_resource(bus, r, i) { | 456 | pci_bus_for_each_resource(bus, r, i) { |
| 456 | if (!r) | 457 | if (!r) |
| 457 | continue; | 458 | continue; |
| 458 | if (res->start && resource_contains(r, res)) { | 459 | if (resource_contains(r, res)) { |
| 459 | 460 | ||
| 460 | /* | 461 | /* |
| 461 | * If the window is prefetchable but the BAR is | 462 | * If the window is prefetchable but the BAR is |
| @@ -574,8 +575,7 @@ static const struct pci_platform_pm_ops *pci_platform_pm; | |||
| 574 | int pci_set_platform_pm(const struct pci_platform_pm_ops *ops) | 575 | int pci_set_platform_pm(const struct pci_platform_pm_ops *ops) |
| 575 | { | 576 | { |
| 576 | if (!ops->is_manageable || !ops->set_state || !ops->get_state || | 577 | if (!ops->is_manageable || !ops->set_state || !ops->get_state || |
| 577 | !ops->choose_state || !ops->sleep_wake || !ops->run_wake || | 578 | !ops->choose_state || !ops->set_wakeup || !ops->need_resume) |
| 578 | !ops->need_resume) | ||
| 579 | return -EINVAL; | 579 | return -EINVAL; |
| 580 | pci_platform_pm = ops; | 580 | pci_platform_pm = ops; |
| 581 | return 0; | 581 | return 0; |
| @@ -603,16 +603,10 @@ static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev) | |||
| 603 | pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; | 603 | pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR; |
| 604 | } | 604 | } |
| 605 | 605 | ||
| 606 | static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable) | 606 | static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable) |
| 607 | { | 607 | { |
| 608 | return pci_platform_pm ? | 608 | return pci_platform_pm ? |
| 609 | pci_platform_pm->sleep_wake(dev, enable) : -ENODEV; | 609 | pci_platform_pm->set_wakeup(dev, enable) : -ENODEV; |
| 610 | } | ||
| 611 | |||
| 612 | static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable) | ||
| 613 | { | ||
| 614 | return pci_platform_pm ? | ||
| 615 | pci_platform_pm->run_wake(dev, enable) : -ENODEV; | ||
| 616 | } | 610 | } |
| 617 | 611 | ||
| 618 | static inline bool platform_pci_need_resume(struct pci_dev *dev) | 612 | static inline bool platform_pci_need_resume(struct pci_dev *dev) |
| @@ -1173,6 +1167,8 @@ void pci_restore_state(struct pci_dev *dev) | |||
| 1173 | 1167 | ||
| 1174 | /* PCI Express register must be restored first */ | 1168 | /* PCI Express register must be restored first */ |
| 1175 | pci_restore_pcie_state(dev); | 1169 | pci_restore_pcie_state(dev); |
| 1170 | pci_restore_pasid_state(dev); | ||
| 1171 | pci_restore_pri_state(dev); | ||
| 1176 | pci_restore_ats_state(dev); | 1172 | pci_restore_ats_state(dev); |
| 1177 | pci_restore_vc_state(dev); | 1173 | pci_restore_vc_state(dev); |
| 1178 | 1174 | ||
| @@ -1806,6 +1802,28 @@ static void __pci_pme_active(struct pci_dev *dev, bool enable) | |||
| 1806 | } | 1802 | } |
| 1807 | 1803 | ||
| 1808 | /** | 1804 | /** |
| 1805 | * pci_pme_restore - Restore PME configuration after config space restore. | ||
| 1806 | * @dev: PCI device to update. | ||
| 1807 | */ | ||
| 1808 | void pci_pme_restore(struct pci_dev *dev) | ||
| 1809 | { | ||
| 1810 | u16 pmcsr; | ||
| 1811 | |||
| 1812 | if (!dev->pme_support) | ||
| 1813 | return; | ||
| 1814 | |||
| 1815 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | ||
| 1816 | if (dev->wakeup_prepared) { | ||
| 1817 | pmcsr |= PCI_PM_CTRL_PME_ENABLE; | ||
| 1818 | pmcsr &= ~PCI_PM_CTRL_PME_STATUS; | ||
| 1819 | } else { | ||
| 1820 | pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; | ||
| 1821 | pmcsr |= PCI_PM_CTRL_PME_STATUS; | ||
| 1822 | } | ||
| 1823 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | ||
| 1824 | } | ||
| 1825 | |||
| 1826 | /** | ||
| 1809 | * pci_pme_active - enable or disable PCI device's PME# function | 1827 | * pci_pme_active - enable or disable PCI device's PME# function |
| 1810 | * @dev: PCI device to handle. | 1828 | * @dev: PCI device to handle. |
| 1811 | * @enable: 'true' to enable PME# generation; 'false' to disable it. | 1829 | * @enable: 'true' to enable PME# generation; 'false' to disable it. |
| @@ -1872,10 +1890,9 @@ void pci_pme_active(struct pci_dev *dev, bool enable) | |||
| 1872 | EXPORT_SYMBOL(pci_pme_active); | 1890 | EXPORT_SYMBOL(pci_pme_active); |
| 1873 | 1891 | ||
| 1874 | /** | 1892 | /** |
| 1875 | * __pci_enable_wake - enable PCI device as wakeup event source | 1893 | * pci_enable_wake - enable PCI device as wakeup event source |
| 1876 | * @dev: PCI device affected | 1894 | * @dev: PCI device affected |
| 1877 | * @state: PCI state from which device will issue wakeup events | 1895 | * @state: PCI state from which device will issue wakeup events |
| 1878 | * @runtime: True if the events are to be generated at run time | ||
| 1879 | * @enable: True to enable event generation; false to disable | 1896 | * @enable: True to enable event generation; false to disable |
| 1880 | * | 1897 | * |
| 1881 | * This enables the device as a wakeup event source, or disables it. | 1898 | * This enables the device as a wakeup event source, or disables it. |
| @@ -1891,14 +1908,10 @@ EXPORT_SYMBOL(pci_pme_active); | |||
| 1891 | * Error code depending on the platform is returned if both the platform and | 1908 | * Error code depending on the platform is returned if both the platform and |
| 1892 | * the native mechanism fail to enable the generation of wake-up events | 1909 | * the native mechanism fail to enable the generation of wake-up events |
| 1893 | */ | 1910 | */ |
| 1894 | int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, | 1911 | int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) |
| 1895 | bool runtime, bool enable) | ||
| 1896 | { | 1912 | { |
| 1897 | int ret = 0; | 1913 | int ret = 0; |
| 1898 | 1914 | ||
| 1899 | if (enable && !runtime && !device_may_wakeup(&dev->dev)) | ||
| 1900 | return -EINVAL; | ||
| 1901 | |||
| 1902 | /* Don't do the same thing twice in a row for one device. */ | 1915 | /* Don't do the same thing twice in a row for one device. */ |
| 1903 | if (!!enable == !!dev->wakeup_prepared) | 1916 | if (!!enable == !!dev->wakeup_prepared) |
| 1904 | return 0; | 1917 | return 0; |
| @@ -1916,24 +1929,20 @@ int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, | |||
| 1916 | pci_pme_active(dev, true); | 1929 | pci_pme_active(dev, true); |
| 1917 | else | 1930 | else |
| 1918 | ret = 1; | 1931 | ret = 1; |
| 1919 | error = runtime ? platform_pci_run_wake(dev, true) : | 1932 | error = platform_pci_set_wakeup(dev, true); |
| 1920 | platform_pci_sleep_wake(dev, true); | ||
| 1921 | if (ret) | 1933 | if (ret) |
| 1922 | ret = error; | 1934 | ret = error; |
| 1923 | if (!ret) | 1935 | if (!ret) |
| 1924 | dev->wakeup_prepared = true; | 1936 | dev->wakeup_prepared = true; |
| 1925 | } else { | 1937 | } else { |
| 1926 | if (runtime) | 1938 | platform_pci_set_wakeup(dev, false); |
| 1927 | platform_pci_run_wake(dev, false); | ||
| 1928 | else | ||
| 1929 | platform_pci_sleep_wake(dev, false); | ||
| 1930 | pci_pme_active(dev, false); | 1939 | pci_pme_active(dev, false); |
| 1931 | dev->wakeup_prepared = false; | 1940 | dev->wakeup_prepared = false; |
| 1932 | } | 1941 | } |
| 1933 | 1942 | ||
| 1934 | return ret; | 1943 | return ret; |
| 1935 | } | 1944 | } |
| 1936 | EXPORT_SYMBOL(__pci_enable_wake); | 1945 | EXPORT_SYMBOL(pci_enable_wake); |
| 1937 | 1946 | ||
| 1938 | /** | 1947 | /** |
| 1939 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold | 1948 | * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold |
| @@ -1960,12 +1969,13 @@ EXPORT_SYMBOL(pci_wake_from_d3); | |||
| 1960 | /** | 1969 | /** |
| 1961 | * pci_target_state - find an appropriate low power state for a given PCI dev | 1970 | * pci_target_state - find an appropriate low power state for a given PCI dev |
| 1962 | * @dev: PCI device | 1971 | * @dev: PCI device |
| 1972 | * @wakeup: Whether or not wakeup functionality will be enabled for the device. | ||
| 1963 | * | 1973 | * |
| 1964 | * Use underlying platform code to find a supported low power state for @dev. | 1974 | * Use underlying platform code to find a supported low power state for @dev. |
| 1965 | * If the platform can't manage @dev, return the deepest state from which it | 1975 | * If the platform can't manage @dev, return the deepest state from which it |
| 1966 | * can generate wake events, based on any available PME info. | 1976 | * can generate wake events, based on any available PME info. |
| 1967 | */ | 1977 | */ |
| 1968 | static pci_power_t pci_target_state(struct pci_dev *dev) | 1978 | static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) |
| 1969 | { | 1979 | { |
| 1970 | pci_power_t target_state = PCI_D3hot; | 1980 | pci_power_t target_state = PCI_D3hot; |
| 1971 | 1981 | ||
| @@ -2002,7 +2012,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev) | |||
| 2002 | if (dev->current_state == PCI_D3cold) | 2012 | if (dev->current_state == PCI_D3cold) |
| 2003 | target_state = PCI_D3cold; | 2013 | target_state = PCI_D3cold; |
| 2004 | 2014 | ||
| 2005 | if (device_may_wakeup(&dev->dev)) { | 2015 | if (wakeup) { |
| 2006 | /* | 2016 | /* |
| 2007 | * Find the deepest state from which the device can generate | 2017 | * Find the deepest state from which the device can generate |
| 2008 | * wake-up events, make it the target state and enable device | 2018 | * wake-up events, make it the target state and enable device |
| @@ -2028,13 +2038,14 @@ static pci_power_t pci_target_state(struct pci_dev *dev) | |||
| 2028 | */ | 2038 | */ |
| 2029 | int pci_prepare_to_sleep(struct pci_dev *dev) | 2039 | int pci_prepare_to_sleep(struct pci_dev *dev) |
| 2030 | { | 2040 | { |
| 2031 | pci_power_t target_state = pci_target_state(dev); | 2041 | bool wakeup = device_may_wakeup(&dev->dev); |
| 2042 | pci_power_t target_state = pci_target_state(dev, wakeup); | ||
| 2032 | int error; | 2043 | int error; |
| 2033 | 2044 | ||
| 2034 | if (target_state == PCI_POWER_ERROR) | 2045 | if (target_state == PCI_POWER_ERROR) |
| 2035 | return -EIO; | 2046 | return -EIO; |
| 2036 | 2047 | ||
| 2037 | pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); | 2048 | pci_enable_wake(dev, target_state, wakeup); |
| 2038 | 2049 | ||
| 2039 | error = pci_set_power_state(dev, target_state); | 2050 | error = pci_set_power_state(dev, target_state); |
| 2040 | 2051 | ||
| @@ -2067,20 +2078,21 @@ EXPORT_SYMBOL(pci_back_from_sleep); | |||
| 2067 | */ | 2078 | */ |
| 2068 | int pci_finish_runtime_suspend(struct pci_dev *dev) | 2079 | int pci_finish_runtime_suspend(struct pci_dev *dev) |
| 2069 | { | 2080 | { |
| 2070 | pci_power_t target_state = pci_target_state(dev); | 2081 | pci_power_t target_state; |
| 2071 | int error; | 2082 | int error; |
| 2072 | 2083 | ||
| 2084 | target_state = pci_target_state(dev, device_can_wakeup(&dev->dev)); | ||
| 2073 | if (target_state == PCI_POWER_ERROR) | 2085 | if (target_state == PCI_POWER_ERROR) |
| 2074 | return -EIO; | 2086 | return -EIO; |
| 2075 | 2087 | ||
| 2076 | dev->runtime_d3cold = target_state == PCI_D3cold; | 2088 | dev->runtime_d3cold = target_state == PCI_D3cold; |
| 2077 | 2089 | ||
| 2078 | __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); | 2090 | pci_enable_wake(dev, target_state, pci_dev_run_wake(dev)); |
| 2079 | 2091 | ||
| 2080 | error = pci_set_power_state(dev, target_state); | 2092 | error = pci_set_power_state(dev, target_state); |
| 2081 | 2093 | ||
| 2082 | if (error) { | 2094 | if (error) { |
| 2083 | __pci_enable_wake(dev, target_state, true, false); | 2095 | pci_enable_wake(dev, target_state, false); |
| 2084 | dev->runtime_d3cold = false; | 2096 | dev->runtime_d3cold = false; |
| 2085 | } | 2097 | } |
| 2086 | 2098 | ||
| @@ -2099,20 +2111,20 @@ bool pci_dev_run_wake(struct pci_dev *dev) | |||
| 2099 | { | 2111 | { |
| 2100 | struct pci_bus *bus = dev->bus; | 2112 | struct pci_bus *bus = dev->bus; |
| 2101 | 2113 | ||
| 2102 | if (device_run_wake(&dev->dev)) | 2114 | if (device_can_wakeup(&dev->dev)) |
| 2103 | return true; | 2115 | return true; |
| 2104 | 2116 | ||
| 2105 | if (!dev->pme_support) | 2117 | if (!dev->pme_support) |
| 2106 | return false; | 2118 | return false; |
| 2107 | 2119 | ||
| 2108 | /* PME-capable in principle, but not from the intended sleep state */ | 2120 | /* PME-capable in principle, but not from the target power state */ |
| 2109 | if (!pci_pme_capable(dev, pci_target_state(dev))) | 2121 | if (!pci_pme_capable(dev, pci_target_state(dev, false))) |
| 2110 | return false; | 2122 | return false; |
| 2111 | 2123 | ||
| 2112 | while (bus->parent) { | 2124 | while (bus->parent) { |
| 2113 | struct pci_dev *bridge = bus->self; | 2125 | struct pci_dev *bridge = bus->self; |
| 2114 | 2126 | ||
| 2115 | if (device_run_wake(&bridge->dev)) | 2127 | if (device_can_wakeup(&bridge->dev)) |
| 2116 | return true; | 2128 | return true; |
| 2117 | 2129 | ||
| 2118 | bus = bus->parent; | 2130 | bus = bus->parent; |
| @@ -2120,7 +2132,7 @@ bool pci_dev_run_wake(struct pci_dev *dev) | |||
| 2120 | 2132 | ||
| 2121 | /* We have reached the root bus. */ | 2133 | /* We have reached the root bus. */ |
| 2122 | if (bus->bridge) | 2134 | if (bus->bridge) |
| 2123 | return device_run_wake(bus->bridge); | 2135 | return device_can_wakeup(bus->bridge); |
| 2124 | 2136 | ||
| 2125 | return false; | 2137 | return false; |
| 2126 | } | 2138 | } |
| @@ -2141,9 +2153,10 @@ EXPORT_SYMBOL_GPL(pci_dev_run_wake); | |||
| 2141 | bool pci_dev_keep_suspended(struct pci_dev *pci_dev) | 2153 | bool pci_dev_keep_suspended(struct pci_dev *pci_dev) |
| 2142 | { | 2154 | { |
| 2143 | struct device *dev = &pci_dev->dev; | 2155 | struct device *dev = &pci_dev->dev; |
| 2156 | bool wakeup = device_may_wakeup(dev); | ||
| 2144 | 2157 | ||
| 2145 | if (!pm_runtime_suspended(dev) | 2158 | if (!pm_runtime_suspended(dev) |
| 2146 | || pci_target_state(pci_dev) != pci_dev->current_state | 2159 | || pci_target_state(pci_dev, wakeup) != pci_dev->current_state |
| 2147 | || platform_pci_need_resume(pci_dev) | 2160 | || platform_pci_need_resume(pci_dev) |
| 2148 | || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) | 2161 | || (pci_dev->dev_flags & PCI_DEV_FLAGS_NEEDS_RESUME)) |
| 2149 | return false; | 2162 | return false; |
| @@ -2161,7 +2174,7 @@ bool pci_dev_keep_suspended(struct pci_dev *pci_dev) | |||
| 2161 | spin_lock_irq(&dev->power.lock); | 2174 | spin_lock_irq(&dev->power.lock); |
| 2162 | 2175 | ||
| 2163 | if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && | 2176 | if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold && |
| 2164 | !device_may_wakeup(dev)) | 2177 | !wakeup) |
| 2165 | __pci_pme_active(pci_dev, false); | 2178 | __pci_pme_active(pci_dev, false); |
| 2166 | 2179 | ||
| 2167 | spin_unlock_irq(&dev->power.lock); | 2180 | spin_unlock_irq(&dev->power.lock); |
| @@ -3709,46 +3722,6 @@ void pci_intx(struct pci_dev *pdev, int enable) | |||
| 3709 | } | 3722 | } |
| 3710 | EXPORT_SYMBOL_GPL(pci_intx); | 3723 | EXPORT_SYMBOL_GPL(pci_intx); |
| 3711 | 3724 | ||
| 3712 | /** | ||
| 3713 | * pci_intx_mask_supported - probe for INTx masking support | ||
| 3714 | * @dev: the PCI device to operate on | ||
| 3715 | * | ||
| 3716 | * Check if the device dev support INTx masking via the config space | ||
| 3717 | * command word. | ||
| 3718 | */ | ||
| 3719 | bool pci_intx_mask_supported(struct pci_dev *dev) | ||
| 3720 | { | ||
| 3721 | bool mask_supported = false; | ||
| 3722 | u16 orig, new; | ||
| 3723 | |||
| 3724 | if (dev->broken_intx_masking) | ||
| 3725 | return false; | ||
| 3726 | |||
| 3727 | pci_cfg_access_lock(dev); | ||
| 3728 | |||
| 3729 | pci_read_config_word(dev, PCI_COMMAND, &orig); | ||
| 3730 | pci_write_config_word(dev, PCI_COMMAND, | ||
| 3731 | orig ^ PCI_COMMAND_INTX_DISABLE); | ||
| 3732 | pci_read_config_word(dev, PCI_COMMAND, &new); | ||
| 3733 | |||
| 3734 | /* | ||
| 3735 | * There's no way to protect against hardware bugs or detect them | ||
| 3736 | * reliably, but as long as we know what the value should be, let's | ||
| 3737 | * go ahead and check it. | ||
| 3738 | */ | ||
| 3739 | if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) { | ||
| 3740 | dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n", | ||
| 3741 | orig, new); | ||
| 3742 | } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) { | ||
| 3743 | mask_supported = true; | ||
| 3744 | pci_write_config_word(dev, PCI_COMMAND, orig); | ||
| 3745 | } | ||
| 3746 | |||
| 3747 | pci_cfg_access_unlock(dev); | ||
| 3748 | return mask_supported; | ||
| 3749 | } | ||
| 3750 | EXPORT_SYMBOL_GPL(pci_intx_mask_supported); | ||
| 3751 | |||
| 3752 | static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) | 3725 | static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask) |
| 3753 | { | 3726 | { |
| 3754 | struct pci_bus *bus = dev->bus; | 3727 | struct pci_bus *bus = dev->bus; |
| @@ -3799,7 +3772,7 @@ done: | |||
| 3799 | * @dev: the PCI device to operate on | 3772 | * @dev: the PCI device to operate on |
| 3800 | * | 3773 | * |
| 3801 | * Check if the device dev has its INTx line asserted, mask it and | 3774 | * Check if the device dev has its INTx line asserted, mask it and |
| 3802 | * return true in that case. False is returned if not interrupt was | 3775 | * return true in that case. False is returned if no interrupt was |
| 3803 | * pending. | 3776 | * pending. |
| 3804 | */ | 3777 | */ |
| 3805 | bool pci_check_and_mask_intx(struct pci_dev *dev) | 3778 | bool pci_check_and_mask_intx(struct pci_dev *dev) |
| @@ -4069,40 +4042,6 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe) | |||
| 4069 | return pci_reset_hotplug_slot(dev->slot->hotplug, probe); | 4042 | return pci_reset_hotplug_slot(dev->slot->hotplug, probe); |
| 4070 | } | 4043 | } |
| 4071 | 4044 | ||
| 4072 | static int __pci_dev_reset(struct pci_dev *dev, int probe) | ||
| 4073 | { | ||
| 4074 | int rc; | ||
| 4075 | |||
| 4076 | might_sleep(); | ||
| 4077 | |||
| 4078 | rc = pci_dev_specific_reset(dev, probe); | ||
| 4079 | if (rc != -ENOTTY) | ||
| 4080 | goto done; | ||
| 4081 | |||
| 4082 | if (pcie_has_flr(dev)) { | ||
| 4083 | if (!probe) | ||
| 4084 | pcie_flr(dev); | ||
| 4085 | rc = 0; | ||
| 4086 | goto done; | ||
| 4087 | } | ||
| 4088 | |||
| 4089 | rc = pci_af_flr(dev, probe); | ||
| 4090 | if (rc != -ENOTTY) | ||
| 4091 | goto done; | ||
| 4092 | |||
| 4093 | rc = pci_pm_reset(dev, probe); | ||
| 4094 | if (rc != -ENOTTY) | ||
| 4095 | goto done; | ||
| 4096 | |||
| 4097 | rc = pci_dev_reset_slot_function(dev, probe); | ||
| 4098 | if (rc != -ENOTTY) | ||
| 4099 | goto done; | ||
| 4100 | |||
| 4101 | rc = pci_parent_bus_reset(dev, probe); | ||
| 4102 | done: | ||
| 4103 | return rc; | ||
| 4104 | } | ||
| 4105 | |||
| 4106 | static void pci_dev_lock(struct pci_dev *dev) | 4045 | static void pci_dev_lock(struct pci_dev *dev) |
| 4107 | { | 4046 | { |
| 4108 | pci_cfg_access_lock(dev); | 4047 | pci_cfg_access_lock(dev); |
| @@ -4128,26 +4067,18 @@ static void pci_dev_unlock(struct pci_dev *dev) | |||
| 4128 | pci_cfg_access_unlock(dev); | 4067 | pci_cfg_access_unlock(dev); |
| 4129 | } | 4068 | } |
| 4130 | 4069 | ||
| 4131 | /** | 4070 | static void pci_dev_save_and_disable(struct pci_dev *dev) |
| 4132 | * pci_reset_notify - notify device driver of reset | ||
| 4133 | * @dev: device to be notified of reset | ||
| 4134 | * @prepare: 'true' if device is about to be reset; 'false' if reset attempt | ||
| 4135 | * completed | ||
| 4136 | * | ||
| 4137 | * Must be called prior to device access being disabled and after device | ||
| 4138 | * access is restored. | ||
| 4139 | */ | ||
| 4140 | static void pci_reset_notify(struct pci_dev *dev, bool prepare) | ||
| 4141 | { | 4071 | { |
| 4142 | const struct pci_error_handlers *err_handler = | 4072 | const struct pci_error_handlers *err_handler = |
| 4143 | dev->driver ? dev->driver->err_handler : NULL; | 4073 | dev->driver ? dev->driver->err_handler : NULL; |
| 4144 | if (err_handler && err_handler->reset_notify) | ||
| 4145 | err_handler->reset_notify(dev, prepare); | ||
| 4146 | } | ||
| 4147 | 4074 | ||
| 4148 | static void pci_dev_save_and_disable(struct pci_dev *dev) | 4075 | /* |
| 4149 | { | 4076 | * dev->driver->err_handler->reset_prepare() is protected against |
| 4150 | pci_reset_notify(dev, true); | 4077 | * races with ->remove() by the device lock, which must be held by |
| 4078 | * the caller. | ||
| 4079 | */ | ||
| 4080 | if (err_handler && err_handler->reset_prepare) | ||
| 4081 | err_handler->reset_prepare(dev); | ||
| 4151 | 4082 | ||
| 4152 | /* | 4083 | /* |
| 4153 | * Wake-up device prior to save. PM registers default to D0 after | 4084 | * Wake-up device prior to save. PM registers default to D0 after |
| @@ -4169,23 +4100,18 @@ static void pci_dev_save_and_disable(struct pci_dev *dev) | |||
| 4169 | 4100 | ||
| 4170 | static void pci_dev_restore(struct pci_dev *dev) | 4101 | static void pci_dev_restore(struct pci_dev *dev) |
| 4171 | { | 4102 | { |
| 4172 | pci_restore_state(dev); | 4103 | const struct pci_error_handlers *err_handler = |
| 4173 | pci_reset_notify(dev, false); | 4104 | dev->driver ? dev->driver->err_handler : NULL; |
| 4174 | } | ||
| 4175 | |||
| 4176 | static int pci_dev_reset(struct pci_dev *dev, int probe) | ||
| 4177 | { | ||
| 4178 | int rc; | ||
| 4179 | |||
| 4180 | if (!probe) | ||
| 4181 | pci_dev_lock(dev); | ||
| 4182 | |||
| 4183 | rc = __pci_dev_reset(dev, probe); | ||
| 4184 | 4105 | ||
| 4185 | if (!probe) | 4106 | pci_restore_state(dev); |
| 4186 | pci_dev_unlock(dev); | ||
| 4187 | 4107 | ||
| 4188 | return rc; | 4108 | /* |
| 4109 | * dev->driver->err_handler->reset_done() is protected against | ||
| 4110 | * races with ->remove() by the device lock, which must be held by | ||
| 4111 | * the caller. | ||
| 4112 | */ | ||
| 4113 | if (err_handler && err_handler->reset_done) | ||
| 4114 | err_handler->reset_done(dev); | ||
| 4189 | } | 4115 | } |
| 4190 | 4116 | ||
| 4191 | /** | 4117 | /** |
| @@ -4207,7 +4133,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe) | |||
| 4207 | */ | 4133 | */ |
| 4208 | int __pci_reset_function(struct pci_dev *dev) | 4134 | int __pci_reset_function(struct pci_dev *dev) |
| 4209 | { | 4135 | { |
| 4210 | return pci_dev_reset(dev, 0); | 4136 | int ret; |
| 4137 | |||
| 4138 | pci_dev_lock(dev); | ||
| 4139 | ret = __pci_reset_function_locked(dev); | ||
| 4140 | pci_dev_unlock(dev); | ||
| 4141 | |||
| 4142 | return ret; | ||
| 4211 | } | 4143 | } |
| 4212 | EXPORT_SYMBOL_GPL(__pci_reset_function); | 4144 | EXPORT_SYMBOL_GPL(__pci_reset_function); |
| 4213 | 4145 | ||
| @@ -4232,7 +4164,27 @@ EXPORT_SYMBOL_GPL(__pci_reset_function); | |||
| 4232 | */ | 4164 | */ |
| 4233 | int __pci_reset_function_locked(struct pci_dev *dev) | 4165 | int __pci_reset_function_locked(struct pci_dev *dev) |
| 4234 | { | 4166 | { |
| 4235 | return __pci_dev_reset(dev, 0); | 4167 | int rc; |
| 4168 | |||
| 4169 | might_sleep(); | ||
| 4170 | |||
| 4171 | rc = pci_dev_specific_reset(dev, 0); | ||
| 4172 | if (rc != -ENOTTY) | ||
| 4173 | return rc; | ||
| 4174 | if (pcie_has_flr(dev)) { | ||
| 4175 | pcie_flr(dev); | ||
| 4176 | return 0; | ||
| 4177 | } | ||
| 4178 | rc = pci_af_flr(dev, 0); | ||
| 4179 | if (rc != -ENOTTY) | ||
| 4180 | return rc; | ||
| 4181 | rc = pci_pm_reset(dev, 0); | ||
| 4182 | if (rc != -ENOTTY) | ||
| 4183 | return rc; | ||
| 4184 | rc = pci_dev_reset_slot_function(dev, 0); | ||
| 4185 | if (rc != -ENOTTY) | ||
| 4186 | return rc; | ||
| 4187 | return pci_parent_bus_reset(dev, 0); | ||
| 4236 | } | 4188 | } |
| 4237 | EXPORT_SYMBOL_GPL(__pci_reset_function_locked); | 4189 | EXPORT_SYMBOL_GPL(__pci_reset_function_locked); |
| 4238 | 4190 | ||
| @@ -4249,7 +4201,26 @@ EXPORT_SYMBOL_GPL(__pci_reset_function_locked); | |||
| 4249 | */ | 4201 | */ |
| 4250 | int pci_probe_reset_function(struct pci_dev *dev) | 4202 | int pci_probe_reset_function(struct pci_dev *dev) |
| 4251 | { | 4203 | { |
| 4252 | return pci_dev_reset(dev, 1); | 4204 | int rc; |
| 4205 | |||
| 4206 | might_sleep(); | ||
| 4207 | |||
| 4208 | rc = pci_dev_specific_reset(dev, 1); | ||
| 4209 | if (rc != -ENOTTY) | ||
| 4210 | return rc; | ||
| 4211 | if (pcie_has_flr(dev)) | ||
| 4212 | return 0; | ||
| 4213 | rc = pci_af_flr(dev, 1); | ||
| 4214 | if (rc != -ENOTTY) | ||
| 4215 | return rc; | ||
| 4216 | rc = pci_pm_reset(dev, 1); | ||
| 4217 | if (rc != -ENOTTY) | ||
| 4218 | return rc; | ||
| 4219 | rc = pci_dev_reset_slot_function(dev, 1); | ||
| 4220 | if (rc != -ENOTTY) | ||
| 4221 | return rc; | ||
| 4222 | |||
| 4223 | return pci_parent_bus_reset(dev, 1); | ||
| 4253 | } | 4224 | } |
| 4254 | 4225 | ||
| 4255 | /** | 4226 | /** |
| @@ -4272,15 +4243,17 @@ int pci_reset_function(struct pci_dev *dev) | |||
| 4272 | { | 4243 | { |
| 4273 | int rc; | 4244 | int rc; |
| 4274 | 4245 | ||
| 4275 | rc = pci_dev_reset(dev, 1); | 4246 | rc = pci_probe_reset_function(dev); |
| 4276 | if (rc) | 4247 | if (rc) |
| 4277 | return rc; | 4248 | return rc; |
| 4278 | 4249 | ||
| 4250 | pci_dev_lock(dev); | ||
| 4279 | pci_dev_save_and_disable(dev); | 4251 | pci_dev_save_and_disable(dev); |
| 4280 | 4252 | ||
| 4281 | rc = pci_dev_reset(dev, 0); | 4253 | rc = __pci_reset_function_locked(dev); |
| 4282 | 4254 | ||
| 4283 | pci_dev_restore(dev); | 4255 | pci_dev_restore(dev); |
| 4256 | pci_dev_unlock(dev); | ||
| 4284 | 4257 | ||
| 4285 | return rc; | 4258 | return rc; |
| 4286 | } | 4259 | } |
| @@ -4296,20 +4269,18 @@ int pci_try_reset_function(struct pci_dev *dev) | |||
| 4296 | { | 4269 | { |
| 4297 | int rc; | 4270 | int rc; |
| 4298 | 4271 | ||
| 4299 | rc = pci_dev_reset(dev, 1); | 4272 | rc = pci_probe_reset_function(dev); |
| 4300 | if (rc) | 4273 | if (rc) |
| 4301 | return rc; | 4274 | return rc; |
| 4302 | 4275 | ||
| 4303 | pci_dev_save_and_disable(dev); | 4276 | if (!pci_dev_trylock(dev)) |
| 4277 | return -EAGAIN; | ||
| 4304 | 4278 | ||
| 4305 | if (pci_dev_trylock(dev)) { | 4279 | pci_dev_save_and_disable(dev); |
| 4306 | rc = __pci_dev_reset(dev, 0); | 4280 | rc = __pci_reset_function_locked(dev); |
| 4307 | pci_dev_unlock(dev); | 4281 | pci_dev_unlock(dev); |
| 4308 | } else | ||
| 4309 | rc = -EAGAIN; | ||
| 4310 | 4282 | ||
| 4311 | pci_dev_restore(dev); | 4283 | pci_dev_restore(dev); |
| 4312 | |||
| 4313 | return rc; | 4284 | return rc; |
| 4314 | } | 4285 | } |
| 4315 | EXPORT_SYMBOL_GPL(pci_try_reset_function); | 4286 | EXPORT_SYMBOL_GPL(pci_try_reset_function); |
| @@ -4459,7 +4430,9 @@ static void pci_bus_save_and_disable(struct pci_bus *bus) | |||
| 4459 | struct pci_dev *dev; | 4430 | struct pci_dev *dev; |
| 4460 | 4431 | ||
| 4461 | list_for_each_entry(dev, &bus->devices, bus_list) { | 4432 | list_for_each_entry(dev, &bus->devices, bus_list) { |
| 4433 | pci_dev_lock(dev); | ||
| 4462 | pci_dev_save_and_disable(dev); | 4434 | pci_dev_save_and_disable(dev); |
| 4435 | pci_dev_unlock(dev); | ||
| 4463 | if (dev->subordinate) | 4436 | if (dev->subordinate) |
| 4464 | pci_bus_save_and_disable(dev->subordinate); | 4437 | pci_bus_save_and_disable(dev->subordinate); |
| 4465 | } | 4438 | } |
| @@ -4474,7 +4447,9 @@ static void pci_bus_restore(struct pci_bus *bus) | |||
| 4474 | struct pci_dev *dev; | 4447 | struct pci_dev *dev; |
| 4475 | 4448 | ||
| 4476 | list_for_each_entry(dev, &bus->devices, bus_list) { | 4449 | list_for_each_entry(dev, &bus->devices, bus_list) { |
| 4450 | pci_dev_lock(dev); | ||
| 4477 | pci_dev_restore(dev); | 4451 | pci_dev_restore(dev); |
| 4452 | pci_dev_unlock(dev); | ||
| 4478 | if (dev->subordinate) | 4453 | if (dev->subordinate) |
| 4479 | pci_bus_restore(dev->subordinate); | 4454 | pci_bus_restore(dev->subordinate); |
| 4480 | } | 4455 | } |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f8113e5b9812..22e061738c6f 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -47,11 +47,7 @@ int pci_probe_reset_function(struct pci_dev *dev); | |||
| 47 | * platform; to be used during system-wide transitions from a | 47 | * platform; to be used during system-wide transitions from a |
| 48 | * sleeping state to the working state and vice versa | 48 | * sleeping state to the working state and vice versa |
| 49 | * | 49 | * |
| 50 | * @sleep_wake: enables/disables the system wake up capability of given device | 50 | * @set_wakeup: enables/disables wakeup capability for the device |
| 51 | * | ||
| 52 | * @run_wake: enables/disables the platform to generate run-time wake-up events | ||
| 53 | * for given device (the device's wake-up capability has to be | ||
| 54 | * enabled by @sleep_wake for this feature to work) | ||
| 55 | * | 51 | * |
| 56 | * @need_resume: returns 'true' if the given device (which is currently | 52 | * @need_resume: returns 'true' if the given device (which is currently |
| 57 | * suspended) needs to be resumed to be configured for system | 53 | * suspended) needs to be resumed to be configured for system |
| @@ -65,8 +61,7 @@ struct pci_platform_pm_ops { | |||
| 65 | int (*set_state)(struct pci_dev *dev, pci_power_t state); | 61 | int (*set_state)(struct pci_dev *dev, pci_power_t state); |
| 66 | pci_power_t (*get_state)(struct pci_dev *dev); | 62 | pci_power_t (*get_state)(struct pci_dev *dev); |
| 67 | pci_power_t (*choose_state)(struct pci_dev *dev); | 63 | pci_power_t (*choose_state)(struct pci_dev *dev); |
| 68 | int (*sleep_wake)(struct pci_dev *dev, bool enable); | 64 | int (*set_wakeup)(struct pci_dev *dev, bool enable); |
| 69 | int (*run_wake)(struct pci_dev *dev, bool enable); | ||
| 70 | bool (*need_resume)(struct pci_dev *dev); | 65 | bool (*need_resume)(struct pci_dev *dev); |
| 71 | }; | 66 | }; |
| 72 | 67 | ||
| @@ -76,6 +71,7 @@ void pci_power_up(struct pci_dev *dev); | |||
| 76 | void pci_disable_enabled_device(struct pci_dev *dev); | 71 | void pci_disable_enabled_device(struct pci_dev *dev); |
| 77 | int pci_finish_runtime_suspend(struct pci_dev *dev); | 72 | int pci_finish_runtime_suspend(struct pci_dev *dev); |
| 78 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); | 73 | int __pci_pme_wakeup(struct pci_dev *dev, void *ign); |
| 74 | void pci_pme_restore(struct pci_dev *dev); | ||
| 79 | bool pci_dev_keep_suspended(struct pci_dev *dev); | 75 | bool pci_dev_keep_suspended(struct pci_dev *dev); |
| 80 | void pci_dev_complete_resume(struct pci_dev *pci_dev); | 76 | void pci_dev_complete_resume(struct pci_dev *pci_dev); |
| 81 | void pci_config_pm_runtime_get(struct pci_dev *dev); | 77 | void pci_config_pm_runtime_get(struct pci_dev *dev); |
| @@ -272,7 +268,6 @@ struct pci_sriov { | |||
| 272 | u16 driver_max_VFs; /* max num VFs driver supports */ | 268 | u16 driver_max_VFs; /* max num VFs driver supports */ |
| 273 | struct pci_dev *dev; /* lowest numbered PF */ | 269 | struct pci_dev *dev; /* lowest numbered PF */ |
| 274 | struct pci_dev *self; /* this PF */ | 270 | struct pci_dev *self; /* this PF */ |
| 275 | struct mutex lock; /* lock for setting sriov_numvfs in sysfs */ | ||
| 276 | resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ | 271 | resource_size_t barsz[PCI_SRIOV_NUM_BARS]; /* VF BAR size */ |
| 277 | bool drivers_autoprobe; /* auto probing of VFs by driver */ | 272 | bool drivers_autoprobe; /* auto probing of VFs by driver */ |
| 278 | }; | 273 | }; |
diff --git a/drivers/pci/pcie/pcie-dpc.c b/drivers/pci/pcie/pcie-dpc.c index 77d2ca99d2ec..c39f32e42b4d 100644 --- a/drivers/pci/pcie/pcie-dpc.c +++ b/drivers/pci/pcie/pcie-dpc.c | |||
| @@ -92,7 +92,7 @@ static irqreturn_t dpc_irq(int irq, void *context) | |||
| 92 | pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); | 92 | pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_STATUS, &status); |
| 93 | pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, | 93 | pci_read_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_SOURCE_ID, |
| 94 | &source); | 94 | &source); |
| 95 | if (!status) | 95 | if (!status || status == (u16)(~0)) |
| 96 | return IRQ_NONE; | 96 | return IRQ_NONE; |
| 97 | 97 | ||
| 98 | dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", | 98 | dev_info(&dpc->dev->device, "DPC containment event, status:%#06x source:%#06x\n", |
| @@ -144,7 +144,7 @@ static int dpc_probe(struct pcie_device *dev) | |||
| 144 | 144 | ||
| 145 | dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); | 145 | dpc->rp = (cap & PCI_EXP_DPC_CAP_RP_EXT); |
| 146 | 146 | ||
| 147 | ctl |= PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; | 147 | ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_NONFATAL | PCI_EXP_DPC_CTL_INT_EN; |
| 148 | pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); | 148 | pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); |
| 149 | 149 | ||
| 150 | dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", | 150 | dev_info(&dev->device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", |
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 2dd1c68e6de8..fafdb165dd2e 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c | |||
| @@ -40,17 +40,11 @@ static int __init pcie_pme_setup(char *str) | |||
| 40 | } | 40 | } |
| 41 | __setup("pcie_pme=", pcie_pme_setup); | 41 | __setup("pcie_pme=", pcie_pme_setup); |
| 42 | 42 | ||
| 43 | enum pme_suspend_level { | ||
| 44 | PME_SUSPEND_NONE = 0, | ||
| 45 | PME_SUSPEND_WAKEUP, | ||
| 46 | PME_SUSPEND_NOIRQ, | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct pcie_pme_service_data { | 43 | struct pcie_pme_service_data { |
| 50 | spinlock_t lock; | 44 | spinlock_t lock; |
| 51 | struct pcie_device *srv; | 45 | struct pcie_device *srv; |
| 52 | struct work_struct work; | 46 | struct work_struct work; |
| 53 | enum pme_suspend_level suspend_level; | 47 | bool noirq; /* If set, keep the PME interrupt disabled. */ |
| 54 | }; | 48 | }; |
| 55 | 49 | ||
| 56 | /** | 50 | /** |
| @@ -228,7 +222,7 @@ static void pcie_pme_work_fn(struct work_struct *work) | |||
| 228 | spin_lock_irq(&data->lock); | 222 | spin_lock_irq(&data->lock); |
| 229 | 223 | ||
| 230 | for (;;) { | 224 | for (;;) { |
| 231 | if (data->suspend_level != PME_SUSPEND_NONE) | 225 | if (data->noirq) |
| 232 | break; | 226 | break; |
| 233 | 227 | ||
| 234 | pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); | 228 | pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); |
| @@ -255,7 +249,7 @@ static void pcie_pme_work_fn(struct work_struct *work) | |||
| 255 | spin_lock_irq(&data->lock); | 249 | spin_lock_irq(&data->lock); |
| 256 | } | 250 | } |
| 257 | 251 | ||
| 258 | if (data->suspend_level == PME_SUSPEND_NONE) | 252 | if (!data->noirq) |
| 259 | pcie_pme_interrupt_enable(port, true); | 253 | pcie_pme_interrupt_enable(port, true); |
| 260 | 254 | ||
| 261 | spin_unlock_irq(&data->lock); | 255 | spin_unlock_irq(&data->lock); |
| @@ -294,31 +288,29 @@ static irqreturn_t pcie_pme_irq(int irq, void *context) | |||
| 294 | } | 288 | } |
| 295 | 289 | ||
| 296 | /** | 290 | /** |
| 297 | * pcie_pme_set_native - Set the PME interrupt flag for given device. | 291 | * pcie_pme_can_wakeup - Set the wakeup capability flag. |
| 298 | * @dev: PCI device to handle. | 292 | * @dev: PCI device to handle. |
| 299 | * @ign: Ignored. | 293 | * @ign: Ignored. |
| 300 | */ | 294 | */ |
| 301 | static int pcie_pme_set_native(struct pci_dev *dev, void *ign) | 295 | static int pcie_pme_can_wakeup(struct pci_dev *dev, void *ign) |
| 302 | { | 296 | { |
| 303 | device_set_run_wake(&dev->dev, true); | 297 | device_set_wakeup_capable(&dev->dev, true); |
| 304 | dev->pme_interrupt = true; | ||
| 305 | return 0; | 298 | return 0; |
| 306 | } | 299 | } |
| 307 | 300 | ||
| 308 | /** | 301 | /** |
| 309 | * pcie_pme_mark_devices - Set the PME interrupt flag for devices below a port. | 302 | * pcie_pme_mark_devices - Set the wakeup flag for devices below a port. |
| 310 | * @port: PCIe root port or event collector to handle. | 303 | * @port: PCIe root port or event collector to handle. |
| 311 | * | 304 | * |
| 312 | * For each device below given root port, including the port itself (or for each | 305 | * For each device below given root port, including the port itself (or for each |
| 313 | * root complex integrated endpoint if @port is a root complex event collector) | 306 | * root complex integrated endpoint if @port is a root complex event collector) |
| 314 | * set the flag indicating that it can signal run-time wake-up events via PCIe | 307 | * set the flag indicating that it can signal run-time wake-up events. |
| 315 | * PME interrupts. | ||
| 316 | */ | 308 | */ |
| 317 | static void pcie_pme_mark_devices(struct pci_dev *port) | 309 | static void pcie_pme_mark_devices(struct pci_dev *port) |
| 318 | { | 310 | { |
| 319 | pcie_pme_set_native(port, NULL); | 311 | pcie_pme_can_wakeup(port, NULL); |
| 320 | if (port->subordinate) | 312 | if (port->subordinate) |
| 321 | pci_walk_bus(port->subordinate, pcie_pme_set_native, NULL); | 313 | pci_walk_bus(port->subordinate, pcie_pme_can_wakeup, NULL); |
| 322 | } | 314 | } |
| 323 | 315 | ||
| 324 | /** | 316 | /** |
| @@ -380,7 +372,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) | |||
| 380 | { | 372 | { |
| 381 | struct pcie_pme_service_data *data = get_service_data(srv); | 373 | struct pcie_pme_service_data *data = get_service_data(srv); |
| 382 | struct pci_dev *port = srv->port; | 374 | struct pci_dev *port = srv->port; |
| 383 | bool wakeup, wake_irq_enabled = false; | 375 | bool wakeup; |
| 384 | int ret; | 376 | int ret; |
| 385 | 377 | ||
| 386 | if (device_may_wakeup(&port->dev)) { | 378 | if (device_may_wakeup(&port->dev)) { |
| @@ -390,19 +382,16 @@ static int pcie_pme_suspend(struct pcie_device *srv) | |||
| 390 | wakeup = pcie_pme_check_wakeup(port->subordinate); | 382 | wakeup = pcie_pme_check_wakeup(port->subordinate); |
| 391 | up_read(&pci_bus_sem); | 383 | up_read(&pci_bus_sem); |
| 392 | } | 384 | } |
| 393 | spin_lock_irq(&data->lock); | ||
| 394 | if (wakeup) { | 385 | if (wakeup) { |
| 395 | ret = enable_irq_wake(srv->irq); | 386 | ret = enable_irq_wake(srv->irq); |
| 396 | if (ret == 0) { | 387 | if (!ret) |
| 397 | data->suspend_level = PME_SUSPEND_WAKEUP; | 388 | return 0; |
| 398 | wake_irq_enabled = true; | ||
| 399 | } | ||
| 400 | } | ||
| 401 | if (!wake_irq_enabled) { | ||
| 402 | pcie_pme_interrupt_enable(port, false); | ||
| 403 | pcie_clear_root_pme_status(port); | ||
| 404 | data->suspend_level = PME_SUSPEND_NOIRQ; | ||
| 405 | } | 389 | } |
| 390 | |||
| 391 | spin_lock_irq(&data->lock); | ||
| 392 | pcie_pme_interrupt_enable(port, false); | ||
| 393 | pcie_clear_root_pme_status(port); | ||
| 394 | data->noirq = true; | ||
| 406 | spin_unlock_irq(&data->lock); | 395 | spin_unlock_irq(&data->lock); |
| 407 | 396 | ||
| 408 | synchronize_irq(srv->irq); | 397 | synchronize_irq(srv->irq); |
| @@ -419,15 +408,15 @@ static int pcie_pme_resume(struct pcie_device *srv) | |||
| 419 | struct pcie_pme_service_data *data = get_service_data(srv); | 408 | struct pcie_pme_service_data *data = get_service_data(srv); |
| 420 | 409 | ||
| 421 | spin_lock_irq(&data->lock); | 410 | spin_lock_irq(&data->lock); |
| 422 | if (data->suspend_level == PME_SUSPEND_NOIRQ) { | 411 | if (data->noirq) { |
| 423 | struct pci_dev *port = srv->port; | 412 | struct pci_dev *port = srv->port; |
| 424 | 413 | ||
| 425 | pcie_clear_root_pme_status(port); | 414 | pcie_clear_root_pme_status(port); |
| 426 | pcie_pme_interrupt_enable(port, true); | 415 | pcie_pme_interrupt_enable(port, true); |
| 416 | data->noirq = false; | ||
| 427 | } else { | 417 | } else { |
| 428 | disable_irq_wake(srv->irq); | 418 | disable_irq_wake(srv->irq); |
| 429 | } | 419 | } |
| 430 | data->suspend_level = PME_SUSPEND_NONE; | ||
| 431 | spin_unlock_irq(&data->lock); | 420 | spin_unlock_irq(&data->lock); |
| 432 | 421 | ||
| 433 | return 0; | 422 | return 0; |
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 587aef36030d..4334fd5d7de9 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h | |||
| @@ -13,10 +13,11 @@ | |||
| 13 | 13 | ||
| 14 | #define PCIE_PORT_DEVICE_MAXSERVICES 5 | 14 | #define PCIE_PORT_DEVICE_MAXSERVICES 5 |
| 15 | /* | 15 | /* |
| 16 | * According to the PCI Express Base Specification 2.0, the indices of | 16 | * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must |
| 17 | * the MSI-X table entries used by port services must not exceed 31 | 17 | * be one of the first 32 MSI-X entries. Per PCI r3.0, sec 6.8.3.1, MSI |
| 18 | * supports a maximum of 32 vectors per function. | ||
| 18 | */ | 19 | */ |
| 19 | #define PCIE_PORT_MAX_MSIX_ENTRIES 32 | 20 | #define PCIE_PORT_MAX_MSI_ENTRIES 32 |
| 20 | 21 | ||
| 21 | #define get_descriptor_id(type, service) (((type - 4) << 8) | service) | 22 | #define get_descriptor_id(type, service) (((type - 4) << 8) | service) |
| 22 | 23 | ||
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index cea504f6f478..313a21df1692 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c | |||
| @@ -44,14 +44,15 @@ static void release_pcie_device(struct device *dev) | |||
| 44 | } | 44 | } |
| 45 | 45 | ||
| 46 | /** | 46 | /** |
| 47 | * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port | 47 | * pcie_port_enable_irq_vec - try to set up MSI-X or MSI as interrupt mode |
| 48 | * for given port | ||
| 48 | * @dev: PCI Express port to handle | 49 | * @dev: PCI Express port to handle |
| 49 | * @irqs: Array of interrupt vectors to populate | 50 | * @irqs: Array of interrupt vectors to populate |
| 50 | * @mask: Bitmask of port capabilities returned by get_port_device_capability() | 51 | * @mask: Bitmask of port capabilities returned by get_port_device_capability() |
| 51 | * | 52 | * |
| 52 | * Return value: 0 on success, error code on failure | 53 | * Return value: 0 on success, error code on failure |
| 53 | */ | 54 | */ |
| 54 | static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | 55 | static int pcie_port_enable_irq_vec(struct pci_dev *dev, int *irqs, int mask) |
| 55 | { | 56 | { |
| 56 | int nr_entries, entry, nvec = 0; | 57 | int nr_entries, entry, nvec = 0; |
| 57 | 58 | ||
| @@ -61,8 +62,8 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | |||
| 61 | * equal to the number of entries this port actually uses, we'll happily | 62 | * equal to the number of entries this port actually uses, we'll happily |
| 62 | * go through without any tricks. | 63 | * go through without any tricks. |
| 63 | */ | 64 | */ |
| 64 | nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSIX_ENTRIES, | 65 | nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES, |
| 65 | PCI_IRQ_MSIX); | 66 | PCI_IRQ_MSIX | PCI_IRQ_MSI); |
| 66 | if (nr_entries < 0) | 67 | if (nr_entries < 0) |
| 67 | return nr_entries; | 68 | return nr_entries; |
| 68 | 69 | ||
| @@ -70,14 +71,19 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | |||
| 70 | u16 reg16; | 71 | u16 reg16; |
| 71 | 72 | ||
| 72 | /* | 73 | /* |
| 73 | * The code below follows the PCI Express Base Specification 2.0 | 74 | * Per PCIe r3.1, sec 6.1.6, "PME and Hot-Plug Event |
| 74 | * stating in Section 6.1.6 that "PME and Hot-Plug Event | 75 | * interrupts (when both are implemented) always share the |
| 75 | * interrupts (when both are implemented) always share the same | 76 | * same MSI or MSI-X vector, as indicated by the Interrupt |
| 76 | * MSI or MSI-X vector, as indicated by the Interrupt Message | 77 | * Message Number field in the PCI Express Capabilities |
| 77 | * Number field in the PCI Express Capabilities register", where | 78 | * register". |
| 78 | * according to Section 7.8.2 of the specification "For MSI-X, | 79 | * |
| 79 | * the value in this field indicates which MSI-X Table entry is | 80 | * Per sec 7.8.2, "For MSI, the [Interrupt Message Number] |
| 80 | * used to generate the interrupt message." | 81 | * indicates the offset between the base Message Data and |
| 82 | * the interrupt message that is generated." | ||
| 83 | * | ||
| 84 | * "For MSI-X, the [Interrupt Message Number] indicates | ||
| 85 | * which MSI-X Table entry is used to generate the | ||
| 86 | * interrupt message." | ||
| 81 | */ | 87 | */ |
| 82 | pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); | 88 | pcie_capability_read_word(dev, PCI_EXP_FLAGS, ®16); |
| 83 | entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; | 89 | entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; |
| @@ -94,13 +100,17 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | |||
| 94 | u32 reg32, pos; | 100 | u32 reg32, pos; |
| 95 | 101 | ||
| 96 | /* | 102 | /* |
| 97 | * The code below follows Section 7.10.10 of the PCI Express | 103 | * Per PCIe r3.1, sec 7.10.10, the Advanced Error Interrupt |
| 98 | * Base Specification 2.0 stating that bits 31-27 of the Root | 104 | * Message Number in the Root Error Status register |
| 99 | * Error Status Register contain a value indicating which of the | 105 | * indicates which MSI/MSI-X vector is used for AER. |
| 100 | * MSI/MSI-X vectors assigned to the port is going to be used | 106 | * |
| 101 | * for AER, where "For MSI-X, the value in this register | 107 | * "For MSI, the [Advanced Error Interrupt Message Number] |
| 102 | * indicates which MSI-X Table entry is used to generate the | 108 | * indicates the offset between the base Message Data and |
| 103 | * interrupt message." | 109 | * the interrupt message that is generated." |
| 110 | * | ||
| 111 | * "For MSI-X, the [Advanced Error Interrupt Message | ||
| 112 | * Number] indicates which MSI-X Table entry is used to | ||
| 113 | * generate the interrupt message." | ||
| 104 | */ | 114 | */ |
| 105 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); | 115 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); |
| 106 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); | 116 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); |
| @@ -113,6 +123,33 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | |||
| 113 | nvec = max(nvec, entry + 1); | 123 | nvec = max(nvec, entry + 1); |
| 114 | } | 124 | } |
| 115 | 125 | ||
| 126 | if (mask & PCIE_PORT_SERVICE_DPC) { | ||
| 127 | u16 reg16, pos; | ||
| 128 | |||
| 129 | /* | ||
| 130 | * Per PCIe r4.0 (v0.9), sec 7.9.15.2, the DPC Interrupt | ||
| 131 | * Message Number in the DPC Capability register indicates | ||
| 132 | * which MSI/MSI-X vector is used for DPC. | ||
| 133 | * | ||
| 134 | * "For MSI, the [DPC Interrupt Message Number] indicates | ||
| 135 | * the offset between the base Message Data and the | ||
| 136 | * interrupt message that is generated." | ||
| 137 | * | ||
| 138 | * "For MSI-X, the [DPC Interrupt Message Number] indicates | ||
| 139 | * which MSI-X Table entry is used to generate the | ||
| 140 | * interrupt message." | ||
| 141 | */ | ||
| 142 | pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DPC); | ||
| 143 | pci_read_config_word(dev, pos + PCI_EXP_DPC_CAP, ®16); | ||
| 144 | entry = reg16 & 0x1f; | ||
| 145 | if (entry >= nr_entries) | ||
| 146 | goto out_free_irqs; | ||
| 147 | |||
| 148 | irqs[PCIE_PORT_SERVICE_DPC_SHIFT] = pci_irq_vector(dev, entry); | ||
| 149 | |||
| 150 | nvec = max(nvec, entry + 1); | ||
| 151 | } | ||
| 152 | |||
| 116 | /* | 153 | /* |
| 117 | * If nvec is equal to the allocated number of entries, we can just use | 154 | * If nvec is equal to the allocated number of entries, we can just use |
| 118 | * what we have. Otherwise, the port has some extra entries not for the | 155 | * what we have. Otherwise, the port has some extra entries not for the |
| @@ -124,7 +161,7 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *irqs, int mask) | |||
| 124 | 161 | ||
| 125 | /* Now allocate the MSI-X vectors for real */ | 162 | /* Now allocate the MSI-X vectors for real */ |
| 126 | nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, | 163 | nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec, |
| 127 | PCI_IRQ_MSIX); | 164 | PCI_IRQ_MSIX | PCI_IRQ_MSI); |
| 128 | if (nr_entries < 0) | 165 | if (nr_entries < 0) |
| 129 | return nr_entries; | 166 | return nr_entries; |
| 130 | } | 167 | } |
| @@ -146,26 +183,29 @@ out_free_irqs: | |||
| 146 | */ | 183 | */ |
| 147 | static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) | 184 | static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask) |
| 148 | { | 185 | { |
| 149 | unsigned flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; | ||
| 150 | int ret, i; | 186 | int ret, i; |
| 151 | 187 | ||
| 152 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) | 188 | for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) |
| 153 | irqs[i] = -1; | 189 | irqs[i] = -1; |
| 154 | 190 | ||
| 155 | /* | 191 | /* |
| 156 | * If MSI cannot be used for PCIe PME or hotplug, we have to use | 192 | * If we support PME or hotplug, but we can't use MSI/MSI-X for |
| 157 | * INTx or other interrupts, e.g. system shared interrupt. | 193 | * them, we have to fall back to INTx or other interrupts, e.g., a |
| 194 | * system shared interrupt. | ||
| 158 | */ | 195 | */ |
| 159 | if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || | 196 | if ((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) |
| 160 | ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { | 197 | goto legacy_irq; |
| 161 | flags &= ~PCI_IRQ_MSI; | 198 | |
| 162 | } else { | 199 | if ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi()) |
| 163 | /* Try to use MSI-X if supported */ | 200 | goto legacy_irq; |
| 164 | if (!pcie_port_enable_msix(dev, irqs, mask)) | 201 | |
| 165 | return 0; | 202 | /* Try to use MSI-X or MSI if supported */ |
| 166 | } | 203 | if (pcie_port_enable_irq_vec(dev, irqs, mask) == 0) |
| 204 | return 0; | ||
| 167 | 205 | ||
| 168 | ret = pci_alloc_irq_vectors(dev, 1, 1, flags); | 206 | legacy_irq: |
| 207 | /* fall back to legacy IRQ */ | ||
| 208 | ret = pci_alloc_irq_vectors(dev, 1, 1, PCI_IRQ_LEGACY); | ||
| 169 | if (ret < 0) | 209 | if (ret < 0) |
| 170 | return -ENODEV; | 210 | return -ENODEV; |
| 171 | 211 | ||
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 19c8950c6c38..c31310db0404 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -510,16 +510,18 @@ static struct pci_bus *pci_alloc_bus(struct pci_bus *parent) | |||
| 510 | return b; | 510 | return b; |
| 511 | } | 511 | } |
| 512 | 512 | ||
| 513 | static void pci_release_host_bridge_dev(struct device *dev) | 513 | static void devm_pci_release_host_bridge_dev(struct device *dev) |
| 514 | { | 514 | { |
| 515 | struct pci_host_bridge *bridge = to_pci_host_bridge(dev); | 515 | struct pci_host_bridge *bridge = to_pci_host_bridge(dev); |
| 516 | 516 | ||
| 517 | if (bridge->release_fn) | 517 | if (bridge->release_fn) |
| 518 | bridge->release_fn(bridge); | 518 | bridge->release_fn(bridge); |
| 519 | } | ||
| 519 | 520 | ||
| 520 | pci_free_resource_list(&bridge->windows); | 521 | static void pci_release_host_bridge_dev(struct device *dev) |
| 521 | 522 | { | |
| 522 | kfree(bridge); | 523 | devm_pci_release_host_bridge_dev(dev); |
| 524 | pci_free_host_bridge(to_pci_host_bridge(dev)); | ||
| 523 | } | 525 | } |
| 524 | 526 | ||
| 525 | struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) | 527 | struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) |
| @@ -531,11 +533,36 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) | |||
| 531 | return NULL; | 533 | return NULL; |
| 532 | 534 | ||
| 533 | INIT_LIST_HEAD(&bridge->windows); | 535 | INIT_LIST_HEAD(&bridge->windows); |
| 536 | bridge->dev.release = pci_release_host_bridge_dev; | ||
| 534 | 537 | ||
| 535 | return bridge; | 538 | return bridge; |
| 536 | } | 539 | } |
| 537 | EXPORT_SYMBOL(pci_alloc_host_bridge); | 540 | EXPORT_SYMBOL(pci_alloc_host_bridge); |
| 538 | 541 | ||
| 542 | struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, | ||
| 543 | size_t priv) | ||
| 544 | { | ||
| 545 | struct pci_host_bridge *bridge; | ||
| 546 | |||
| 547 | bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL); | ||
| 548 | if (!bridge) | ||
| 549 | return NULL; | ||
| 550 | |||
| 551 | INIT_LIST_HEAD(&bridge->windows); | ||
| 552 | bridge->dev.release = devm_pci_release_host_bridge_dev; | ||
| 553 | |||
| 554 | return bridge; | ||
| 555 | } | ||
| 556 | EXPORT_SYMBOL(devm_pci_alloc_host_bridge); | ||
| 557 | |||
| 558 | void pci_free_host_bridge(struct pci_host_bridge *bridge) | ||
| 559 | { | ||
| 560 | pci_free_resource_list(&bridge->windows); | ||
| 561 | |||
| 562 | kfree(bridge); | ||
| 563 | } | ||
| 564 | EXPORT_SYMBOL(pci_free_host_bridge); | ||
| 565 | |||
| 539 | static const unsigned char pcix_bus_speed[] = { | 566 | static const unsigned char pcix_bus_speed[] = { |
| 540 | PCI_SPEED_UNKNOWN, /* 0 */ | 567 | PCI_SPEED_UNKNOWN, /* 0 */ |
| 541 | PCI_SPEED_66MHz_PCIX, /* 1 */ | 568 | PCI_SPEED_66MHz_PCIX, /* 1 */ |
| @@ -719,7 +746,7 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus) | |||
| 719 | dev_set_msi_domain(&bus->dev, d); | 746 | dev_set_msi_domain(&bus->dev, d); |
| 720 | } | 747 | } |
| 721 | 748 | ||
| 722 | int pci_register_host_bridge(struct pci_host_bridge *bridge) | 749 | static int pci_register_host_bridge(struct pci_host_bridge *bridge) |
| 723 | { | 750 | { |
| 724 | struct device *parent = bridge->dev.parent; | 751 | struct device *parent = bridge->dev.parent; |
| 725 | struct resource_entry *window, *n; | 752 | struct resource_entry *window, *n; |
| @@ -834,7 +861,6 @@ free: | |||
| 834 | kfree(bus); | 861 | kfree(bus); |
| 835 | return err; | 862 | return err; |
| 836 | } | 863 | } |
| 837 | EXPORT_SYMBOL(pci_register_host_bridge); | ||
| 838 | 864 | ||
| 839 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, | 865 | static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent, |
| 840 | struct pci_dev *bridge, int busnr) | 866 | struct pci_dev *bridge, int busnr) |
| @@ -1330,6 +1356,34 @@ static void pci_msi_setup_pci_dev(struct pci_dev *dev) | |||
| 1330 | } | 1356 | } |
| 1331 | 1357 | ||
| 1332 | /** | 1358 | /** |
| 1359 | * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability | ||
| 1360 | * @dev: PCI device | ||
| 1361 | * | ||
| 1362 | * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this | ||
| 1363 | * at enumeration-time to avoid modifying PCI_COMMAND at run-time. | ||
| 1364 | */ | ||
| 1365 | static int pci_intx_mask_broken(struct pci_dev *dev) | ||
| 1366 | { | ||
| 1367 | u16 orig, toggle, new; | ||
| 1368 | |||
| 1369 | pci_read_config_word(dev, PCI_COMMAND, &orig); | ||
| 1370 | toggle = orig ^ PCI_COMMAND_INTX_DISABLE; | ||
| 1371 | pci_write_config_word(dev, PCI_COMMAND, toggle); | ||
| 1372 | pci_read_config_word(dev, PCI_COMMAND, &new); | ||
| 1373 | |||
| 1374 | pci_write_config_word(dev, PCI_COMMAND, orig); | ||
| 1375 | |||
| 1376 | /* | ||
| 1377 | * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI | ||
| 1378 | * r2.3, so strictly speaking, a device is not *broken* if it's not | ||
| 1379 | * writable. But we'll live with the misnomer for now. | ||
| 1380 | */ | ||
| 1381 | if (new != toggle) | ||
| 1382 | return 1; | ||
| 1383 | return 0; | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | /** | ||
| 1333 | * pci_setup_device - fill in class and map information of a device | 1387 | * pci_setup_device - fill in class and map information of a device |
| 1334 | * @dev: the device structure to fill | 1388 | * @dev: the device structure to fill |
| 1335 | * | 1389 | * |
| @@ -1399,6 +1453,8 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 1399 | } | 1453 | } |
| 1400 | } | 1454 | } |
| 1401 | 1455 | ||
| 1456 | dev->broken_intx_masking = pci_intx_mask_broken(dev); | ||
| 1457 | |||
| 1402 | switch (dev->hdr_type) { /* header type */ | 1458 | switch (dev->hdr_type) { /* header type */ |
| 1403 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ | 1459 | case PCI_HEADER_TYPE_NORMAL: /* standard header */ |
| 1404 | if (class == PCI_CLASS_BRIDGE_PCI) | 1460 | if (class == PCI_CLASS_BRIDGE_PCI) |
| @@ -1674,6 +1730,11 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
| 1674 | /* Initialize Advanced Error Capabilities and Control Register */ | 1730 | /* Initialize Advanced Error Capabilities and Control Register */ |
| 1675 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); | 1731 | pci_read_config_dword(dev, pos + PCI_ERR_CAP, ®32); |
| 1676 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; | 1732 | reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or; |
| 1733 | /* Don't enable ECRC generation or checking if unsupported */ | ||
| 1734 | if (!(reg32 & PCI_ERR_CAP_ECRC_GENC)) | ||
| 1735 | reg32 &= ~PCI_ERR_CAP_ECRC_GENE; | ||
| 1736 | if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC)) | ||
| 1737 | reg32 &= ~PCI_ERR_CAP_ECRC_CHKE; | ||
| 1677 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); | 1738 | pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32); |
| 1678 | 1739 | ||
| 1679 | /* | 1740 | /* |
| @@ -2298,9 +2359,8 @@ void __weak pcibios_remove_bus(struct pci_bus *bus) | |||
| 2298 | { | 2359 | { |
| 2299 | } | 2360 | } |
| 2300 | 2361 | ||
| 2301 | static struct pci_bus *pci_create_root_bus_msi(struct device *parent, | 2362 | struct pci_bus *pci_create_root_bus(struct device *parent, int bus, |
| 2302 | int bus, struct pci_ops *ops, void *sysdata, | 2363 | struct pci_ops *ops, void *sysdata, struct list_head *resources) |
| 2303 | struct list_head *resources, struct msi_controller *msi) | ||
| 2304 | { | 2364 | { |
| 2305 | int error; | 2365 | int error; |
| 2306 | struct pci_host_bridge *bridge; | 2366 | struct pci_host_bridge *bridge; |
| @@ -2310,13 +2370,11 @@ static struct pci_bus *pci_create_root_bus_msi(struct device *parent, | |||
| 2310 | return NULL; | 2370 | return NULL; |
| 2311 | 2371 | ||
| 2312 | bridge->dev.parent = parent; | 2372 | bridge->dev.parent = parent; |
| 2313 | bridge->dev.release = pci_release_host_bridge_dev; | ||
| 2314 | 2373 | ||
| 2315 | list_splice_init(resources, &bridge->windows); | 2374 | list_splice_init(resources, &bridge->windows); |
| 2316 | bridge->sysdata = sysdata; | 2375 | bridge->sysdata = sysdata; |
| 2317 | bridge->busnr = bus; | 2376 | bridge->busnr = bus; |
| 2318 | bridge->ops = ops; | 2377 | bridge->ops = ops; |
| 2319 | bridge->msi = msi; | ||
| 2320 | 2378 | ||
| 2321 | error = pci_register_host_bridge(bridge); | 2379 | error = pci_register_host_bridge(bridge); |
| 2322 | if (error < 0) | 2380 | if (error < 0) |
| @@ -2328,13 +2386,6 @@ err_out: | |||
| 2328 | kfree(bridge); | 2386 | kfree(bridge); |
| 2329 | return NULL; | 2387 | return NULL; |
| 2330 | } | 2388 | } |
| 2331 | |||
| 2332 | struct pci_bus *pci_create_root_bus(struct device *parent, int bus, | ||
| 2333 | struct pci_ops *ops, void *sysdata, struct list_head *resources) | ||
| 2334 | { | ||
| 2335 | return pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, | ||
| 2336 | NULL); | ||
| 2337 | } | ||
| 2338 | EXPORT_SYMBOL_GPL(pci_create_root_bus); | 2389 | EXPORT_SYMBOL_GPL(pci_create_root_bus); |
| 2339 | 2390 | ||
| 2340 | int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) | 2391 | int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) |
| @@ -2400,24 +2451,28 @@ void pci_bus_release_busn_res(struct pci_bus *b) | |||
| 2400 | res, ret ? "can not be" : "is"); | 2451 | res, ret ? "can not be" : "is"); |
| 2401 | } | 2452 | } |
| 2402 | 2453 | ||
| 2403 | struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, | 2454 | int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) |
| 2404 | struct pci_ops *ops, void *sysdata, | ||
| 2405 | struct list_head *resources, struct msi_controller *msi) | ||
| 2406 | { | 2455 | { |
| 2407 | struct resource_entry *window; | 2456 | struct resource_entry *window; |
| 2408 | bool found = false; | 2457 | bool found = false; |
| 2409 | struct pci_bus *b; | 2458 | struct pci_bus *b; |
| 2410 | int max; | 2459 | int max, bus, ret; |
| 2411 | 2460 | ||
| 2412 | resource_list_for_each_entry(window, resources) | 2461 | if (!bridge) |
| 2462 | return -EINVAL; | ||
| 2463 | |||
| 2464 | resource_list_for_each_entry(window, &bridge->windows) | ||
| 2413 | if (window->res->flags & IORESOURCE_BUS) { | 2465 | if (window->res->flags & IORESOURCE_BUS) { |
| 2414 | found = true; | 2466 | found = true; |
| 2415 | break; | 2467 | break; |
| 2416 | } | 2468 | } |
| 2417 | 2469 | ||
| 2418 | b = pci_create_root_bus_msi(parent, bus, ops, sysdata, resources, msi); | 2470 | ret = pci_register_host_bridge(bridge); |
| 2419 | if (!b) | 2471 | if (ret < 0) |
| 2420 | return NULL; | 2472 | return ret; |
| 2473 | |||
| 2474 | b = bridge->bus; | ||
| 2475 | bus = bridge->busnr; | ||
| 2421 | 2476 | ||
| 2422 | if (!found) { | 2477 | if (!found) { |
| 2423 | dev_info(&b->dev, | 2478 | dev_info(&b->dev, |
| @@ -2431,14 +2486,41 @@ struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus, | |||
| 2431 | if (!found) | 2486 | if (!found) |
| 2432 | pci_bus_update_busn_res_end(b, max); | 2487 | pci_bus_update_busn_res_end(b, max); |
| 2433 | 2488 | ||
| 2434 | return b; | 2489 | return 0; |
| 2435 | } | 2490 | } |
| 2491 | EXPORT_SYMBOL(pci_scan_root_bus_bridge); | ||
| 2436 | 2492 | ||
| 2437 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, | 2493 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, |
| 2438 | struct pci_ops *ops, void *sysdata, struct list_head *resources) | 2494 | struct pci_ops *ops, void *sysdata, struct list_head *resources) |
| 2439 | { | 2495 | { |
| 2440 | return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources, | 2496 | struct resource_entry *window; |
| 2441 | NULL); | 2497 | bool found = false; |
| 2498 | struct pci_bus *b; | ||
| 2499 | int max; | ||
| 2500 | |||
| 2501 | resource_list_for_each_entry(window, resources) | ||
| 2502 | if (window->res->flags & IORESOURCE_BUS) { | ||
| 2503 | found = true; | ||
| 2504 | break; | ||
| 2505 | } | ||
| 2506 | |||
| 2507 | b = pci_create_root_bus(parent, bus, ops, sysdata, resources); | ||
| 2508 | if (!b) | ||
| 2509 | return NULL; | ||
| 2510 | |||
| 2511 | if (!found) { | ||
| 2512 | dev_info(&b->dev, | ||
| 2513 | "No busn resource found for root bus, will use [bus %02x-ff]\n", | ||
| 2514 | bus); | ||
| 2515 | pci_bus_insert_busn_res(b, bus, 255); | ||
| 2516 | } | ||
| 2517 | |||
| 2518 | max = pci_scan_child_bus(b); | ||
| 2519 | |||
| 2520 | if (!found) | ||
| 2521 | pci_bus_update_busn_res_end(b, max); | ||
| 2522 | |||
| 2523 | return b; | ||
| 2442 | } | 2524 | } |
| 2443 | EXPORT_SYMBOL(pci_scan_root_bus); | 2525 | EXPORT_SYMBOL(pci_scan_root_bus); |
| 2444 | 2526 | ||
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 085fb787aa9e..6967c6b4cf6b 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -304,7 +304,7 @@ static void quirk_extend_bar_to_page(struct pci_dev *dev) | |||
| 304 | { | 304 | { |
| 305 | int i; | 305 | int i; |
| 306 | 306 | ||
| 307 | for (i = 0; i < PCI_STD_RESOURCE_END; i++) { | 307 | for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { |
| 308 | struct resource *r = &dev->resource[i]; | 308 | struct resource *r = &dev->resource[i]; |
| 309 | 309 | ||
| 310 | if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { | 310 | if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) { |
| @@ -1684,6 +1684,19 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2609, quirk_intel_pcie_pm); | |||
| 1684 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); | 1684 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260a, quirk_intel_pcie_pm); |
| 1685 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); | 1685 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm); |
| 1686 | 1686 | ||
| 1687 | static void quirk_radeon_pm(struct pci_dev *dev) | ||
| 1688 | { | ||
| 1689 | if (dev->subsystem_vendor == PCI_VENDOR_ID_APPLE && | ||
| 1690 | dev->subsystem_device == 0x00e2) { | ||
| 1691 | if (dev->d3_delay < 20) { | ||
| 1692 | dev->d3_delay = 20; | ||
| 1693 | dev_info(&dev->dev, "extending delay after power-on from D3 to %d msec\n", | ||
| 1694 | dev->d3_delay); | ||
| 1695 | } | ||
| 1696 | } | ||
| 1697 | } | ||
| 1698 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6741, quirk_radeon_pm); | ||
| 1699 | |||
| 1687 | #ifdef CONFIG_X86_IO_APIC | 1700 | #ifdef CONFIG_X86_IO_APIC |
| 1688 | static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) | 1701 | static int dmi_disable_ioapicreroute(const struct dmi_system_id *d) |
| 1689 | { | 1702 | { |
| @@ -3236,6 +3249,10 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1588, | |||
| 3236 | quirk_broken_intx_masking); | 3249 | quirk_broken_intx_masking); |
| 3237 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, | 3250 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1589, |
| 3238 | quirk_broken_intx_masking); | 3251 | quirk_broken_intx_masking); |
| 3252 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158a, | ||
| 3253 | quirk_broken_intx_masking); | ||
| 3254 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x158b, | ||
| 3255 | quirk_broken_intx_masking); | ||
| 3239 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, | 3256 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d0, |
| 3240 | quirk_broken_intx_masking); | 3257 | quirk_broken_intx_masking); |
| 3241 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, | 3258 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x37d1, |
diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c index 95c225be49d1..81eda3d93a5d 100644 --- a/drivers/pci/setup-irq.c +++ b/drivers/pci/setup-irq.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/errno.h> | 15 | #include <linux/errno.h> |
| 16 | #include <linux/ioport.h> | 16 | #include <linux/ioport.h> |
| 17 | #include <linux/cache.h> | 17 | #include <linux/cache.h> |
| 18 | #include "pci.h" | ||
| 18 | 19 | ||
| 19 | void __weak pcibios_update_irq(struct pci_dev *dev, int irq) | 20 | void __weak pcibios_update_irq(struct pci_dev *dev, int irq) |
| 20 | { | 21 | { |
| @@ -22,12 +23,17 @@ void __weak pcibios_update_irq(struct pci_dev *dev, int irq) | |||
| 22 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); | 23 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq); |
| 23 | } | 24 | } |
| 24 | 25 | ||
| 25 | static void pdev_fixup_irq(struct pci_dev *dev, | 26 | void pci_assign_irq(struct pci_dev *dev) |
| 26 | u8 (*swizzle)(struct pci_dev *, u8 *), | ||
| 27 | int (*map_irq)(const struct pci_dev *, u8, u8)) | ||
| 28 | { | 27 | { |
| 29 | u8 pin, slot; | 28 | u8 pin; |
| 29 | u8 slot = -1; | ||
| 30 | int irq = 0; | 30 | int irq = 0; |
| 31 | struct pci_host_bridge *hbrg = pci_find_host_bridge(dev->bus); | ||
| 32 | |||
| 33 | if (!(hbrg->map_irq)) { | ||
| 34 | dev_dbg(&dev->dev, "runtime IRQ mapping not provided by arch\n"); | ||
| 35 | return; | ||
| 36 | } | ||
| 31 | 37 | ||
| 32 | /* If this device is not on the primary bus, we need to figure out | 38 | /* If this device is not on the primary bus, we need to figure out |
| 33 | which interrupt pin it will come in on. We know which slot it | 39 | which interrupt pin it will come in on. We know which slot it |
| @@ -40,17 +46,22 @@ static void pdev_fixup_irq(struct pci_dev *dev, | |||
| 40 | if (pin > 4) | 46 | if (pin > 4) |
| 41 | pin = 1; | 47 | pin = 1; |
| 42 | 48 | ||
| 43 | if (pin != 0) { | 49 | if (pin) { |
| 44 | /* Follow the chain of bridges, swizzling as we go. */ | 50 | /* Follow the chain of bridges, swizzling as we go. */ |
| 45 | slot = (*swizzle)(dev, &pin); | 51 | if (hbrg->swizzle_irq) |
| 52 | slot = (*(hbrg->swizzle_irq))(dev, &pin); | ||
| 46 | 53 | ||
| 47 | irq = (*map_irq)(dev, slot, pin); | 54 | /* |
| 55 | * If a swizzling function is not used map_irq must | ||
| 56 | * ignore slot | ||
| 57 | */ | ||
| 58 | irq = (*(hbrg->map_irq))(dev, slot, pin); | ||
| 48 | if (irq == -1) | 59 | if (irq == -1) |
| 49 | irq = 0; | 60 | irq = 0; |
| 50 | } | 61 | } |
| 51 | dev->irq = irq; | 62 | dev->irq = irq; |
| 52 | 63 | ||
| 53 | dev_dbg(&dev->dev, "fixup irq: got %d\n", dev->irq); | 64 | dev_dbg(&dev->dev, "assign IRQ: got %d\n", dev->irq); |
| 54 | 65 | ||
| 55 | /* Always tell the device, so the driver knows what is | 66 | /* Always tell the device, so the driver knows what is |
| 56 | the real IRQ to use; the device does not use it. */ | 67 | the real IRQ to use; the device does not use it. */ |
| @@ -60,9 +71,23 @@ static void pdev_fixup_irq(struct pci_dev *dev, | |||
| 60 | void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), | 71 | void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), |
| 61 | int (*map_irq)(const struct pci_dev *, u8, u8)) | 72 | int (*map_irq)(const struct pci_dev *, u8, u8)) |
| 62 | { | 73 | { |
| 74 | /* | ||
| 75 | * Implement pci_fixup_irqs() through pci_assign_irq(). | ||
| 76 | * This code should be remove eventually, it is a wrapper | ||
| 77 | * around pci_assign_irq() interface to keep current | ||
| 78 | * pci_fixup_irqs() behaviour unchanged on architecture | ||
| 79 | * code still relying on its interface. | ||
| 80 | */ | ||
| 63 | struct pci_dev *dev = NULL; | 81 | struct pci_dev *dev = NULL; |
| 82 | struct pci_host_bridge *hbrg = NULL; | ||
| 64 | 83 | ||
| 65 | for_each_pci_dev(dev) | 84 | for_each_pci_dev(dev) { |
| 66 | pdev_fixup_irq(dev, swizzle, map_irq); | 85 | hbrg = pci_find_host_bridge(dev->bus); |
| 86 | hbrg->swizzle_irq = swizzle; | ||
| 87 | hbrg->map_irq = map_irq; | ||
| 88 | pci_assign_irq(dev); | ||
| 89 | hbrg->swizzle_irq = NULL; | ||
| 90 | hbrg->map_irq = NULL; | ||
| 91 | } | ||
| 67 | } | 92 | } |
| 68 | EXPORT_SYMBOL_GPL(pci_fixup_irqs); | 93 | EXPORT_SYMBOL_GPL(pci_fixup_irqs); |
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index f6a63406c76e..af81b2dec42e 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c | |||
| @@ -120,6 +120,13 @@ struct sw_event_regs { | |||
| 120 | u32 reserved16[4]; | 120 | u32 reserved16[4]; |
| 121 | } __packed; | 121 | } __packed; |
| 122 | 122 | ||
| 123 | enum { | ||
| 124 | SWITCHTEC_CFG0_RUNNING = 0x04, | ||
| 125 | SWITCHTEC_CFG1_RUNNING = 0x05, | ||
| 126 | SWITCHTEC_IMG0_RUNNING = 0x03, | ||
| 127 | SWITCHTEC_IMG1_RUNNING = 0x07, | ||
| 128 | }; | ||
| 129 | |||
| 123 | struct sys_info_regs { | 130 | struct sys_info_regs { |
| 124 | u32 device_id; | 131 | u32 device_id; |
| 125 | u32 device_version; | 132 | u32 device_version; |
| @@ -129,7 +136,9 @@ struct sys_info_regs { | |||
| 129 | u32 table_format_version; | 136 | u32 table_format_version; |
| 130 | u32 partition_id; | 137 | u32 partition_id; |
| 131 | u32 cfg_file_fmt_version; | 138 | u32 cfg_file_fmt_version; |
| 132 | u32 reserved2[58]; | 139 | u16 cfg_running; |
| 140 | u16 img_running; | ||
| 141 | u32 reserved2[57]; | ||
| 133 | char vendor_id[8]; | 142 | char vendor_id[8]; |
| 134 | char product_id[16]; | 143 | char product_id[16]; |
| 135 | char product_revision[4]; | 144 | char product_revision[4]; |
| @@ -807,6 +816,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, | |||
| 807 | { | 816 | { |
| 808 | struct switchtec_ioctl_flash_part_info info = {0}; | 817 | struct switchtec_ioctl_flash_part_info info = {0}; |
| 809 | struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; | 818 | struct flash_info_regs __iomem *fi = stdev->mmio_flash_info; |
| 819 | struct sys_info_regs __iomem *si = stdev->mmio_sys_info; | ||
| 810 | u32 active_addr = -1; | 820 | u32 active_addr = -1; |
| 811 | 821 | ||
| 812 | if (copy_from_user(&info, uinfo, sizeof(info))) | 822 | if (copy_from_user(&info, uinfo, sizeof(info))) |
| @@ -816,18 +826,26 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, | |||
| 816 | case SWITCHTEC_IOCTL_PART_CFG0: | 826 | case SWITCHTEC_IOCTL_PART_CFG0: |
| 817 | active_addr = ioread32(&fi->active_cfg); | 827 | active_addr = ioread32(&fi->active_cfg); |
| 818 | set_fw_info_part(&info, &fi->cfg0); | 828 | set_fw_info_part(&info, &fi->cfg0); |
| 829 | if (ioread16(&si->cfg_running) == SWITCHTEC_CFG0_RUNNING) | ||
| 830 | info.active |= SWITCHTEC_IOCTL_PART_RUNNING; | ||
| 819 | break; | 831 | break; |
| 820 | case SWITCHTEC_IOCTL_PART_CFG1: | 832 | case SWITCHTEC_IOCTL_PART_CFG1: |
| 821 | active_addr = ioread32(&fi->active_cfg); | 833 | active_addr = ioread32(&fi->active_cfg); |
| 822 | set_fw_info_part(&info, &fi->cfg1); | 834 | set_fw_info_part(&info, &fi->cfg1); |
| 835 | if (ioread16(&si->cfg_running) == SWITCHTEC_CFG1_RUNNING) | ||
| 836 | info.active |= SWITCHTEC_IOCTL_PART_RUNNING; | ||
| 823 | break; | 837 | break; |
| 824 | case SWITCHTEC_IOCTL_PART_IMG0: | 838 | case SWITCHTEC_IOCTL_PART_IMG0: |
| 825 | active_addr = ioread32(&fi->active_img); | 839 | active_addr = ioread32(&fi->active_img); |
| 826 | set_fw_info_part(&info, &fi->img0); | 840 | set_fw_info_part(&info, &fi->img0); |
| 841 | if (ioread16(&si->img_running) == SWITCHTEC_IMG0_RUNNING) | ||
| 842 | info.active |= SWITCHTEC_IOCTL_PART_RUNNING; | ||
| 827 | break; | 843 | break; |
| 828 | case SWITCHTEC_IOCTL_PART_IMG1: | 844 | case SWITCHTEC_IOCTL_PART_IMG1: |
| 829 | active_addr = ioread32(&fi->active_img); | 845 | active_addr = ioread32(&fi->active_img); |
| 830 | set_fw_info_part(&info, &fi->img1); | 846 | set_fw_info_part(&info, &fi->img1); |
| 847 | if (ioread16(&si->img_running) == SWITCHTEC_IMG1_RUNNING) | ||
| 848 | info.active |= SWITCHTEC_IOCTL_PART_RUNNING; | ||
| 831 | break; | 849 | break; |
| 832 | case SWITCHTEC_IOCTL_PART_NVLOG: | 850 | case SWITCHTEC_IOCTL_PART_NVLOG: |
| 833 | set_fw_info_part(&info, &fi->nvlog); | 851 | set_fw_info_part(&info, &fi->nvlog); |
| @@ -861,7 +879,7 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, | |||
| 861 | } | 879 | } |
| 862 | 880 | ||
| 863 | if (info.address == active_addr) | 881 | if (info.address == active_addr) |
| 864 | info.active = 1; | 882 | info.active |= SWITCHTEC_IOCTL_PART_ACTIVE; |
| 865 | 883 | ||
| 866 | if (copy_to_user(uinfo, &info, sizeof(info))) | 884 | if (copy_to_user(uinfo, &info, sizeof(info))) |
| 867 | return -EFAULT; | 885 | return -EFAULT; |
| @@ -1540,6 +1558,24 @@ static const struct pci_device_id switchtec_pci_tbl[] = { | |||
| 1540 | SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 | 1558 | SWITCHTEC_PCI_DEVICE(0x8544), //PSX 64xG3 |
| 1541 | SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 | 1559 | SWITCHTEC_PCI_DEVICE(0x8545), //PSX 80xG3 |
| 1542 | SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 | 1560 | SWITCHTEC_PCI_DEVICE(0x8546), //PSX 96xG3 |
| 1561 | SWITCHTEC_PCI_DEVICE(0x8551), //PAX 24XG3 | ||
| 1562 | SWITCHTEC_PCI_DEVICE(0x8552), //PAX 32XG3 | ||
| 1563 | SWITCHTEC_PCI_DEVICE(0x8553), //PAX 48XG3 | ||
| 1564 | SWITCHTEC_PCI_DEVICE(0x8554), //PAX 64XG3 | ||
| 1565 | SWITCHTEC_PCI_DEVICE(0x8555), //PAX 80XG3 | ||
| 1566 | SWITCHTEC_PCI_DEVICE(0x8556), //PAX 96XG3 | ||
| 1567 | SWITCHTEC_PCI_DEVICE(0x8561), //PFXL 24XG3 | ||
| 1568 | SWITCHTEC_PCI_DEVICE(0x8562), //PFXL 32XG3 | ||
| 1569 | SWITCHTEC_PCI_DEVICE(0x8563), //PFXL 48XG3 | ||
| 1570 | SWITCHTEC_PCI_DEVICE(0x8564), //PFXL 64XG3 | ||
| 1571 | SWITCHTEC_PCI_DEVICE(0x8565), //PFXL 80XG3 | ||
| 1572 | SWITCHTEC_PCI_DEVICE(0x8566), //PFXL 96XG3 | ||
| 1573 | SWITCHTEC_PCI_DEVICE(0x8571), //PFXI 24XG3 | ||
| 1574 | SWITCHTEC_PCI_DEVICE(0x8572), //PFXI 32XG3 | ||
| 1575 | SWITCHTEC_PCI_DEVICE(0x8573), //PFXI 48XG3 | ||
| 1576 | SWITCHTEC_PCI_DEVICE(0x8574), //PFXI 64XG3 | ||
| 1577 | SWITCHTEC_PCI_DEVICE(0x8575), //PFXI 80XG3 | ||
| 1578 | SWITCHTEC_PCI_DEVICE(0x8576), //PFXI 96XG3 | ||
| 1543 | {0} | 1579 | {0} |
| 1544 | }; | 1580 | }; |
| 1545 | MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); | 1581 | MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl); |
