diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-14 13:30:10 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-05-14 13:30:10 -0400 |
| commit | 414147d99b928c574ed76e9374a5d2cb77866a29 (patch) | |
| tree | 46d193f8db5be5d35f6f4239254dcc8ae2269572 | |
| parent | 318222a35bfb0ae9b5ff3e359a583463e6cfcd94 (diff) | |
| parent | c7a1c2bbb65e25551d585fba0fd36a01e0a22690 (diff) | |
Merge tag 'pci-v5.2-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci
Pull PCI updates from Bjorn Helgaas:
"Enumeration changes:
- Add _HPX Type 3 settings support, which gives firmware more
influence over device configuration (Alexandru Gagniuc)
- Support fixed bus numbers from bridge Enhanced Allocation
capabilities (Subbaraya Sundeep)
- Add "external-facing" DT property to identify cases where we
require IOMMU protection against untrusted devices (Jean-Philippe
Brucker)
- Enable PCIe services for host controller drivers that use managed
host bridge alloc (Jean-Philippe Brucker)
- Log PCIe port service messages with pci_dev, not the pcie_device
(Frederick Lawler)
- Convert pciehp from pciehp_debug module parameter to generic
dynamic debug (Frederick Lawler)
Peer-to-peer DMA:
- Add whitelist of Root Complexes that support peer-to-peer DMA
between Root Ports (Christian König)
Native controller drivers:
- Add PCI host bridge DMA ranges for bridges that can't DMA
everywhere, e.g., iProc (Srinath Mannam)
- Add Amazon Annapurna Labs PCIe host controller driver (Jonathan
Chocron)
- Fix Tegra MSI target allocation so DMA doesn't generate unwanted
MSIs (Vidya Sagar)
- Fix of_node reference leaks (Wen Yang)
- Fix Hyper-V module unload & device removal issues (Dexuan Cui)
- Cleanup R-Car driver (Marek Vasut)
- Cleanup Keystone driver (Kishon Vijay Abraham I)
- Cleanup i.MX6 driver (Andrey Smirnov)
Significant bug fixes:
- Reset Lenovo ThinkPad P50 GPU so nouveau works after reboot (Lyude
Paul)
- Fix Switchtec firmware update performance issue (Wesley Sheng)
- Work around Pericom switch link retraining erratum (Stefan Mätje)"
* tag 'pci-v5.2-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci: (141 commits)
MAINTAINERS: Add Karthikeyan Mitran and Hou Zhiqiang for Mobiveil PCI
PCI: pciehp: Remove pointless MY_NAME definition
PCI: pciehp: Remove pointless PCIE_MODULE_NAME definition
PCI: pciehp: Remove unused dbg/err/info/warn() wrappers
PCI: pciehp: Log messages with pci_dev, not pcie_device
PCI: pciehp: Replace pciehp_debug module param with dyndbg
PCI: pciehp: Remove pciehp_debug uses
PCI/AER: Log messages with pci_dev, not pcie_device
PCI/DPC: Log messages with pci_dev, not pcie_device
PCI/PME: Replace dev_printk(KERN_DEBUG) with dev_info()
PCI/AER: Replace dev_printk(KERN_DEBUG) with dev_info()
PCI: Replace dev_printk(KERN_DEBUG) with dev_info(), etc
PCI: Replace printk(KERN_INFO) with pr_info(), etc
PCI: Use dev_printk() when possible
PCI: Cleanup setup-bus.c comments and whitespace
PCI: imx6: Allow asynchronous probing
PCI: dwc: Save root bus for driver remove hooks
PCI: dwc: Use devm_pci_alloc_host_bridge() to simplify code
PCI: dwc: Free MSI in dw_pcie_host_init() error path
PCI: dwc: Free MSI IRQ page in dw_pcie_free_msi()
...
92 files changed, 2911 insertions, 1621 deletions
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index c124f9bc11f3..5561a1c060d0 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt | |||
| @@ -4,8 +4,11 @@ Required properties: | |||
| 4 | - compatible: | 4 | - compatible: |
| 5 | "snps,dw-pcie" for RC mode; | 5 | "snps,dw-pcie" for RC mode; |
| 6 | "snps,dw-pcie-ep" for EP mode; | 6 | "snps,dw-pcie-ep" for EP mode; |
| 7 | - reg: Should contain the configuration address space. | 7 | - reg: For designware cores version < 4.80 contains the configuration |
| 8 | - reg-names: Must be "config" for the PCIe configuration space. | 8 | address space. For designware core version >= 4.80, contains |
| 9 | the configuration and ATU address space | ||
| 10 | - reg-names: Must be "config" for the PCIe configuration space and "atu" for | ||
| 11 | the ATU address space. | ||
| 9 | (The old way of getting the configuration address space from "ranges" | 12 | (The old way of getting the configuration address space from "ranges" |
| 10 | is deprecated and should be avoided.) | 13 | is deprecated and should be avoided.) |
| 11 | - num-lanes: number of lanes to use | 14 | - num-lanes: number of lanes to use |
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt index 2030ee0dc4f9..47202a2938f2 100644 --- a/Documentation/devicetree/bindings/pci/pci-keystone.txt +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt | |||
| @@ -11,16 +11,24 @@ described here as well as properties that are not applicable. | |||
| 11 | 11 | ||
| 12 | Required Properties:- | 12 | Required Properties:- |
| 13 | 13 | ||
| 14 | compatibility: "ti,keystone-pcie" | 14 | compatibility: Should be "ti,keystone-pcie" for RC on Keystone2 SoC |
| 15 | reg: index 1 is the base address and length of DW application registers. | 15 | Should be "ti,am654-pcie-rc" for RC on AM654x SoC |
| 16 | index 2 is the base address and length of PCI device ID register. | 16 | reg: Three register ranges as listed in the reg-names property |
| 17 | reg-names: "dbics" for the DesignWare PCIe registers, "app" for the | ||
| 18 | TI specific application registers, "config" for the | ||
| 19 | configuration space address | ||
| 17 | 20 | ||
| 18 | pcie_msi_intc : Interrupt controller device node for MSI IRQ chip | 21 | pcie_msi_intc : Interrupt controller device node for MSI IRQ chip |
| 19 | interrupt-cells: should be set to 1 | 22 | interrupt-cells: should be set to 1 |
| 20 | interrupts: GIC interrupt lines connected to PCI MSI interrupt lines | 23 | interrupts: GIC interrupt lines connected to PCI MSI interrupt lines |
| 24 | (required if the compatible is "ti,keystone-pcie") | ||
| 25 | msi-map: As specified in Documentation/devicetree/bindings/pci/pci-msi.txt | ||
| 26 | (required if the compatible is "ti,am654-pcie-rc"). | ||
| 21 | 27 | ||
| 22 | ti,syscon-pcie-id : phandle to the device control module required to set device | 28 | ti,syscon-pcie-id : phandle to the device control module required to set device |
| 23 | id and vendor id. | 29 | id and vendor id. |
| 30 | ti,syscon-pcie-mode : phandle to the device control module required to configure | ||
| 31 | PCI in either RC mode or EP mode. | ||
| 24 | 32 | ||
| 25 | Example: | 33 | Example: |
| 26 | pcie_msi_intc: msi-interrupt-controller { | 34 | pcie_msi_intc: msi-interrupt-controller { |
| @@ -61,3 +69,47 @@ Optional properties:- | |||
| 61 | DesignWare DT Properties not applicable for Keystone PCI | 69 | DesignWare DT Properties not applicable for Keystone PCI |
| 62 | 70 | ||
| 63 | 1. pcie_bus clock-names not used. Instead, a phandle to phys is used. | 71 | 1. pcie_bus clock-names not used. Instead, a phandle to phys is used. |
| 72 | |||
| 73 | AM654 PCIe Endpoint | ||
| 74 | =================== | ||
| 75 | |||
| 76 | Required Properties:- | ||
| 77 | |||
| 78 | compatibility: Should be "ti,am654-pcie-ep" for EP on AM654x SoC | ||
| 79 | reg: Four register ranges as listed in the reg-names property | ||
| 80 | reg-names: "dbics" for the DesignWare PCIe registers, "app" for the | ||
| 81 | TI specific application registers, "atu" for the | ||
| 82 | Address Translation Unit configuration registers and | ||
| 83 | "addr_space" used to map remote RC address space | ||
| 84 | num-ib-windows: As specified in | ||
| 85 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
| 86 | num-ob-windows: As specified in | ||
| 87 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
| 88 | num-lanes: As specified in | ||
| 89 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
| 90 | power-domains: As documented by the generic PM domain bindings in | ||
| 91 | Documentation/devicetree/bindings/power/power_domain.txt. | ||
| 92 | ti,syscon-pcie-mode: phandle to the device control module required to configure | ||
| 93 | PCI in either RC mode or EP mode. | ||
| 94 | |||
| 95 | Optional properties:- | ||
| 96 | |||
| 97 | phys: list of PHY specifiers (used by generic PHY framework) | ||
| 98 | phy-names: must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the | ||
| 99 | number of lanes as specified in *num-lanes* property. | ||
| 100 | ("phys" and "phy-names" DT bindings are specified in | ||
| 101 | Documentation/devicetree/bindings/phy/phy-bindings.txt) | ||
| 102 | interrupts: platform interrupt for error interrupts. | ||
| 103 | |||
| 104 | pcie-ep { | ||
| 105 | compatible = "ti,am654-pcie-ep"; | ||
| 106 | reg = <0x5500000 0x1000>, <0x5501000 0x1000>, | ||
| 107 | <0x10000000 0x8000000>, <0x5506000 0x1000>; | ||
| 108 | reg-names = "app", "dbics", "addr_space", "atu"; | ||
| 109 | power-domains = <&k3_pds 120>; | ||
| 110 | ti,syscon-pcie-mode = <&pcie0_mode>; | ||
| 111 | num-lanes = <1>; | ||
| 112 | num-ib-windows = <16>; | ||
| 113 | num-ob-windows = <16>; | ||
| 114 | interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>; | ||
| 115 | }; | ||
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt index c77981c5dd18..92c01db610df 100644 --- a/Documentation/devicetree/bindings/pci/pci.txt +++ b/Documentation/devicetree/bindings/pci/pci.txt | |||
| @@ -24,3 +24,53 @@ driver implementation may support the following properties: | |||
| 24 | unsupported link speed, for instance, trying to do training for | 24 | unsupported link speed, for instance, trying to do training for |
| 25 | unsupported link speed, etc. Must be '4' for gen4, '3' for gen3, '2' | 25 | unsupported link speed, etc. Must be '4' for gen4, '3' for gen3, '2' |
| 26 | for gen2, and '1' for gen1. Any other values are invalid. | 26 | for gen2, and '1' for gen1. Any other values are invalid. |
| 27 | |||
| 28 | PCI-PCI Bridge properties | ||
| 29 | ------------------------- | ||
| 30 | |||
| 31 | PCIe root ports and switch ports may be described explicitly in the device | ||
| 32 | tree, as children of the host bridge node. Even though those devices are | ||
| 33 | discoverable by probing, it might be necessary to describe properties that | ||
| 34 | aren't provided by standard PCIe capabilities. | ||
| 35 | |||
| 36 | Required properties: | ||
| 37 | |||
| 38 | - reg: | ||
| 39 | Identifies the PCI-PCI bridge. As defined in the IEEE Std 1275-1994 | ||
| 40 | document, it is a five-cell address encoded as (phys.hi phys.mid | ||
| 41 | phys.lo size.hi size.lo). phys.hi should contain the device's BDF as | ||
| 42 | 0b00000000 bbbbbbbb dddddfff 00000000. The other cells should be zero. | ||
| 43 | |||
| 44 | The bus number is defined by firmware, through the standard bridge | ||
| 45 | configuration mechanism. If this port is a switch port, then firmware | ||
| 46 | allocates the bus number and writes it into the Secondary Bus Number | ||
| 47 | register of the bridge directly above this port. Otherwise, the bus | ||
| 48 | number of a root port is the first number in the bus-range property, | ||
| 49 | defaulting to zero. | ||
| 50 | |||
| 51 | If firmware leaves the ARI Forwarding Enable bit set in the bridge | ||
| 52 | above this port, then phys.hi contains the 8-bit function number as | ||
| 53 | 0b00000000 bbbbbbbb ffffffff 00000000. Note that the PCIe specification | ||
| 54 | recommends that firmware only leaves ARI enabled when it knows that the | ||
| 55 | OS is ARI-aware. | ||
| 56 | |||
| 57 | Optional properties: | ||
| 58 | |||
| 59 | - external-facing: | ||
| 60 | When present, the port is external-facing. All bridges and endpoints | ||
| 61 | downstream of this port are external to the machine. The OS can, for | ||
| 62 | example, use this information to identify devices that cannot be | ||
| 63 | trusted with relaxed DMA protection, as users could easily attach | ||
| 64 | malicious devices to this port. | ||
| 65 | |||
| 66 | Example: | ||
| 67 | |||
| 68 | pcie@10000000 { | ||
| 69 | compatible = "pci-host-ecam-generic"; | ||
| 70 | ... | ||
| 71 | pcie@0008 { | ||
| 72 | /* Root port 00:01.0 is external-facing */ | ||
| 73 | reg = <0x00000800 0 0 0 0>; | ||
| 74 | external-facing; | ||
| 75 | }; | ||
| 76 | }; | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 372e60e416f6..a8b3eefc37a2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -12026,7 +12026,8 @@ F: include/linux/switchtec.h | |||
| 12026 | F: drivers/ntb/hw/mscc/ | 12026 | F: drivers/ntb/hw/mscc/ |
| 12027 | 12027 | ||
| 12028 | PCI DRIVER FOR MOBIVEIL PCIE IP | 12028 | PCI DRIVER FOR MOBIVEIL PCIE IP |
| 12029 | M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in> | 12029 | M: Karthikeyan Mitran <m.karthikeyan@mobiveil.co.in> |
| 12030 | M: Hou Zhiqiang <Zhiqiang.Hou@nxp.com> | ||
| 12030 | L: linux-pci@vger.kernel.org | 12031 | L: linux-pci@vger.kernel.org |
| 12031 | S: Supported | 12032 | S: Supported |
| 12032 | F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt | 12033 | F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt |
| @@ -12160,6 +12161,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lpieralisi/pci.git/ | |||
| 12160 | S: Supported | 12161 | S: Supported |
| 12161 | F: drivers/pci/controller/ | 12162 | F: drivers/pci/controller/ |
| 12162 | 12163 | ||
| 12164 | PCIE DRIVER FOR ANNAPURNA LABS | ||
| 12165 | M: Jonathan Chocron <jonnyc@amazon.com> | ||
| 12166 | L: linux-pci@vger.kernel.org | ||
| 12167 | S: Maintained | ||
| 12168 | F: drivers/pci/controller/dwc/pcie-al.c | ||
| 12169 | |||
| 12163 | PCIE DRIVER FOR AMLOGIC MESON | 12170 | PCIE DRIVER FOR AMLOGIC MESON |
| 12164 | M: Yue Wang <yue.wang@Amlogic.com> | 12171 | M: Yue Wang <yue.wang@Amlogic.com> |
| 12165 | L: linux-pci@vger.kernel.org | 12172 | L: linux-pci@vger.kernel.org |
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi index 976d92a94738..43307bad3f0d 100644 --- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi +++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi | |||
| @@ -819,7 +819,6 @@ | |||
| 819 | #size-cells = <2>; | 819 | #size-cells = <2>; |
| 820 | #interrupt-cells = <1>; | 820 | #interrupt-cells = <1>; |
| 821 | ranges; | 821 | ranges; |
| 822 | num-lanes = <1>; | ||
| 823 | interrupt-map-mask = <0 0 0 7>; | 822 | interrupt-map-mask = <0 0 0 7>; |
| 824 | interrupt-map = <0 0 0 1 &pcie_intc0 0>, | 823 | interrupt-map = <0 0 0 1 &pcie_intc0 0>, |
| 825 | <0 0 0 2 &pcie_intc0 1>, | 824 | <0 0 0 2 &pcie_intc0 1>, |
| @@ -840,7 +839,6 @@ | |||
| 840 | #size-cells = <2>; | 839 | #size-cells = <2>; |
| 841 | #interrupt-cells = <1>; | 840 | #interrupt-cells = <1>; |
| 842 | ranges; | 841 | ranges; |
| 843 | num-lanes = <1>; | ||
| 844 | interrupt-map-mask = <0 0 0 7>; | 842 | interrupt-map-mask = <0 0 0 7>; |
| 845 | interrupt-map = <0 0 0 1 &pcie_intc1 0>, | 843 | interrupt-map = <0 0 0 1 &pcie_intc1 0>, |
| 846 | <0 0 0 2 &pcie_intc1 1>, | 844 | <0 0 0 2 &pcie_intc1 1>, |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index dc23d9d2a7d9..495550432f3d 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
| @@ -1213,9 +1213,8 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, | |||
| 1213 | * Currently we only support radix and non-zero LPCR only makes sense | 1213 | * Currently we only support radix and non-zero LPCR only makes sense |
| 1214 | * for hash tables so skiboot expects the LPCR parameter to be a zero. | 1214 | * for hash tables so skiboot expects the LPCR parameter to be a zero. |
| 1215 | */ | 1215 | */ |
| 1216 | ret = opal_npu_map_lpar(nphb->opal_id, | 1216 | ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), lparid, |
| 1217 | PCI_DEVID(gpdev->bus->number, gpdev->devfn), lparid, | 1217 | 0 /* LPCR bits */); |
| 1218 | 0 /* LPCR bits */); | ||
| 1219 | if (ret) { | 1218 | if (ret) { |
| 1220 | dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); | 1219 | dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); |
| 1221 | return ret; | 1220 | return ret; |
| @@ -1224,7 +1223,7 @@ int pnv_npu2_map_lpar_dev(struct pci_dev *gpdev, unsigned int lparid, | |||
| 1224 | dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n", | 1223 | dev_dbg(&gpdev->dev, "init context opalid=%llu msr=%lx\n", |
| 1225 | nphb->opal_id, msr); | 1224 | nphb->opal_id, msr); |
| 1226 | ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr, | 1225 | ret = opal_npu_init_context(nphb->opal_id, 0/*__unused*/, msr, |
| 1227 | PCI_DEVID(gpdev->bus->number, gpdev->devfn)); | 1226 | pci_dev_id(gpdev)); |
| 1228 | if (ret < 0) | 1227 | if (ret < 0) |
| 1229 | dev_err(&gpdev->dev, "Failed to init context: %d\n", ret); | 1228 | dev_err(&gpdev->dev, "Failed to init context: %d\n", ret); |
| 1230 | else | 1229 | else |
| @@ -1258,7 +1257,7 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev) | |||
| 1258 | dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n", | 1257 | dev_dbg(&gpdev->dev, "destroy context opalid=%llu\n", |
| 1259 | nphb->opal_id); | 1258 | nphb->opal_id); |
| 1260 | ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/, | 1259 | ret = opal_npu_destroy_context(nphb->opal_id, 0/*__unused*/, |
| 1261 | PCI_DEVID(gpdev->bus->number, gpdev->devfn)); | 1260 | pci_dev_id(gpdev)); |
| 1262 | if (ret < 0) { | 1261 | if (ret < 0) { |
| 1263 | dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret); | 1262 | dev_err(&gpdev->dev, "Failed to destroy context: %d\n", ret); |
| 1264 | return ret; | 1263 | return ret; |
| @@ -1266,9 +1265,8 @@ int pnv_npu2_unmap_lpar_dev(struct pci_dev *gpdev) | |||
| 1266 | 1265 | ||
| 1267 | /* Set LPID to 0 anyway, just to be safe */ | 1266 | /* Set LPID to 0 anyway, just to be safe */ |
| 1268 | dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id); | 1267 | dev_dbg(&gpdev->dev, "Map LPAR opalid=%llu lparid=0\n", nphb->opal_id); |
| 1269 | ret = opal_npu_map_lpar(nphb->opal_id, | 1268 | ret = opal_npu_map_lpar(nphb->opal_id, pci_dev_id(gpdev), 0 /*LPID*/, |
| 1270 | PCI_DEVID(gpdev->bus->number, gpdev->devfn), 0 /*LPID*/, | 1269 | 0 /* LPCR bits */); |
| 1271 | 0 /* LPCR bits */); | ||
| 1272 | if (ret) | 1270 | if (ret) |
| 1273 | dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); | 1271 | dev_err(&gpdev->dev, "Error %d mapping device to LPAR\n", ret); |
| 1274 | 1272 | ||
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index 52e55108404e..d3a73f9335e1 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c | |||
| @@ -1119,6 +1119,8 @@ static const struct dmi_system_id pciirq_dmi_table[] __initconst = { | |||
| 1119 | 1119 | ||
| 1120 | void __init pcibios_irq_init(void) | 1120 | void __init pcibios_irq_init(void) |
| 1121 | { | 1121 | { |
| 1122 | struct irq_routing_table *rtable = NULL; | ||
| 1123 | |||
| 1122 | DBG(KERN_DEBUG "PCI: IRQ init\n"); | 1124 | DBG(KERN_DEBUG "PCI: IRQ init\n"); |
| 1123 | 1125 | ||
| 1124 | if (raw_pci_ops == NULL) | 1126 | if (raw_pci_ops == NULL) |
| @@ -1129,8 +1131,10 @@ void __init pcibios_irq_init(void) | |||
| 1129 | pirq_table = pirq_find_routing_table(); | 1131 | pirq_table = pirq_find_routing_table(); |
| 1130 | 1132 | ||
| 1131 | #ifdef CONFIG_PCI_BIOS | 1133 | #ifdef CONFIG_PCI_BIOS |
| 1132 | if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) | 1134 | if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN)) { |
| 1133 | pirq_table = pcibios_get_irq_routing_table(); | 1135 | pirq_table = pcibios_get_irq_routing_table(); |
| 1136 | rtable = pirq_table; | ||
| 1137 | } | ||
| 1134 | #endif | 1138 | #endif |
| 1135 | if (pirq_table) { | 1139 | if (pirq_table) { |
| 1136 | pirq_peer_trick(); | 1140 | pirq_peer_trick(); |
| @@ -1145,8 +1149,10 @@ void __init pcibios_irq_init(void) | |||
| 1145 | * If we're using the I/O APIC, avoid using the PCI IRQ | 1149 | * If we're using the I/O APIC, avoid using the PCI IRQ |
| 1146 | * routing table | 1150 | * routing table |
| 1147 | */ | 1151 | */ |
| 1148 | if (io_apic_assign_pci_irqs) | 1152 | if (io_apic_assign_pci_irqs) { |
| 1153 | kfree(rtable); | ||
| 1149 | pirq_table = NULL; | 1154 | pirq_table = NULL; |
| 1155 | } | ||
| 1150 | } | 1156 | } |
| 1151 | 1157 | ||
| 1152 | x86_init.pci.fixup_irqs(); | 1158 | x86_init.pci.fixup_irqs(); |
diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index a4e8432fc2fb..b42be067fb83 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c | |||
| @@ -52,6 +52,18 @@ struct mcfg_fixup { | |||
| 52 | static struct mcfg_fixup mcfg_quirks[] = { | 52 | static struct mcfg_fixup mcfg_quirks[] = { |
| 53 | /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ | 53 | /* { OEM_ID, OEM_TABLE_ID, REV, SEGMENT, BUS_RANGE, ops, cfgres }, */ |
| 54 | 54 | ||
| 55 | #define AL_ECAM(table_id, rev, seg, ops) \ | ||
| 56 | { "AMAZON", table_id, rev, seg, MCFG_BUS_ANY, ops } | ||
| 57 | |||
| 58 | AL_ECAM("GRAVITON", 0, 0, &al_pcie_ops), | ||
| 59 | AL_ECAM("GRAVITON", 0, 1, &al_pcie_ops), | ||
| 60 | AL_ECAM("GRAVITON", 0, 2, &al_pcie_ops), | ||
| 61 | AL_ECAM("GRAVITON", 0, 3, &al_pcie_ops), | ||
| 62 | AL_ECAM("GRAVITON", 0, 4, &al_pcie_ops), | ||
| 63 | AL_ECAM("GRAVITON", 0, 5, &al_pcie_ops), | ||
| 64 | AL_ECAM("GRAVITON", 0, 6, &al_pcie_ops), | ||
| 65 | AL_ECAM("GRAVITON", 0, 7, &al_pcie_ops), | ||
| 66 | |||
| 55 | #define QCOM_ECAM32(seg) \ | 67 | #define QCOM_ECAM32(seg) \ |
| 56 | { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } | 68 | { "QCOM ", "QDF2432 ", 1, seg, MCFG_BUS_ANY, &pci_32b_ops } |
| 57 | 69 | ||
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index 707aafc7c2aa..c36781a9b493 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c | |||
| @@ -145,6 +145,7 @@ static struct pci_osc_bit_struct pci_osc_support_bit[] = { | |||
| 145 | { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" }, | 145 | { OSC_PCI_CLOCK_PM_SUPPORT, "ClockPM" }, |
| 146 | { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" }, | 146 | { OSC_PCI_SEGMENT_GROUPS_SUPPORT, "Segments" }, |
| 147 | { OSC_PCI_MSI_SUPPORT, "MSI" }, | 147 | { OSC_PCI_MSI_SUPPORT, "MSI" }, |
| 148 | { OSC_PCI_HPX_TYPE_3_SUPPORT, "HPX-Type3" }, | ||
| 148 | }; | 149 | }; |
| 149 | 150 | ||
| 150 | static struct pci_osc_bit_struct pci_osc_control_bit[] = { | 151 | static struct pci_osc_bit_struct pci_osc_control_bit[] = { |
| @@ -446,6 +447,7 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm, | |||
| 446 | * PCI domains, so we indicate this in _OSC support capabilities. | 447 | * PCI domains, so we indicate this in _OSC support capabilities. |
| 447 | */ | 448 | */ |
| 448 | support = OSC_PCI_SEGMENT_GROUPS_SUPPORT; | 449 | support = OSC_PCI_SEGMENT_GROUPS_SUPPORT; |
| 450 | support |= OSC_PCI_HPX_TYPE_3_SUPPORT; | ||
| 449 | if (pci_ext_cfg_avail()) | 451 | if (pci_ext_cfg_avail()) |
| 450 | support |= OSC_PCI_EXT_CONFIG_SUPPORT; | 452 | support |= OSC_PCI_EXT_CONFIG_SUPPORT; |
| 451 | if (pcie_aspm_support_enabled()) | 453 | if (pcie_aspm_support_enabled()) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 2cb09e088dce..769dbc7be8cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c | |||
| @@ -1272,8 +1272,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu) | |||
| 1272 | 1272 | ||
| 1273 | dev->node_props.vendor_id = gpu->pdev->vendor; | 1273 | dev->node_props.vendor_id = gpu->pdev->vendor; |
| 1274 | dev->node_props.device_id = gpu->pdev->device; | 1274 | dev->node_props.device_id = gpu->pdev->device; |
| 1275 | dev->node_props.location_id = PCI_DEVID(gpu->pdev->bus->number, | 1275 | dev->node_props.location_id = pci_dev_id(gpu->pdev); |
| 1276 | gpu->pdev->devfn); | ||
| 1277 | dev->node_props.max_engine_clk_fcompute = | 1276 | dev->node_props.max_engine_clk_fcompute = |
| 1278 | amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); | 1277 | amdgpu_amdkfd_get_max_engine_clock_in_mhz(dev->gpu->kgd); |
| 1279 | dev->node_props.max_engine_clk_ccompute = | 1278 | dev->node_props.max_engine_clk_ccompute = |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index fde16c5b0a70..09c9e45f7fa2 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -165,7 +165,7 @@ static inline u16 get_pci_device_id(struct device *dev) | |||
| 165 | { | 165 | { |
| 166 | struct pci_dev *pdev = to_pci_dev(dev); | 166 | struct pci_dev *pdev = to_pci_dev(dev); |
| 167 | 167 | ||
| 168 | return PCI_DEVID(pdev->bus->number, pdev->devfn); | 168 | return pci_dev_id(pdev); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static inline int get_acpihid_device_id(struct device *dev, | 171 | static inline int get_acpihid_device_id(struct device *dev, |
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 20abd19bbfbe..5e898047c390 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c | |||
| @@ -206,12 +206,13 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie, | |||
| 206 | return 0; | 206 | return 0; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | static void iova_reserve_pci_windows(struct pci_dev *dev, | 209 | static int iova_reserve_pci_windows(struct pci_dev *dev, |
| 210 | struct iova_domain *iovad) | 210 | struct iova_domain *iovad) |
| 211 | { | 211 | { |
| 212 | struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); | 212 | struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); |
| 213 | struct resource_entry *window; | 213 | struct resource_entry *window; |
| 214 | unsigned long lo, hi; | 214 | unsigned long lo, hi; |
| 215 | phys_addr_t start = 0, end; | ||
| 215 | 216 | ||
| 216 | resource_list_for_each_entry(window, &bridge->windows) { | 217 | resource_list_for_each_entry(window, &bridge->windows) { |
| 217 | if (resource_type(window->res) != IORESOURCE_MEM) | 218 | if (resource_type(window->res) != IORESOURCE_MEM) |
| @@ -221,6 +222,31 @@ static void iova_reserve_pci_windows(struct pci_dev *dev, | |||
| 221 | hi = iova_pfn(iovad, window->res->end - window->offset); | 222 | hi = iova_pfn(iovad, window->res->end - window->offset); |
| 222 | reserve_iova(iovad, lo, hi); | 223 | reserve_iova(iovad, lo, hi); |
| 223 | } | 224 | } |
| 225 | |||
| 226 | /* Get reserved DMA windows from host bridge */ | ||
| 227 | resource_list_for_each_entry(window, &bridge->dma_ranges) { | ||
| 228 | end = window->res->start - window->offset; | ||
| 229 | resv_iova: | ||
| 230 | if (end > start) { | ||
| 231 | lo = iova_pfn(iovad, start); | ||
| 232 | hi = iova_pfn(iovad, end); | ||
| 233 | reserve_iova(iovad, lo, hi); | ||
| 234 | } else { | ||
| 235 | /* dma_ranges list should be sorted */ | ||
| 236 | dev_err(&dev->dev, "Failed to reserve IOVA\n"); | ||
| 237 | return -EINVAL; | ||
| 238 | } | ||
| 239 | |||
| 240 | start = window->res->end - window->offset + 1; | ||
| 241 | /* If window is last entry */ | ||
| 242 | if (window->node.next == &bridge->dma_ranges && | ||
| 243 | end != ~(dma_addr_t)0) { | ||
| 244 | end = ~(dma_addr_t)0; | ||
| 245 | goto resv_iova; | ||
| 246 | } | ||
| 247 | } | ||
| 248 | |||
| 249 | return 0; | ||
| 224 | } | 250 | } |
| 225 | 251 | ||
| 226 | static int iova_reserve_iommu_regions(struct device *dev, | 252 | static int iova_reserve_iommu_regions(struct device *dev, |
| @@ -232,8 +258,11 @@ static int iova_reserve_iommu_regions(struct device *dev, | |||
| 232 | LIST_HEAD(resv_regions); | 258 | LIST_HEAD(resv_regions); |
| 233 | int ret = 0; | 259 | int ret = 0; |
| 234 | 260 | ||
| 235 | if (dev_is_pci(dev)) | 261 | if (dev_is_pci(dev)) { |
| 236 | iova_reserve_pci_windows(to_pci_dev(dev), iovad); | 262 | ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad); |
| 263 | if (ret) | ||
| 264 | return ret; | ||
| 265 | } | ||
| 237 | 266 | ||
| 238 | iommu_get_resv_regions(dev, &resv_regions); | 267 | iommu_get_resv_regions(dev, &resv_regions); |
| 239 | list_for_each_entry(region, &resv_regions, list) { | 268 | list_for_each_entry(region, &resv_regions, list) { |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a320bda2c305..a209199f3af6 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -1391,7 +1391,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info) | |||
| 1391 | 1391 | ||
| 1392 | /* pdev will be returned if device is not a vf */ | 1392 | /* pdev will be returned if device is not a vf */ |
| 1393 | pf_pdev = pci_physfn(pdev); | 1393 | pf_pdev = pci_physfn(pdev); |
| 1394 | info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn); | 1394 | info->pfsid = pci_dev_id(pf_pdev); |
| 1395 | } | 1395 | } |
| 1396 | 1396 | ||
| 1397 | #ifdef CONFIG_INTEL_IOMMU_SVM | 1397 | #ifdef CONFIG_INTEL_IOMMU_SVM |
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 634d8f059019..4160aa9f3f80 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c | |||
| @@ -424,7 +424,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
| 424 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias); | 424 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, data.alias); |
| 425 | else | 425 | else |
| 426 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, | 426 | set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16, |
| 427 | PCI_DEVID(dev->bus->number, dev->devfn)); | 427 | pci_dev_id(dev)); |
| 428 | 428 | ||
| 429 | return 0; | 429 | return 0; |
| 430 | } | 430 | } |
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 29582fe57151..7b015f2a1c6f 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c | |||
| @@ -75,6 +75,11 @@ | |||
| 75 | #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 | 75 | #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 |
| 76 | #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 | 76 | #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 |
| 77 | 77 | ||
| 78 | #define PCI_DEVICE_ID_TI_AM654 0xb00c | ||
| 79 | |||
| 80 | #define is_am654_pci_dev(pdev) \ | ||
| 81 | ((pdev)->device == PCI_DEVICE_ID_TI_AM654) | ||
| 82 | |||
| 78 | static DEFINE_IDA(pci_endpoint_test_ida); | 83 | static DEFINE_IDA(pci_endpoint_test_ida); |
| 79 | 84 | ||
| 80 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ | 85 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ |
| @@ -588,6 +593,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, | |||
| 588 | int ret = -EINVAL; | 593 | int ret = -EINVAL; |
| 589 | enum pci_barno bar; | 594 | enum pci_barno bar; |
| 590 | struct pci_endpoint_test *test = to_endpoint_test(file->private_data); | 595 | struct pci_endpoint_test *test = to_endpoint_test(file->private_data); |
| 596 | struct pci_dev *pdev = test->pdev; | ||
| 591 | 597 | ||
| 592 | mutex_lock(&test->mutex); | 598 | mutex_lock(&test->mutex); |
| 593 | switch (cmd) { | 599 | switch (cmd) { |
| @@ -595,6 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, | |||
| 595 | bar = arg; | 601 | bar = arg; |
| 596 | if (bar < 0 || bar > 5) | 602 | if (bar < 0 || bar > 5) |
| 597 | goto ret; | 603 | goto ret; |
| 604 | if (is_am654_pci_dev(pdev) && bar == BAR_0) | ||
| 605 | goto ret; | ||
| 598 | ret = pci_endpoint_test_bar(test, bar); | 606 | ret = pci_endpoint_test_bar(test, bar); |
| 599 | break; | 607 | break; |
| 600 | case PCITEST_LEGACY_IRQ: | 608 | case PCITEST_LEGACY_IRQ: |
| @@ -662,6 +670,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, | |||
| 662 | data = (struct pci_endpoint_test_data *)ent->driver_data; | 670 | data = (struct pci_endpoint_test_data *)ent->driver_data; |
| 663 | if (data) { | 671 | if (data) { |
| 664 | test_reg_bar = data->test_reg_bar; | 672 | test_reg_bar = data->test_reg_bar; |
| 673 | test->test_reg_bar = test_reg_bar; | ||
| 665 | test->alignment = data->alignment; | 674 | test->alignment = data->alignment; |
| 666 | irq_type = data->irq_type; | 675 | irq_type = data->irq_type; |
| 667 | } | 676 | } |
| @@ -785,11 +794,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) | |||
| 785 | pci_disable_device(pdev); | 794 | pci_disable_device(pdev); |
| 786 | } | 795 | } |
| 787 | 796 | ||
| 797 | static const struct pci_endpoint_test_data am654_data = { | ||
| 798 | .test_reg_bar = BAR_2, | ||
| 799 | .alignment = SZ_64K, | ||
| 800 | .irq_type = IRQ_TYPE_MSI, | ||
| 801 | }; | ||
| 802 | |||
| 788 | static const struct pci_device_id pci_endpoint_test_tbl[] = { | 803 | static const struct pci_device_id pci_endpoint_test_tbl[] = { |
| 789 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, | 804 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, |
| 790 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, | 805 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, |
| 791 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, | 806 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, |
| 792 | { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, | 807 | { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, |
| 808 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), | ||
| 809 | .driver_data = (kernel_ulong_t)&am654_data | ||
| 810 | }, | ||
| 793 | { } | 811 | { } |
| 794 | }; | 812 | }; |
| 795 | MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); | 813 | MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 549be1c76a89..2e20334b76a1 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
| @@ -6992,8 +6992,7 @@ static int r8169_mdio_register(struct rtl8169_private *tp) | |||
| 6992 | new_bus->priv = tp; | 6992 | new_bus->priv = tp; |
| 6993 | new_bus->parent = &pdev->dev; | 6993 | new_bus->parent = &pdev->dev; |
| 6994 | new_bus->irq[0] = PHY_IGNORE_INTERRUPT; | 6994 | new_bus->irq[0] = PHY_IGNORE_INTERRUPT; |
| 6995 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", | 6995 | snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); |
| 6996 | PCI_DEVID(pdev->bus->number, pdev->devfn)); | ||
| 6997 | 6996 | ||
| 6998 | new_bus->read = r8169_mdio_read_reg; | 6997 | new_bus->read = r8169_mdio_read_reg; |
| 6999 | new_bus->write = r8169_mdio_write_reg; | 6998 | new_bus->write = r8169_mdio_write_reg; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c index 26db6aa002d1..7cbc01f316fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | |||
| @@ -208,7 +208,7 @@ static int quark_default_data(struct pci_dev *pdev, | |||
| 208 | ret = 1; | 208 | ret = 1; |
| 209 | } | 209 | } |
| 210 | 210 | ||
| 211 | plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn); | 211 | plat->bus_id = pci_dev_id(pdev); |
| 212 | plat->phy_addr = ret; | 212 | plat->phy_addr = ret; |
| 213 | plat->interface = PHY_INTERFACE_MODE_RMII; | 213 | plat->interface = PHY_INTERFACE_MODE_RMII; |
| 214 | 214 | ||
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 657d642fcc67..28cdd8c0213a 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
| @@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ | |||
| 10 | ifdef CONFIG_PCI | 10 | ifdef CONFIG_PCI |
| 11 | obj-$(CONFIG_PROC_FS) += proc.o | 11 | obj-$(CONFIG_PROC_FS) += proc.o |
| 12 | obj-$(CONFIG_SYSFS) += slot.o | 12 | obj-$(CONFIG_SYSFS) += slot.o |
| 13 | obj-$(CONFIG_OF) += of.o | ||
| 14 | obj-$(CONFIG_ACPI) += pci-acpi.o | 13 | obj-$(CONFIG_ACPI) += pci-acpi.o |
| 15 | endif | 14 | endif |
| 16 | 15 | ||
| 16 | obj-$(CONFIG_OF) += of.o | ||
| 17 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o | 17 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o |
| 18 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 18 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
| 19 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ | 19 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ |
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 5cb40b2518f9..495059d923f7 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c | |||
| @@ -23,7 +23,7 @@ void pci_add_resource_offset(struct list_head *resources, struct resource *res, | |||
| 23 | 23 | ||
| 24 | entry = resource_list_create_entry(res, 0); | 24 | entry = resource_list_create_entry(res, 0); |
| 25 | if (!entry) { | 25 | if (!entry) { |
| 26 | printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); | 26 | pr_err("PCI: can't add host bridge window %pR\n", res); |
| 27 | return; | 27 | return; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| @@ -288,8 +288,7 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx) | |||
| 288 | res->end = end; | 288 | res->end = end; |
| 289 | res->flags &= ~IORESOURCE_UNSET; | 289 | res->flags &= ~IORESOURCE_UNSET; |
| 290 | orig_res.flags &= ~IORESOURCE_UNSET; | 290 | orig_res.flags &= ~IORESOURCE_UNSET; |
| 291 | pci_printk(KERN_DEBUG, dev, "%pR clipped to %pR\n", | 291 | pci_info(dev, "%pR clipped to %pR\n", &orig_res, res); |
| 292 | &orig_res, res); | ||
| 293 | 292 | ||
| 294 | return true; | 293 | return true; |
| 295 | } | 294 | } |
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 6ea74b1c0d94..a6ce1ee51b4c 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig | |||
| @@ -103,15 +103,32 @@ config PCIE_SPEAR13XX | |||
| 103 | Say Y here if you want PCIe support on SPEAr13XX SoCs. | 103 | Say Y here if you want PCIe support on SPEAr13XX SoCs. |
| 104 | 104 | ||
| 105 | config PCI_KEYSTONE | 105 | config PCI_KEYSTONE |
| 106 | bool "TI Keystone PCIe controller" | 106 | bool |
| 107 | depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) | 107 | |
| 108 | config PCI_KEYSTONE_HOST | ||
| 109 | bool "PCI Keystone Host Mode" | ||
| 110 | depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) | ||
| 108 | depends on PCI_MSI_IRQ_DOMAIN | 111 | depends on PCI_MSI_IRQ_DOMAIN |
| 109 | select PCIE_DW_HOST | 112 | select PCIE_DW_HOST |
| 113 | select PCI_KEYSTONE | ||
| 114 | default y | ||
| 110 | help | 115 | help |
| 111 | Say Y here if you want to enable PCI controller support on Keystone | 116 | Enables support for the PCIe controller in the Keystone SoC to |
| 112 | SoCs. The PCI controller on Keystone is based on DesignWare hardware | 117 | work in host mode. The PCI controller on Keystone is based on |
| 113 | and therefore the driver re-uses the DesignWare core functions to | 118 | DesignWare hardware and therefore the driver re-uses the |
| 114 | implement the driver. | 119 | DesignWare core functions to implement the driver. |
| 120 | |||
| 121 | config PCI_KEYSTONE_EP | ||
| 122 | bool "PCI Keystone Endpoint Mode" | ||
| 123 | depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) | ||
| 124 | depends on PCI_ENDPOINT | ||
| 125 | select PCIE_DW_EP | ||
| 126 | select PCI_KEYSTONE | ||
| 127 | help | ||
| 128 | Enables support for the PCIe controller in the Keystone SoC to | ||
| 129 | work in endpoint mode. The PCI controller on Keystone is based | ||
| 130 | on DesignWare hardware and therefore the driver re-uses the | ||
| 131 | DesignWare core functions to implement the driver. | ||
| 115 | 132 | ||
| 116 | config PCI_LAYERSCAPE | 133 | config PCI_LAYERSCAPE |
| 117 | bool "Freescale Layerscape PCIe controller" | 134 | bool "Freescale Layerscape PCIe controller" |
diff --git a/drivers/pci/controller/dwc/Makefile b/drivers/pci/controller/dwc/Makefile index b5f3b83cc2b3..b085dfd4fab7 100644 --- a/drivers/pci/controller/dwc/Makefile +++ b/drivers/pci/controller/dwc/Makefile | |||
| @@ -28,5 +28,6 @@ obj-$(CONFIG_PCIE_UNIPHIER) += pcie-uniphier.o | |||
| 28 | # depending on whether ACPI, the DT driver, or both are enabled. | 28 | # depending on whether ACPI, the DT driver, or both are enabled. |
| 29 | 29 | ||
| 30 | ifdef CONFIG_PCI | 30 | ifdef CONFIG_PCI |
| 31 | obj-$(CONFIG_ARM64) += pcie-al.o | ||
| 31 | obj-$(CONFIG_ARM64) += pcie-hisi.o | 32 | obj-$(CONFIG_ARM64) += pcie-hisi.o |
| 32 | endif | 33 | endif |
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index ae84a69ae63a..419451efd58c 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c | |||
| @@ -247,6 +247,7 @@ static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp) | |||
| 247 | 247 | ||
| 248 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | 248 | dra7xx->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
| 249 | &intx_domain_ops, pp); | 249 | &intx_domain_ops, pp); |
| 250 | of_node_put(pcie_intc_node); | ||
| 250 | if (!dra7xx->irq_domain) { | 251 | if (!dra7xx->irq_domain) { |
| 251 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | 252 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); |
| 252 | return -ENODEV; | 253 | return -ENODEV; |
| @@ -406,7 +407,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep) | |||
| 406 | return &dra7xx_pcie_epc_features; | 407 | return &dra7xx_pcie_epc_features; |
| 407 | } | 408 | } |
| 408 | 409 | ||
| 409 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 410 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
| 410 | .ep_init = dra7xx_pcie_ep_init, | 411 | .ep_init = dra7xx_pcie_ep_init, |
| 411 | .raise_irq = dra7xx_pcie_raise_irq, | 412 | .raise_irq = dra7xx_pcie_raise_irq, |
| 412 | .get_features = dra7xx_pcie_get_features, | 413 | .get_features = dra7xx_pcie_get_features, |
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 3d627f94a166..9b5cb5b70389 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c | |||
| @@ -52,6 +52,7 @@ enum imx6_pcie_variants { | |||
| 52 | 52 | ||
| 53 | #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) | 53 | #define IMX6_PCIE_FLAG_IMX6_PHY BIT(0) |
| 54 | #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) | 54 | #define IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE BIT(1) |
| 55 | #define IMX6_PCIE_FLAG_SUPPORTS_SUSPEND BIT(2) | ||
| 55 | 56 | ||
| 56 | struct imx6_pcie_drvdata { | 57 | struct imx6_pcie_drvdata { |
| 57 | enum imx6_pcie_variants variant; | 58 | enum imx6_pcie_variants variant; |
| @@ -89,9 +90,8 @@ struct imx6_pcie { | |||
| 89 | }; | 90 | }; |
| 90 | 91 | ||
| 91 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ | 92 | /* Parameters for the waiting for PCIe PHY PLL to lock on i.MX7 */ |
| 92 | #define PHY_PLL_LOCK_WAIT_MAX_RETRIES 2000 | ||
| 93 | #define PHY_PLL_LOCK_WAIT_USLEEP_MIN 50 | ||
| 94 | #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 | 93 | #define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200 |
| 94 | #define PHY_PLL_LOCK_WAIT_TIMEOUT (2000 * PHY_PLL_LOCK_WAIT_USLEEP_MAX) | ||
| 95 | 95 | ||
| 96 | /* PCIe Root Complex registers (memory-mapped) */ | 96 | /* PCIe Root Complex registers (memory-mapped) */ |
| 97 | #define PCIE_RC_IMX6_MSI_CAP 0x50 | 97 | #define PCIE_RC_IMX6_MSI_CAP 0x50 |
| @@ -104,34 +104,29 @@ struct imx6_pcie { | |||
| 104 | 104 | ||
| 105 | /* PCIe Port Logic registers (memory-mapped) */ | 105 | /* PCIe Port Logic registers (memory-mapped) */ |
| 106 | #define PL_OFFSET 0x700 | 106 | #define PL_OFFSET 0x700 |
| 107 | #define PCIE_PL_PFLR (PL_OFFSET + 0x08) | ||
| 108 | #define PCIE_PL_PFLR_LINK_STATE_MASK (0x3f << 16) | ||
| 109 | #define PCIE_PL_PFLR_FORCE_LINK (1 << 15) | ||
| 110 | #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28) | ||
| 111 | #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c) | ||
| 112 | 107 | ||
| 113 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) | 108 | #define PCIE_PHY_CTRL (PL_OFFSET + 0x114) |
| 114 | #define PCIE_PHY_CTRL_DATA_LOC 0 | 109 | #define PCIE_PHY_CTRL_DATA(x) FIELD_PREP(GENMASK(15, 0), (x)) |
| 115 | #define PCIE_PHY_CTRL_CAP_ADR_LOC 16 | 110 | #define PCIE_PHY_CTRL_CAP_ADR BIT(16) |
| 116 | #define PCIE_PHY_CTRL_CAP_DAT_LOC 17 | 111 | #define PCIE_PHY_CTRL_CAP_DAT BIT(17) |
| 117 | #define PCIE_PHY_CTRL_WR_LOC 18 | 112 | #define PCIE_PHY_CTRL_WR BIT(18) |
| 118 | #define PCIE_PHY_CTRL_RD_LOC 19 | 113 | #define PCIE_PHY_CTRL_RD BIT(19) |
| 119 | 114 | ||
| 120 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) | 115 | #define PCIE_PHY_STAT (PL_OFFSET + 0x110) |
| 121 | #define PCIE_PHY_STAT_ACK_LOC 16 | 116 | #define PCIE_PHY_STAT_ACK BIT(16) |
| 122 | 117 | ||
| 123 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 118 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
| 124 | 119 | ||
| 125 | /* PHY registers (not memory-mapped) */ | 120 | /* PHY registers (not memory-mapped) */ |
| 126 | #define PCIE_PHY_ATEOVRD 0x10 | 121 | #define PCIE_PHY_ATEOVRD 0x10 |
| 127 | #define PCIE_PHY_ATEOVRD_EN (0x1 << 2) | 122 | #define PCIE_PHY_ATEOVRD_EN BIT(2) |
| 128 | #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 | 123 | #define PCIE_PHY_ATEOVRD_REF_CLKDIV_SHIFT 0 |
| 129 | #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 | 124 | #define PCIE_PHY_ATEOVRD_REF_CLKDIV_MASK 0x1 |
| 130 | 125 | ||
| 131 | #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 | 126 | #define PCIE_PHY_MPLL_OVRD_IN_LO 0x11 |
| 132 | #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 | 127 | #define PCIE_PHY_MPLL_MULTIPLIER_SHIFT 2 |
| 133 | #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f | 128 | #define PCIE_PHY_MPLL_MULTIPLIER_MASK 0x7f |
| 134 | #define PCIE_PHY_MPLL_MULTIPLIER_OVRD (0x1 << 9) | 129 | #define PCIE_PHY_MPLL_MULTIPLIER_OVRD BIT(9) |
| 135 | 130 | ||
| 136 | #define PCIE_PHY_RX_ASIC_OUT 0x100D | 131 | #define PCIE_PHY_RX_ASIC_OUT 0x100D |
| 137 | #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) | 132 | #define PCIE_PHY_RX_ASIC_OUT_VALID (1 << 0) |
| @@ -154,19 +149,19 @@ struct imx6_pcie { | |||
| 154 | #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC | 149 | #define PCIE_PHY_CMN_REG26_ATT_MODE 0xBC |
| 155 | 150 | ||
| 156 | #define PHY_RX_OVRD_IN_LO 0x1005 | 151 | #define PHY_RX_OVRD_IN_LO 0x1005 |
| 157 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN (1 << 5) | 152 | #define PHY_RX_OVRD_IN_LO_RX_DATA_EN BIT(5) |
| 158 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN (1 << 3) | 153 | #define PHY_RX_OVRD_IN_LO_RX_PLL_EN BIT(3) |
| 159 | 154 | ||
| 160 | static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, int exp_val) | 155 | static int pcie_phy_poll_ack(struct imx6_pcie *imx6_pcie, bool exp_val) |
| 161 | { | 156 | { |
| 162 | struct dw_pcie *pci = imx6_pcie->pci; | 157 | struct dw_pcie *pci = imx6_pcie->pci; |
| 163 | u32 val; | 158 | bool val; |
| 164 | u32 max_iterations = 10; | 159 | u32 max_iterations = 10; |
| 165 | u32 wait_counter = 0; | 160 | u32 wait_counter = 0; |
| 166 | 161 | ||
| 167 | do { | 162 | do { |
| 168 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); | 163 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT) & |
| 169 | val = (val >> PCIE_PHY_STAT_ACK_LOC) & 0x1; | 164 | PCIE_PHY_STAT_ACK; |
| 170 | wait_counter++; | 165 | wait_counter++; |
| 171 | 166 | ||
| 172 | if (val == exp_val) | 167 | if (val == exp_val) |
| @@ -184,27 +179,27 @@ static int pcie_phy_wait_ack(struct imx6_pcie *imx6_pcie, int addr) | |||
| 184 | u32 val; | 179 | u32 val; |
| 185 | int ret; | 180 | int ret; |
| 186 | 181 | ||
| 187 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 182 | val = PCIE_PHY_CTRL_DATA(addr); |
| 188 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | 183 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); |
| 189 | 184 | ||
| 190 | val |= (0x1 << PCIE_PHY_CTRL_CAP_ADR_LOC); | 185 | val |= PCIE_PHY_CTRL_CAP_ADR; |
| 191 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | 186 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); |
| 192 | 187 | ||
| 193 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | 188 | ret = pcie_phy_poll_ack(imx6_pcie, true); |
| 194 | if (ret) | 189 | if (ret) |
| 195 | return ret; | 190 | return ret; |
| 196 | 191 | ||
| 197 | val = addr << PCIE_PHY_CTRL_DATA_LOC; | 192 | val = PCIE_PHY_CTRL_DATA(addr); |
| 198 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); | 193 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, val); |
| 199 | 194 | ||
| 200 | return pcie_phy_poll_ack(imx6_pcie, 0); | 195 | return pcie_phy_poll_ack(imx6_pcie, false); |
| 201 | } | 196 | } |
| 202 | 197 | ||
| 203 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ | 198 | /* Read from the 16-bit PCIe PHY control registers (not memory-mapped) */ |
| 204 | static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) | 199 | static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, u16 *data) |
| 205 | { | 200 | { |
| 206 | struct dw_pcie *pci = imx6_pcie->pci; | 201 | struct dw_pcie *pci = imx6_pcie->pci; |
| 207 | u32 val, phy_ctl; | 202 | u32 phy_ctl; |
| 208 | int ret; | 203 | int ret; |
| 209 | 204 | ||
| 210 | ret = pcie_phy_wait_ack(imx6_pcie, addr); | 205 | ret = pcie_phy_wait_ack(imx6_pcie, addr); |
| @@ -212,23 +207,22 @@ static int pcie_phy_read(struct imx6_pcie *imx6_pcie, int addr, int *data) | |||
| 212 | return ret; | 207 | return ret; |
| 213 | 208 | ||
| 214 | /* assert Read signal */ | 209 | /* assert Read signal */ |
| 215 | phy_ctl = 0x1 << PCIE_PHY_CTRL_RD_LOC; | 210 | phy_ctl = PCIE_PHY_CTRL_RD; |
| 216 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); | 211 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, phy_ctl); |
| 217 | 212 | ||
| 218 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | 213 | ret = pcie_phy_poll_ack(imx6_pcie, true); |
| 219 | if (ret) | 214 | if (ret) |
| 220 | return ret; | 215 | return ret; |
| 221 | 216 | ||
| 222 | val = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); | 217 | *data = dw_pcie_readl_dbi(pci, PCIE_PHY_STAT); |
| 223 | *data = val & 0xffff; | ||
| 224 | 218 | ||
| 225 | /* deassert Read signal */ | 219 | /* deassert Read signal */ |
| 226 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); | 220 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, 0x00); |
| 227 | 221 | ||
| 228 | return pcie_phy_poll_ack(imx6_pcie, 0); | 222 | return pcie_phy_poll_ack(imx6_pcie, false); |
| 229 | } | 223 | } |
| 230 | 224 | ||
| 231 | static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) | 225 | static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, u16 data) |
| 232 | { | 226 | { |
| 233 | struct dw_pcie *pci = imx6_pcie->pci; | 227 | struct dw_pcie *pci = imx6_pcie->pci; |
| 234 | u32 var; | 228 | u32 var; |
| @@ -240,41 +234,41 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) | |||
| 240 | if (ret) | 234 | if (ret) |
| 241 | return ret; | 235 | return ret; |
| 242 | 236 | ||
| 243 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 237 | var = PCIE_PHY_CTRL_DATA(data); |
| 244 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | 238 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); |
| 245 | 239 | ||
| 246 | /* capture data */ | 240 | /* capture data */ |
| 247 | var |= (0x1 << PCIE_PHY_CTRL_CAP_DAT_LOC); | 241 | var |= PCIE_PHY_CTRL_CAP_DAT; |
| 248 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | 242 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); |
| 249 | 243 | ||
| 250 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | 244 | ret = pcie_phy_poll_ack(imx6_pcie, true); |
| 251 | if (ret) | 245 | if (ret) |
| 252 | return ret; | 246 | return ret; |
| 253 | 247 | ||
| 254 | /* deassert cap data */ | 248 | /* deassert cap data */ |
| 255 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 249 | var = PCIE_PHY_CTRL_DATA(data); |
| 256 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | 250 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); |
| 257 | 251 | ||
| 258 | /* wait for ack de-assertion */ | 252 | /* wait for ack de-assertion */ |
| 259 | ret = pcie_phy_poll_ack(imx6_pcie, 0); | 253 | ret = pcie_phy_poll_ack(imx6_pcie, false); |
| 260 | if (ret) | 254 | if (ret) |
| 261 | return ret; | 255 | return ret; |
| 262 | 256 | ||
| 263 | /* assert wr signal */ | 257 | /* assert wr signal */ |
| 264 | var = 0x1 << PCIE_PHY_CTRL_WR_LOC; | 258 | var = PCIE_PHY_CTRL_WR; |
| 265 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | 259 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); |
| 266 | 260 | ||
| 267 | /* wait for ack */ | 261 | /* wait for ack */ |
| 268 | ret = pcie_phy_poll_ack(imx6_pcie, 1); | 262 | ret = pcie_phy_poll_ack(imx6_pcie, true); |
| 269 | if (ret) | 263 | if (ret) |
| 270 | return ret; | 264 | return ret; |
| 271 | 265 | ||
| 272 | /* deassert wr signal */ | 266 | /* deassert wr signal */ |
| 273 | var = data << PCIE_PHY_CTRL_DATA_LOC; | 267 | var = PCIE_PHY_CTRL_DATA(data); |
| 274 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); | 268 | dw_pcie_writel_dbi(pci, PCIE_PHY_CTRL, var); |
| 275 | 269 | ||
| 276 | /* wait for ack de-assertion */ | 270 | /* wait for ack de-assertion */ |
| 277 | ret = pcie_phy_poll_ack(imx6_pcie, 0); | 271 | ret = pcie_phy_poll_ack(imx6_pcie, false); |
| 278 | if (ret) | 272 | if (ret) |
| 279 | return ret; | 273 | return ret; |
| 280 | 274 | ||
| @@ -285,7 +279,7 @@ static int pcie_phy_write(struct imx6_pcie *imx6_pcie, int addr, int data) | |||
| 285 | 279 | ||
| 286 | static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) | 280 | static void imx6_pcie_reset_phy(struct imx6_pcie *imx6_pcie) |
| 287 | { | 281 | { |
| 288 | u32 tmp; | 282 | u16 tmp; |
| 289 | 283 | ||
| 290 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) | 284 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) |
| 291 | return; | 285 | return; |
| @@ -455,7 +449,7 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | |||
| 455 | * reset time is too short, cannot meet the requirement. | 449 | * reset time is too short, cannot meet the requirement. |
| 456 | * add one ~10us delay here. | 450 | * add one ~10us delay here. |
| 457 | */ | 451 | */ |
| 458 | udelay(10); | 452 | usleep_range(10, 100); |
| 459 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, | 453 | regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, |
| 460 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); | 454 | IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); |
| 461 | break; | 455 | break; |
| @@ -488,20 +482,14 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie) | |||
| 488 | static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) | 482 | static void imx7d_pcie_wait_for_phy_pll_lock(struct imx6_pcie *imx6_pcie) |
| 489 | { | 483 | { |
| 490 | u32 val; | 484 | u32 val; |
| 491 | unsigned int retries; | ||
| 492 | struct device *dev = imx6_pcie->pci->dev; | 485 | struct device *dev = imx6_pcie->pci->dev; |
| 493 | 486 | ||
| 494 | for (retries = 0; retries < PHY_PLL_LOCK_WAIT_MAX_RETRIES; retries++) { | 487 | if (regmap_read_poll_timeout(imx6_pcie->iomuxc_gpr, |
| 495 | regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR22, &val); | 488 | IOMUXC_GPR22, val, |
| 496 | 489 | val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED, | |
| 497 | if (val & IMX7D_GPR22_PCIE_PHY_PLL_LOCKED) | 490 | PHY_PLL_LOCK_WAIT_USLEEP_MAX, |
| 498 | return; | 491 | PHY_PLL_LOCK_WAIT_TIMEOUT)) |
| 499 | 492 | dev_err(dev, "PCIe PLL lock timeout\n"); | |
| 500 | usleep_range(PHY_PLL_LOCK_WAIT_USLEEP_MIN, | ||
| 501 | PHY_PLL_LOCK_WAIT_USLEEP_MAX); | ||
| 502 | } | ||
| 503 | |||
| 504 | dev_err(dev, "PCIe PLL lock timeout\n"); | ||
| 505 | } | 493 | } |
| 506 | 494 | ||
| 507 | static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) | 495 | static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie) |
| @@ -687,7 +675,7 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) | |||
| 687 | { | 675 | { |
| 688 | unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); | 676 | unsigned long phy_rate = clk_get_rate(imx6_pcie->pcie_phy); |
| 689 | int mult, div; | 677 | int mult, div; |
| 690 | u32 val; | 678 | u16 val; |
| 691 | 679 | ||
| 692 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) | 680 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_IMX6_PHY)) |
| 693 | return 0; | 681 | return 0; |
| @@ -730,21 +718,6 @@ static int imx6_setup_phy_mpll(struct imx6_pcie *imx6_pcie) | |||
| 730 | return 0; | 718 | return 0; |
| 731 | } | 719 | } |
| 732 | 720 | ||
| 733 | static int imx6_pcie_wait_for_link(struct imx6_pcie *imx6_pcie) | ||
| 734 | { | ||
| 735 | struct dw_pcie *pci = imx6_pcie->pci; | ||
| 736 | struct device *dev = pci->dev; | ||
| 737 | |||
| 738 | /* check if the link is up or not */ | ||
| 739 | if (!dw_pcie_wait_for_link(pci)) | ||
| 740 | return 0; | ||
| 741 | |||
| 742 | dev_dbg(dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n", | ||
| 743 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), | ||
| 744 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); | ||
| 745 | return -ETIMEDOUT; | ||
| 746 | } | ||
| 747 | |||
| 748 | static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) | 721 | static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) |
| 749 | { | 722 | { |
| 750 | struct dw_pcie *pci = imx6_pcie->pci; | 723 | struct dw_pcie *pci = imx6_pcie->pci; |
| @@ -761,7 +734,7 @@ static int imx6_pcie_wait_for_speed_change(struct imx6_pcie *imx6_pcie) | |||
| 761 | } | 734 | } |
| 762 | 735 | ||
| 763 | dev_err(dev, "Speed change timeout\n"); | 736 | dev_err(dev, "Speed change timeout\n"); |
| 764 | return -EINVAL; | 737 | return -ETIMEDOUT; |
| 765 | } | 738 | } |
| 766 | 739 | ||
| 767 | static void imx6_pcie_ltssm_enable(struct device *dev) | 740 | static void imx6_pcie_ltssm_enable(struct device *dev) |
| @@ -803,7 +776,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
| 803 | /* Start LTSSM. */ | 776 | /* Start LTSSM. */ |
| 804 | imx6_pcie_ltssm_enable(dev); | 777 | imx6_pcie_ltssm_enable(dev); |
| 805 | 778 | ||
| 806 | ret = imx6_pcie_wait_for_link(imx6_pcie); | 779 | ret = dw_pcie_wait_for_link(pci); |
| 807 | if (ret) | 780 | if (ret) |
| 808 | goto err_reset_phy; | 781 | goto err_reset_phy; |
| 809 | 782 | ||
| @@ -841,7 +814,7 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
| 841 | } | 814 | } |
| 842 | 815 | ||
| 843 | /* Make sure link training is finished as well! */ | 816 | /* Make sure link training is finished as well! */ |
| 844 | ret = imx6_pcie_wait_for_link(imx6_pcie); | 817 | ret = dw_pcie_wait_for_link(pci); |
| 845 | if (ret) { | 818 | if (ret) { |
| 846 | dev_err(dev, "Failed to bring link up!\n"); | 819 | dev_err(dev, "Failed to bring link up!\n"); |
| 847 | goto err_reset_phy; | 820 | goto err_reset_phy; |
| @@ -856,8 +829,8 @@ static int imx6_pcie_establish_link(struct imx6_pcie *imx6_pcie) | |||
| 856 | 829 | ||
| 857 | err_reset_phy: | 830 | err_reset_phy: |
| 858 | dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", | 831 | dev_dbg(dev, "PHY DEBUG_R0=0x%08x DEBUG_R1=0x%08x\n", |
| 859 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0), | 832 | dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0), |
| 860 | dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1)); | 833 | dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG1)); |
| 861 | imx6_pcie_reset_phy(imx6_pcie); | 834 | imx6_pcie_reset_phy(imx6_pcie); |
| 862 | return ret; | 835 | return ret; |
| 863 | } | 836 | } |
| @@ -993,17 +966,11 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie) | |||
| 993 | } | 966 | } |
| 994 | } | 967 | } |
| 995 | 968 | ||
| 996 | static inline bool imx6_pcie_supports_suspend(struct imx6_pcie *imx6_pcie) | ||
| 997 | { | ||
| 998 | return (imx6_pcie->drvdata->variant == IMX7D || | ||
| 999 | imx6_pcie->drvdata->variant == IMX6SX); | ||
| 1000 | } | ||
| 1001 | |||
| 1002 | static int imx6_pcie_suspend_noirq(struct device *dev) | 969 | static int imx6_pcie_suspend_noirq(struct device *dev) |
| 1003 | { | 970 | { |
| 1004 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); | 971 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); |
| 1005 | 972 | ||
| 1006 | if (!imx6_pcie_supports_suspend(imx6_pcie)) | 973 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) |
| 1007 | return 0; | 974 | return 0; |
| 1008 | 975 | ||
| 1009 | imx6_pcie_pm_turnoff(imx6_pcie); | 976 | imx6_pcie_pm_turnoff(imx6_pcie); |
| @@ -1019,7 +986,7 @@ static int imx6_pcie_resume_noirq(struct device *dev) | |||
| 1019 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); | 986 | struct imx6_pcie *imx6_pcie = dev_get_drvdata(dev); |
| 1020 | struct pcie_port *pp = &imx6_pcie->pci->pp; | 987 | struct pcie_port *pp = &imx6_pcie->pci->pp; |
| 1021 | 988 | ||
| 1022 | if (!imx6_pcie_supports_suspend(imx6_pcie)) | 989 | if (!(imx6_pcie->drvdata->flags & IMX6_PCIE_FLAG_SUPPORTS_SUSPEND)) |
| 1023 | return 0; | 990 | return 0; |
| 1024 | 991 | ||
| 1025 | imx6_pcie_assert_core_reset(imx6_pcie); | 992 | imx6_pcie_assert_core_reset(imx6_pcie); |
| @@ -1249,7 +1216,8 @@ static const struct imx6_pcie_drvdata drvdata[] = { | |||
| 1249 | [IMX6SX] = { | 1216 | [IMX6SX] = { |
| 1250 | .variant = IMX6SX, | 1217 | .variant = IMX6SX, |
| 1251 | .flags = IMX6_PCIE_FLAG_IMX6_PHY | | 1218 | .flags = IMX6_PCIE_FLAG_IMX6_PHY | |
| 1252 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE, | 1219 | IMX6_PCIE_FLAG_IMX6_SPEED_CHANGE | |
| 1220 | IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, | ||
| 1253 | }, | 1221 | }, |
| 1254 | [IMX6QP] = { | 1222 | [IMX6QP] = { |
| 1255 | .variant = IMX6QP, | 1223 | .variant = IMX6QP, |
| @@ -1258,6 +1226,7 @@ static const struct imx6_pcie_drvdata drvdata[] = { | |||
| 1258 | }, | 1226 | }, |
| 1259 | [IMX7D] = { | 1227 | [IMX7D] = { |
| 1260 | .variant = IMX7D, | 1228 | .variant = IMX7D, |
| 1229 | .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND, | ||
| 1261 | }, | 1230 | }, |
| 1262 | [IMX8MQ] = { | 1231 | [IMX8MQ] = { |
| 1263 | .variant = IMX8MQ, | 1232 | .variant = IMX8MQ, |
| @@ -1279,6 +1248,7 @@ static struct platform_driver imx6_pcie_driver = { | |||
| 1279 | .of_match_table = imx6_pcie_of_match, | 1248 | .of_match_table = imx6_pcie_of_match, |
| 1280 | .suppress_bind_attrs = true, | 1249 | .suppress_bind_attrs = true, |
| 1281 | .pm = &imx6_pcie_pm_ops, | 1250 | .pm = &imx6_pcie_pm_ops, |
| 1251 | .probe_type = PROBE_PREFER_ASYNCHRONOUS, | ||
| 1282 | }, | 1252 | }, |
| 1283 | .probe = imx6_pcie_probe, | 1253 | .probe = imx6_pcie_probe, |
| 1284 | .shutdown = imx6_pcie_shutdown, | 1254 | .shutdown = imx6_pcie_shutdown, |
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 14f2b0b4ed5e..af677254a072 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
| 13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
| 14 | #include <linux/gpio/consumer.h> | ||
| 14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
| 15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 16 | #include <linux/irqchip/chained_irq.h> | 17 | #include <linux/irqchip/chained_irq.h> |
| @@ -18,6 +19,7 @@ | |||
| 18 | #include <linux/mfd/syscon.h> | 19 | #include <linux/mfd/syscon.h> |
| 19 | #include <linux/msi.h> | 20 | #include <linux/msi.h> |
| 20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 22 | #include <linux/of_device.h> | ||
| 21 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
| 22 | #include <linux/of_pci.h> | 24 | #include <linux/of_pci.h> |
| 23 | #include <linux/phy/phy.h> | 25 | #include <linux/phy/phy.h> |
| @@ -26,6 +28,7 @@ | |||
| 26 | #include <linux/resource.h> | 28 | #include <linux/resource.h> |
| 27 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
| 28 | 30 | ||
| 31 | #include "../../pci.h" | ||
| 29 | #include "pcie-designware.h" | 32 | #include "pcie-designware.h" |
| 30 | 33 | ||
| 31 | #define PCIE_VENDORID_MASK 0xffff | 34 | #define PCIE_VENDORID_MASK 0xffff |
| @@ -44,28 +47,34 @@ | |||
| 44 | #define CFG_TYPE1 BIT(24) | 47 | #define CFG_TYPE1 BIT(24) |
| 45 | 48 | ||
| 46 | #define OB_SIZE 0x030 | 49 | #define OB_SIZE 0x030 |
| 47 | #define SPACE0_REMOTE_CFG_OFFSET 0x1000 | ||
| 48 | #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) | 50 | #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) |
| 49 | #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) | 51 | #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) |
| 50 | #define OB_ENABLEN BIT(0) | 52 | #define OB_ENABLEN BIT(0) |
| 51 | #define OB_WIN_SIZE 8 /* 8MB */ | 53 | #define OB_WIN_SIZE 8 /* 8MB */ |
| 52 | 54 | ||
| 55 | #define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1))) | ||
| 56 | #define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1))) | ||
| 57 | #define PCIE_EP_IRQ_SET 0x64 | ||
| 58 | #define PCIE_EP_IRQ_CLR 0x68 | ||
| 59 | #define INT_ENABLE BIT(0) | ||
| 60 | |||
| 53 | /* IRQ register defines */ | 61 | /* IRQ register defines */ |
| 54 | #define IRQ_EOI 0x050 | 62 | #define IRQ_EOI 0x050 |
| 55 | #define IRQ_STATUS 0x184 | ||
| 56 | #define IRQ_ENABLE_SET 0x188 | ||
| 57 | #define IRQ_ENABLE_CLR 0x18c | ||
| 58 | 63 | ||
| 59 | #define MSI_IRQ 0x054 | 64 | #define MSI_IRQ 0x054 |
| 60 | #define MSI0_IRQ_STATUS 0x104 | 65 | #define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4)) |
| 61 | #define MSI0_IRQ_ENABLE_SET 0x108 | 66 | #define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4)) |
| 62 | #define MSI0_IRQ_ENABLE_CLR 0x10c | 67 | #define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4)) |
| 63 | #define IRQ_STATUS 0x184 | ||
| 64 | #define MSI_IRQ_OFFSET 4 | 68 | #define MSI_IRQ_OFFSET 4 |
| 65 | 69 | ||
| 70 | #define IRQ_STATUS(n) (0x184 + ((n) << 4)) | ||
| 71 | #define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4)) | ||
| 72 | #define INTx_EN BIT(0) | ||
| 73 | |||
| 66 | #define ERR_IRQ_STATUS 0x1c4 | 74 | #define ERR_IRQ_STATUS 0x1c4 |
| 67 | #define ERR_IRQ_ENABLE_SET 0x1c8 | 75 | #define ERR_IRQ_ENABLE_SET 0x1c8 |
| 68 | #define ERR_AER BIT(5) /* ECRC error */ | 76 | #define ERR_AER BIT(5) /* ECRC error */ |
| 77 | #define AM6_ERR_AER BIT(4) /* AM6 ECRC error */ | ||
| 69 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ | 78 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ |
| 70 | #define ERR_CORR BIT(3) /* Correctable error */ | 79 | #define ERR_CORR BIT(3) /* Correctable error */ |
| 71 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ | 80 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ |
| @@ -74,25 +83,45 @@ | |||
| 74 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ | 83 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ |
| 75 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) | 84 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) |
| 76 | 85 | ||
| 77 | #define MAX_MSI_HOST_IRQS 8 | ||
| 78 | /* PCIE controller device IDs */ | 86 | /* PCIE controller device IDs */ |
| 79 | #define PCIE_RC_K2HK 0xb008 | 87 | #define PCIE_RC_K2HK 0xb008 |
| 80 | #define PCIE_RC_K2E 0xb009 | 88 | #define PCIE_RC_K2E 0xb009 |
| 81 | #define PCIE_RC_K2L 0xb00a | 89 | #define PCIE_RC_K2L 0xb00a |
| 82 | #define PCIE_RC_K2G 0xb00b | 90 | #define PCIE_RC_K2G 0xb00b |
| 83 | 91 | ||
| 92 | #define KS_PCIE_DEV_TYPE_MASK (0x3 << 1) | ||
| 93 | #define KS_PCIE_DEV_TYPE(mode) ((mode) << 1) | ||
| 94 | |||
| 95 | #define EP 0x0 | ||
| 96 | #define LEG_EP 0x1 | ||
| 97 | #define RC 0x2 | ||
| 98 | |||
| 99 | #define EXP_CAP_ID_OFFSET 0x70 | ||
| 100 | |||
| 101 | #define KS_PCIE_SYSCLOCKOUTEN BIT(0) | ||
| 102 | |||
| 103 | #define AM654_PCIE_DEV_TYPE_MASK 0x3 | ||
| 104 | #define AM654_WIN_SIZE SZ_64K | ||
| 105 | |||
| 106 | #define APP_ADDR_SPACE_0 (16 * SZ_1K) | ||
| 107 | |||
| 84 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) | 108 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) |
| 85 | 109 | ||
| 110 | struct ks_pcie_of_data { | ||
| 111 | enum dw_pcie_device_mode mode; | ||
| 112 | const struct dw_pcie_host_ops *host_ops; | ||
| 113 | const struct dw_pcie_ep_ops *ep_ops; | ||
| 114 | unsigned int version; | ||
| 115 | }; | ||
| 116 | |||
| 86 | struct keystone_pcie { | 117 | struct keystone_pcie { |
| 87 | struct dw_pcie *pci; | 118 | struct dw_pcie *pci; |
| 88 | /* PCI Device ID */ | 119 | /* PCI Device ID */ |
| 89 | u32 device_id; | 120 | u32 device_id; |
| 90 | int num_legacy_host_irqs; | ||
| 91 | int legacy_host_irqs[PCI_NUM_INTX]; | 121 | int legacy_host_irqs[PCI_NUM_INTX]; |
| 92 | struct device_node *legacy_intc_np; | 122 | struct device_node *legacy_intc_np; |
| 93 | 123 | ||
| 94 | int num_msi_host_irqs; | 124 | int msi_host_irq; |
| 95 | int msi_host_irqs[MAX_MSI_HOST_IRQS]; | ||
| 96 | int num_lanes; | 125 | int num_lanes; |
| 97 | u32 num_viewport; | 126 | u32 num_viewport; |
| 98 | struct phy **phy; | 127 | struct phy **phy; |
| @@ -101,28 +130,12 @@ struct keystone_pcie { | |||
| 101 | struct irq_domain *legacy_irq_domain; | 130 | struct irq_domain *legacy_irq_domain; |
| 102 | struct device_node *np; | 131 | struct device_node *np; |
| 103 | 132 | ||
| 104 | int error_irq; | ||
| 105 | |||
| 106 | /* Application register space */ | 133 | /* Application register space */ |
| 107 | void __iomem *va_app_base; /* DT 1st resource */ | 134 | void __iomem *va_app_base; /* DT 1st resource */ |
| 108 | struct resource app; | 135 | struct resource app; |
| 136 | bool is_am6; | ||
| 109 | }; | 137 | }; |
| 110 | 138 | ||
| 111 | static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, | ||
| 112 | u32 *bit_pos) | ||
| 113 | { | ||
| 114 | *reg_offset = offset % 8; | ||
| 115 | *bit_pos = offset >> 3; | ||
| 116 | } | ||
| 117 | |||
| 118 | static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp) | ||
| 119 | { | ||
| 120 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 121 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 122 | |||
| 123 | return ks_pcie->app.start + MSI_IRQ; | ||
| 124 | } | ||
| 125 | |||
| 126 | static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) | 139 | static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) |
| 127 | { | 140 | { |
| 128 | return readl(ks_pcie->va_app_base + offset); | 141 | return readl(ks_pcie->va_app_base + offset); |
| @@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset, | |||
| 134 | writel(val, ks_pcie->va_app_base + offset); | 147 | writel(val, ks_pcie->va_app_base + offset); |
| 135 | } | 148 | } |
| 136 | 149 | ||
| 137 | static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) | 150 | static void ks_pcie_msi_irq_ack(struct irq_data *data) |
| 138 | { | 151 | { |
| 139 | struct dw_pcie *pci = ks_pcie->pci; | 152 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
| 140 | struct pcie_port *pp = &pci->pp; | 153 | struct keystone_pcie *ks_pcie; |
| 141 | struct device *dev = pci->dev; | 154 | u32 irq = data->hwirq; |
| 142 | u32 pending, vector; | 155 | struct dw_pcie *pci; |
| 143 | int src, virq; | 156 | u32 reg_offset; |
| 157 | u32 bit_pos; | ||
| 144 | 158 | ||
| 145 | pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); | 159 | pci = to_dw_pcie_from_pp(pp); |
| 160 | ks_pcie = to_keystone_pcie(pci); | ||
| 146 | 161 | ||
| 147 | /* | 162 | reg_offset = irq % 8; |
| 148 | * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit | 163 | bit_pos = irq >> 3; |
| 149 | * shows 1, 9, 17, 25 and so forth | 164 | |
| 150 | */ | 165 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset), |
| 151 | for (src = 0; src < 4; src++) { | 166 | BIT(bit_pos)); |
| 152 | if (BIT(src) & pending) { | 167 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); |
| 153 | vector = offset + (src << 3); | ||
| 154 | virq = irq_linear_revmap(pp->irq_domain, vector); | ||
| 155 | dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", | ||
| 156 | src, vector, virq); | ||
| 157 | generic_handle_irq(virq); | ||
| 158 | } | ||
| 159 | } | ||
| 160 | } | 168 | } |
| 161 | 169 | ||
| 162 | static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp) | 170 | static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
| 163 | { | 171 | { |
| 164 | u32 reg_offset, bit_pos; | 172 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
| 165 | struct keystone_pcie *ks_pcie; | 173 | struct keystone_pcie *ks_pcie; |
| 166 | struct dw_pcie *pci; | 174 | struct dw_pcie *pci; |
| 175 | u64 msi_target; | ||
| 167 | 176 | ||
| 168 | pci = to_dw_pcie_from_pp(pp); | 177 | pci = to_dw_pcie_from_pp(pp); |
| 169 | ks_pcie = to_keystone_pcie(pci); | 178 | ks_pcie = to_keystone_pcie(pci); |
| 170 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
| 171 | 179 | ||
| 172 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), | 180 | msi_target = ks_pcie->app.start + MSI_IRQ; |
| 173 | BIT(bit_pos)); | 181 | msg->address_lo = lower_32_bits(msi_target); |
| 174 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); | 182 | msg->address_hi = upper_32_bits(msi_target); |
| 183 | msg->data = data->hwirq; | ||
| 184 | |||
| 185 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
| 186 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
| 175 | } | 187 | } |
| 176 | 188 | ||
| 177 | static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq) | 189 | static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, |
| 190 | const struct cpumask *mask, bool force) | ||
| 178 | { | 191 | { |
| 179 | u32 reg_offset, bit_pos; | 192 | return -EINVAL; |
| 180 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 193 | } |
| 181 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 182 | 194 | ||
| 183 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | 195 | static void ks_pcie_msi_mask(struct irq_data *data) |
| 184 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), | 196 | { |
| 197 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
| 198 | struct keystone_pcie *ks_pcie; | ||
| 199 | u32 irq = data->hwirq; | ||
| 200 | struct dw_pcie *pci; | ||
| 201 | unsigned long flags; | ||
| 202 | u32 reg_offset; | ||
| 203 | u32 bit_pos; | ||
| 204 | |||
| 205 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 206 | |||
| 207 | pci = to_dw_pcie_from_pp(pp); | ||
| 208 | ks_pcie = to_keystone_pcie(pci); | ||
| 209 | |||
| 210 | reg_offset = irq % 8; | ||
| 211 | bit_pos = irq >> 3; | ||
| 212 | |||
| 213 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset), | ||
| 185 | BIT(bit_pos)); | 214 | BIT(bit_pos)); |
| 215 | |||
| 216 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 186 | } | 217 | } |
| 187 | 218 | ||
| 188 | static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq) | 219 | static void ks_pcie_msi_unmask(struct irq_data *data) |
| 189 | { | 220 | { |
| 190 | u32 reg_offset, bit_pos; | 221 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
| 191 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 222 | struct keystone_pcie *ks_pcie; |
| 192 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | 223 | u32 irq = data->hwirq; |
| 224 | struct dw_pcie *pci; | ||
| 225 | unsigned long flags; | ||
| 226 | u32 reg_offset; | ||
| 227 | u32 bit_pos; | ||
| 228 | |||
| 229 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 193 | 230 | ||
| 194 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | 231 | pci = to_dw_pcie_from_pp(pp); |
| 195 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), | 232 | ks_pcie = to_keystone_pcie(pci); |
| 233 | |||
| 234 | reg_offset = irq % 8; | ||
| 235 | bit_pos = irq >> 3; | ||
| 236 | |||
| 237 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset), | ||
| 196 | BIT(bit_pos)); | 238 | BIT(bit_pos)); |
| 239 | |||
| 240 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 197 | } | 241 | } |
| 198 | 242 | ||
| 243 | static struct irq_chip ks_pcie_msi_irq_chip = { | ||
| 244 | .name = "KEYSTONE-PCI-MSI", | ||
| 245 | .irq_ack = ks_pcie_msi_irq_ack, | ||
| 246 | .irq_compose_msi_msg = ks_pcie_compose_msi_msg, | ||
| 247 | .irq_set_affinity = ks_pcie_msi_set_affinity, | ||
| 248 | .irq_mask = ks_pcie_msi_mask, | ||
| 249 | .irq_unmask = ks_pcie_msi_unmask, | ||
| 250 | }; | ||
| 251 | |||
| 199 | static int ks_pcie_msi_host_init(struct pcie_port *pp) | 252 | static int ks_pcie_msi_host_init(struct pcie_port *pp) |
| 200 | { | 253 | { |
| 254 | pp->msi_irq_chip = &ks_pcie_msi_irq_chip; | ||
| 201 | return dw_pcie_allocate_domains(pp); | 255 | return dw_pcie_allocate_domains(pp); |
| 202 | } | 256 | } |
| 203 | 257 | ||
| 204 | static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) | ||
| 205 | { | ||
| 206 | int i; | ||
| 207 | |||
| 208 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
| 209 | ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); | ||
| 210 | } | ||
| 211 | |||
| 212 | static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | 258 | static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, |
| 213 | int offset) | 259 | int offset) |
| 214 | { | 260 | { |
| @@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | |||
| 217 | u32 pending; | 263 | u32 pending; |
| 218 | int virq; | 264 | int virq; |
| 219 | 265 | ||
| 220 | pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); | 266 | pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset)); |
| 221 | 267 | ||
| 222 | if (BIT(0) & pending) { | 268 | if (BIT(0) & pending) { |
| 223 | virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); | 269 | virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); |
| @@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | |||
| 229 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); | 275 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); |
| 230 | } | 276 | } |
| 231 | 277 | ||
| 278 | /* | ||
| 279 | * Dummy function so that DW core doesn't configure MSI | ||
| 280 | */ | ||
| 281 | static int ks_pcie_am654_msi_host_init(struct pcie_port *pp) | ||
| 282 | { | ||
| 283 | return 0; | ||
| 284 | } | ||
| 285 | |||
| 232 | static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) | 286 | static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) |
| 233 | { | 287 | { |
| 234 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); | 288 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); |
| @@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) | |||
| 255 | if (reg & ERR_CORR) | 309 | if (reg & ERR_CORR) |
| 256 | dev_dbg(dev, "Correctable Error\n"); | 310 | dev_dbg(dev, "Correctable Error\n"); |
| 257 | 311 | ||
| 258 | if (reg & ERR_AXI) | 312 | if (!ks_pcie->is_am6 && (reg & ERR_AXI)) |
| 259 | dev_err(dev, "AXI tag lookup fatal Error\n"); | 313 | dev_err(dev, "AXI tag lookup fatal Error\n"); |
| 260 | 314 | ||
| 261 | if (reg & ERR_AER) | 315 | if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER))) |
| 262 | dev_err(dev, "ECRC Error\n"); | 316 | dev_err(dev, "ECRC Error\n"); |
| 263 | 317 | ||
| 264 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); | 318 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); |
| @@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) | |||
| 356 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); | 410 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); |
| 357 | ks_pcie_clear_dbi_mode(ks_pcie); | 411 | ks_pcie_clear_dbi_mode(ks_pcie); |
| 358 | 412 | ||
| 413 | if (ks_pcie->is_am6) | ||
| 414 | return; | ||
| 415 | |||
| 359 | val = ilog2(OB_WIN_SIZE); | 416 | val = ilog2(OB_WIN_SIZE); |
| 360 | ks_pcie_app_writel(ks_pcie, OB_SIZE, val); | 417 | ks_pcie_app_writel(ks_pcie, OB_SIZE, val); |
| 361 | 418 | ||
| @@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci) | |||
| 445 | return (val == PORT_LOGIC_LTSSM_STATE_L0); | 502 | return (val == PORT_LOGIC_LTSSM_STATE_L0); |
| 446 | } | 503 | } |
| 447 | 504 | ||
| 448 | static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) | 505 | static void ks_pcie_stop_link(struct dw_pcie *pci) |
| 449 | { | 506 | { |
| 507 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 450 | u32 val; | 508 | u32 val; |
| 451 | 509 | ||
| 452 | /* Disable Link training */ | 510 | /* Disable Link training */ |
| 453 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | 511 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); |
| 454 | val &= ~LTSSM_EN_VAL; | 512 | val &= ~LTSSM_EN_VAL; |
| 455 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | 513 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); |
| 456 | |||
| 457 | /* Initiate Link Training */ | ||
| 458 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | ||
| 459 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
| 460 | } | 514 | } |
| 461 | 515 | ||
| 462 | /** | 516 | static int ks_pcie_start_link(struct dw_pcie *pci) |
| 463 | * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware | ||
| 464 | * | ||
| 465 | * Ioremap the register resources, initialize legacy irq domain | ||
| 466 | * and call dw_pcie_v3_65_host_init() API to initialize the Keystone | ||
| 467 | * PCI host controller. | ||
| 468 | */ | ||
| 469 | static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie) | ||
| 470 | { | 517 | { |
| 471 | struct dw_pcie *pci = ks_pcie->pci; | 518 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
| 472 | struct pcie_port *pp = &pci->pp; | ||
| 473 | struct device *dev = pci->dev; | 519 | struct device *dev = pci->dev; |
| 474 | struct platform_device *pdev = to_platform_device(dev); | 520 | u32 val; |
| 475 | struct resource *res; | ||
| 476 | |||
| 477 | /* Index 0 is the config reg. space address */ | ||
| 478 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 479 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 480 | if (IS_ERR(pci->dbi_base)) | ||
| 481 | return PTR_ERR(pci->dbi_base); | ||
| 482 | |||
| 483 | /* | ||
| 484 | * We set these same and is used in pcie rd/wr_other_conf | ||
| 485 | * functions | ||
| 486 | */ | ||
| 487 | pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; | ||
| 488 | pp->va_cfg1_base = pp->va_cfg0_base; | ||
| 489 | |||
| 490 | /* Index 1 is the application reg. space address */ | ||
| 491 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
| 492 | ks_pcie->va_app_base = devm_ioremap_resource(dev, res); | ||
| 493 | if (IS_ERR(ks_pcie->va_app_base)) | ||
| 494 | return PTR_ERR(ks_pcie->va_app_base); | ||
| 495 | |||
| 496 | ks_pcie->app = *res; | ||
| 497 | 521 | ||
| 498 | /* Create legacy IRQ domain */ | 522 | if (dw_pcie_link_up(pci)) { |
| 499 | ks_pcie->legacy_irq_domain = | 523 | dev_dbg(dev, "link is already up\n"); |
| 500 | irq_domain_add_linear(ks_pcie->legacy_intc_np, | 524 | return 0; |
| 501 | PCI_NUM_INTX, | ||
| 502 | &ks_pcie_legacy_irq_domain_ops, | ||
| 503 | NULL); | ||
| 504 | if (!ks_pcie->legacy_irq_domain) { | ||
| 505 | dev_err(dev, "Failed to add irq domain for legacy irqs\n"); | ||
| 506 | return -EINVAL; | ||
| 507 | } | 525 | } |
| 508 | 526 | ||
| 509 | return dw_pcie_host_init(pp); | 527 | /* Initiate Link Training */ |
| 528 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | ||
| 529 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
| 530 | |||
| 531 | return 0; | ||
| 510 | } | 532 | } |
| 511 | 533 | ||
| 512 | static void ks_pcie_quirk(struct pci_dev *dev) | 534 | static void ks_pcie_quirk(struct pci_dev *dev) |
| @@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev) | |||
| 552 | } | 574 | } |
| 553 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); | 575 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); |
| 554 | 576 | ||
| 555 | static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) | ||
| 556 | { | ||
| 557 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 558 | struct device *dev = pci->dev; | ||
| 559 | |||
| 560 | if (dw_pcie_link_up(pci)) { | ||
| 561 | dev_info(dev, "Link already up\n"); | ||
| 562 | return 0; | ||
| 563 | } | ||
| 564 | |||
| 565 | ks_pcie_initiate_link_train(ks_pcie); | ||
| 566 | |||
| 567 | /* check if the link is up or not */ | ||
| 568 | if (!dw_pcie_wait_for_link(pci)) | ||
| 569 | return 0; | ||
| 570 | |||
| 571 | dev_err(dev, "phy link never came up\n"); | ||
| 572 | return -ETIMEDOUT; | ||
| 573 | } | ||
| 574 | |||
| 575 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) | 577 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) |
| 576 | { | 578 | { |
| 577 | unsigned int irq = irq_desc_get_irq(desc); | 579 | unsigned int irq = desc->irq_data.hwirq; |
| 578 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | 580 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
| 579 | u32 offset = irq - ks_pcie->msi_host_irqs[0]; | 581 | u32 offset = irq - ks_pcie->msi_host_irq; |
| 580 | struct dw_pcie *pci = ks_pcie->pci; | 582 | struct dw_pcie *pci = ks_pcie->pci; |
| 583 | struct pcie_port *pp = &pci->pp; | ||
| 581 | struct device *dev = pci->dev; | 584 | struct device *dev = pci->dev; |
| 582 | struct irq_chip *chip = irq_desc_get_chip(desc); | 585 | struct irq_chip *chip = irq_desc_get_chip(desc); |
| 586 | u32 vector, virq, reg, pos; | ||
| 583 | 587 | ||
| 584 | dev_dbg(dev, "%s, irq %d\n", __func__, irq); | 588 | dev_dbg(dev, "%s, irq %d\n", __func__, irq); |
| 585 | 589 | ||
| @@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) | |||
| 589 | * ack operation. | 593 | * ack operation. |
| 590 | */ | 594 | */ |
| 591 | chained_irq_enter(chip, desc); | 595 | chained_irq_enter(chip, desc); |
| 592 | ks_pcie_handle_msi_irq(ks_pcie, offset); | 596 | |
| 597 | reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset)); | ||
| 598 | /* | ||
| 599 | * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit | ||
| 600 | * shows 1, 9, 17, 25 and so forth | ||
| 601 | */ | ||
| 602 | for (pos = 0; pos < 4; pos++) { | ||
| 603 | if (!(reg & BIT(pos))) | ||
| 604 | continue; | ||
| 605 | |||
| 606 | vector = offset + (pos << 3); | ||
| 607 | virq = irq_linear_revmap(pp->irq_domain, vector); | ||
| 608 | dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector, | ||
| 609 | virq); | ||
| 610 | generic_handle_irq(virq); | ||
| 611 | } | ||
| 612 | |||
| 593 | chained_irq_exit(chip, desc); | 613 | chained_irq_exit(chip, desc); |
| 594 | } | 614 | } |
| 595 | 615 | ||
| @@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) | |||
| 622 | chained_irq_exit(chip, desc); | 642 | chained_irq_exit(chip, desc); |
| 623 | } | 643 | } |
| 624 | 644 | ||
| 625 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | 645 | static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie) |
| 626 | char *controller, int *num_irqs) | ||
| 627 | { | 646 | { |
| 628 | int temp, max_host_irqs, legacy = 1, *host_irqs; | ||
| 629 | struct device *dev = ks_pcie->pci->dev; | 647 | struct device *dev = ks_pcie->pci->dev; |
| 630 | struct device_node *np_pcie = dev->of_node, **np_temp; | 648 | struct device_node *np = ks_pcie->np; |
| 631 | 649 | struct device_node *intc_np; | |
| 632 | if (!strcmp(controller, "msi-interrupt-controller")) | 650 | struct irq_data *irq_data; |
| 633 | legacy = 0; | 651 | int irq_count, irq, ret, i; |
| 634 | 652 | ||
| 635 | if (legacy) { | 653 | if (!IS_ENABLED(CONFIG_PCI_MSI)) |
| 636 | np_temp = &ks_pcie->legacy_intc_np; | 654 | return 0; |
| 637 | max_host_irqs = PCI_NUM_INTX; | ||
| 638 | host_irqs = &ks_pcie->legacy_host_irqs[0]; | ||
| 639 | } else { | ||
| 640 | np_temp = &ks_pcie->msi_intc_np; | ||
| 641 | max_host_irqs = MAX_MSI_HOST_IRQS; | ||
| 642 | host_irqs = &ks_pcie->msi_host_irqs[0]; | ||
| 643 | } | ||
| 644 | 655 | ||
| 645 | /* interrupt controller is in a child node */ | 656 | intc_np = of_get_child_by_name(np, "msi-interrupt-controller"); |
| 646 | *np_temp = of_get_child_by_name(np_pcie, controller); | 657 | if (!intc_np) { |
| 647 | if (!(*np_temp)) { | 658 | if (ks_pcie->is_am6) |
| 648 | dev_err(dev, "Node for %s is absent\n", controller); | 659 | return 0; |
| 660 | dev_warn(dev, "msi-interrupt-controller node is absent\n"); | ||
| 649 | return -EINVAL; | 661 | return -EINVAL; |
| 650 | } | 662 | } |
| 651 | 663 | ||
| 652 | temp = of_irq_count(*np_temp); | 664 | irq_count = of_irq_count(intc_np); |
| 653 | if (!temp) { | 665 | if (!irq_count) { |
| 654 | dev_err(dev, "No IRQ entries in %s\n", controller); | 666 | dev_err(dev, "No IRQ entries in msi-interrupt-controller\n"); |
| 655 | of_node_put(*np_temp); | 667 | ret = -EINVAL; |
| 656 | return -EINVAL; | 668 | goto err; |
| 657 | } | 669 | } |
| 658 | 670 | ||
| 659 | if (temp > max_host_irqs) | 671 | for (i = 0; i < irq_count; i++) { |
| 660 | dev_warn(dev, "Too many %s interrupts defined %u\n", | 672 | irq = irq_of_parse_and_map(intc_np, i); |
| 661 | (legacy ? "legacy" : "MSI"), temp); | 673 | if (!irq) { |
| 662 | 674 | ret = -EINVAL; | |
| 663 | /* | 675 | goto err; |
| 664 | * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to | 676 | } |
| 665 | * 7 (MSI) | ||
| 666 | */ | ||
| 667 | for (temp = 0; temp < max_host_irqs; temp++) { | ||
| 668 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); | ||
| 669 | if (!host_irqs[temp]) | ||
| 670 | break; | ||
| 671 | } | ||
| 672 | 677 | ||
| 673 | of_node_put(*np_temp); | 678 | if (!ks_pcie->msi_host_irq) { |
| 679 | irq_data = irq_get_irq_data(irq); | ||
| 680 | if (!irq_data) { | ||
| 681 | ret = -EINVAL; | ||
| 682 | goto err; | ||
| 683 | } | ||
| 684 | ks_pcie->msi_host_irq = irq_data->hwirq; | ||
| 685 | } | ||
| 674 | 686 | ||
| 675 | if (temp) { | 687 | irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler, |
| 676 | *num_irqs = temp; | 688 | ks_pcie); |
| 677 | return 0; | ||
| 678 | } | 689 | } |
| 679 | 690 | ||
| 680 | return -EINVAL; | 691 | of_node_put(intc_np); |
| 692 | return 0; | ||
| 693 | |||
| 694 | err: | ||
| 695 | of_node_put(intc_np); | ||
| 696 | return ret; | ||
| 681 | } | 697 | } |
| 682 | 698 | ||
| 683 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | 699 | static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) |
| 684 | { | 700 | { |
| 685 | int i; | 701 | struct device *dev = ks_pcie->pci->dev; |
| 702 | struct irq_domain *legacy_irq_domain; | ||
| 703 | struct device_node *np = ks_pcie->np; | ||
| 704 | struct device_node *intc_np; | ||
| 705 | int irq_count, irq, ret = 0, i; | ||
| 706 | |||
| 707 | intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); | ||
| 708 | if (!intc_np) { | ||
| 709 | /* | ||
| 710 | * Since legacy interrupts are modeled as edge-interrupts in | ||
| 711 | * AM6, keep it disabled for now. | ||
| 712 | */ | ||
| 713 | if (ks_pcie->is_am6) | ||
| 714 | return 0; | ||
| 715 | dev_warn(dev, "legacy-interrupt-controller node is absent\n"); | ||
| 716 | return -EINVAL; | ||
| 717 | } | ||
| 686 | 718 | ||
| 687 | /* Legacy IRQ */ | 719 | irq_count = of_irq_count(intc_np); |
| 688 | for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { | 720 | if (!irq_count) { |
| 689 | irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], | 721 | dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n"); |
| 722 | ret = -EINVAL; | ||
| 723 | goto err; | ||
| 724 | } | ||
| 725 | |||
| 726 | for (i = 0; i < irq_count; i++) { | ||
| 727 | irq = irq_of_parse_and_map(intc_np, i); | ||
| 728 | if (!irq) { | ||
| 729 | ret = -EINVAL; | ||
| 730 | goto err; | ||
| 731 | } | ||
| 732 | ks_pcie->legacy_host_irqs[i] = irq; | ||
| 733 | |||
| 734 | irq_set_chained_handler_and_data(irq, | ||
| 690 | ks_pcie_legacy_irq_handler, | 735 | ks_pcie_legacy_irq_handler, |
| 691 | ks_pcie); | 736 | ks_pcie); |
| 692 | } | 737 | } |
| 693 | ks_pcie_enable_legacy_irqs(ks_pcie); | ||
| 694 | 738 | ||
| 695 | /* MSI IRQ */ | 739 | legacy_irq_domain = |
| 696 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | 740 | irq_domain_add_linear(intc_np, PCI_NUM_INTX, |
| 697 | for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { | 741 | &ks_pcie_legacy_irq_domain_ops, NULL); |
| 698 | irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], | 742 | if (!legacy_irq_domain) { |
| 699 | ks_pcie_msi_irq_handler, | 743 | dev_err(dev, "Failed to add irq domain for legacy irqs\n"); |
| 700 | ks_pcie); | 744 | ret = -EINVAL; |
| 701 | } | 745 | goto err; |
| 702 | } | 746 | } |
| 747 | ks_pcie->legacy_irq_domain = legacy_irq_domain; | ||
| 748 | |||
| 749 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
| 750 | ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); | ||
| 703 | 751 | ||
| 704 | if (ks_pcie->error_irq > 0) | 752 | err: |
| 705 | ks_pcie_enable_error_irq(ks_pcie); | 753 | of_node_put(intc_np); |
| 754 | return ret; | ||
| 706 | } | 755 | } |
| 707 | 756 | ||
| 757 | #ifdef CONFIG_ARM | ||
| 708 | /* | 758 | /* |
| 709 | * When a PCI device does not exist during config cycles, keystone host gets a | 759 | * When a PCI device does not exist during config cycles, keystone host gets a |
| 710 | * bus error instead of returning 0xffffffff. This handler always returns 0 | 760 | * bus error instead of returning 0xffffffff. This handler always returns 0 |
| @@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr, | |||
| 724 | 774 | ||
| 725 | return 0; | 775 | return 0; |
| 726 | } | 776 | } |
| 777 | #endif | ||
| 727 | 778 | ||
| 728 | static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) | 779 | static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) |
| 729 | { | 780 | { |
| @@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) | |||
| 742 | if (ret) | 793 | if (ret) |
| 743 | return ret; | 794 | return ret; |
| 744 | 795 | ||
| 796 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 745 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); | 797 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); |
| 746 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); | 798 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); |
| 799 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 747 | 800 | ||
| 748 | return 0; | 801 | return 0; |
| 749 | } | 802 | } |
| @@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) | |||
| 754 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | 807 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
| 755 | int ret; | 808 | int ret; |
| 756 | 809 | ||
| 810 | ret = ks_pcie_config_legacy_irq(ks_pcie); | ||
| 811 | if (ret) | ||
| 812 | return ret; | ||
| 813 | |||
| 814 | ret = ks_pcie_config_msi_irq(ks_pcie); | ||
| 815 | if (ret) | ||
| 816 | return ret; | ||
| 817 | |||
| 757 | dw_pcie_setup_rc(pp); | 818 | dw_pcie_setup_rc(pp); |
| 758 | 819 | ||
| 759 | ks_pcie_establish_link(ks_pcie); | 820 | ks_pcie_stop_link(pci); |
| 760 | ks_pcie_setup_rc_app_regs(ks_pcie); | 821 | ks_pcie_setup_rc_app_regs(ks_pcie); |
| 761 | ks_pcie_setup_interrupts(ks_pcie); | ||
| 762 | writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), | 822 | writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), |
| 763 | pci->dbi_base + PCI_IO_BASE); | 823 | pci->dbi_base + PCI_IO_BASE); |
| 764 | 824 | ||
| @@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) | |||
| 766 | if (ret < 0) | 826 | if (ret < 0) |
| 767 | return ret; | 827 | return ret; |
| 768 | 828 | ||
| 829 | #ifdef CONFIG_ARM | ||
| 769 | /* | 830 | /* |
| 770 | * PCIe access errors that result into OCP errors are caught by ARM as | 831 | * PCIe access errors that result into OCP errors are caught by ARM as |
| 771 | * "External aborts" | 832 | * "External aborts" |
| 772 | */ | 833 | */ |
| 773 | hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, | 834 | hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, |
| 774 | "Asynchronous external abort"); | 835 | "Asynchronous external abort"); |
| 836 | #endif | ||
| 837 | |||
| 838 | ks_pcie_start_link(pci); | ||
| 839 | dw_pcie_wait_for_link(pci); | ||
| 775 | 840 | ||
| 776 | return 0; | 841 | return 0; |
| 777 | } | 842 | } |
| @@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = { | |||
| 780 | .rd_other_conf = ks_pcie_rd_other_conf, | 845 | .rd_other_conf = ks_pcie_rd_other_conf, |
| 781 | .wr_other_conf = ks_pcie_wr_other_conf, | 846 | .wr_other_conf = ks_pcie_wr_other_conf, |
| 782 | .host_init = ks_pcie_host_init, | 847 | .host_init = ks_pcie_host_init, |
| 783 | .msi_set_irq = ks_pcie_msi_set_irq, | ||
| 784 | .msi_clear_irq = ks_pcie_msi_clear_irq, | ||
| 785 | .get_msi_addr = ks_pcie_get_msi_addr, | ||
| 786 | .msi_host_init = ks_pcie_msi_host_init, | 848 | .msi_host_init = ks_pcie_msi_host_init, |
| 787 | .msi_irq_ack = ks_pcie_msi_irq_ack, | ||
| 788 | .scan_bus = ks_pcie_v3_65_scan_bus, | 849 | .scan_bus = ks_pcie_v3_65_scan_bus, |
| 789 | }; | 850 | }; |
| 790 | 851 | ||
| 852 | static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { | ||
| 853 | .host_init = ks_pcie_host_init, | ||
| 854 | .msi_host_init = ks_pcie_am654_msi_host_init, | ||
| 855 | }; | ||
| 856 | |||
| 791 | static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) | 857 | static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) |
| 792 | { | 858 | { |
| 793 | struct keystone_pcie *ks_pcie = priv; | 859 | struct keystone_pcie *ks_pcie = priv; |
| @@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
| 801 | struct dw_pcie *pci = ks_pcie->pci; | 867 | struct dw_pcie *pci = ks_pcie->pci; |
| 802 | struct pcie_port *pp = &pci->pp; | 868 | struct pcie_port *pp = &pci->pp; |
| 803 | struct device *dev = &pdev->dev; | 869 | struct device *dev = &pdev->dev; |
| 870 | struct resource *res; | ||
| 804 | int ret; | 871 | int ret; |
| 805 | 872 | ||
| 806 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | 873 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); |
| 807 | "legacy-interrupt-controller", | 874 | pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); |
| 808 | &ks_pcie->num_legacy_host_irqs); | 875 | if (IS_ERR(pp->va_cfg0_base)) |
| 809 | if (ret) | 876 | return PTR_ERR(pp->va_cfg0_base); |
| 810 | return ret; | ||
| 811 | |||
| 812 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
| 813 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
| 814 | "msi-interrupt-controller", | ||
| 815 | &ks_pcie->num_msi_host_irqs); | ||
| 816 | if (ret) | ||
| 817 | return ret; | ||
| 818 | } | ||
| 819 | 877 | ||
| 820 | /* | 878 | pp->va_cfg1_base = pp->va_cfg0_base; |
| 821 | * Index 0 is the platform interrupt for error interrupt | ||
| 822 | * from RC. This is optional. | ||
| 823 | */ | ||
| 824 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | ||
| 825 | if (ks_pcie->error_irq <= 0) | ||
| 826 | dev_info(dev, "no error IRQ defined\n"); | ||
| 827 | else { | ||
| 828 | ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler, | ||
| 829 | IRQF_SHARED, "pcie-error-irq", ks_pcie); | ||
| 830 | if (ret < 0) { | ||
| 831 | dev_err(dev, "failed to request error IRQ %d\n", | ||
| 832 | ks_pcie->error_irq); | ||
| 833 | return ret; | ||
| 834 | } | ||
| 835 | } | ||
| 836 | 879 | ||
| 837 | pp->ops = &ks_pcie_host_ops; | 880 | ret = dw_pcie_host_init(pp); |
| 838 | ret = ks_pcie_dw_host_init(ks_pcie); | ||
| 839 | if (ret) { | 881 | if (ret) { |
| 840 | dev_err(dev, "failed to initialize host\n"); | 882 | dev_err(dev, "failed to initialize host\n"); |
| 841 | return ret; | 883 | return ret; |
| @@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
| 844 | return 0; | 886 | return 0; |
| 845 | } | 887 | } |
| 846 | 888 | ||
| 847 | static const struct of_device_id ks_pcie_of_match[] = { | 889 | static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base, |
| 848 | { | 890 | u32 reg, size_t size) |
| 849 | .type = "pci", | 891 | { |
| 850 | .compatible = "ti,keystone-pcie", | 892 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
| 851 | }, | 893 | u32 val; |
| 852 | { }, | 894 | |
| 853 | }; | 895 | ks_pcie_set_dbi_mode(ks_pcie); |
| 896 | dw_pcie_read(base + reg, size, &val); | ||
| 897 | ks_pcie_clear_dbi_mode(ks_pcie); | ||
| 898 | return val; | ||
| 899 | } | ||
| 900 | |||
| 901 | static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base, | ||
| 902 | u32 reg, size_t size, u32 val) | ||
| 903 | { | ||
| 904 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 905 | |||
| 906 | ks_pcie_set_dbi_mode(ks_pcie); | ||
| 907 | dw_pcie_write(base + reg, size, val); | ||
| 908 | ks_pcie_clear_dbi_mode(ks_pcie); | ||
| 909 | } | ||
| 854 | 910 | ||
| 855 | static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { | 911 | static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { |
| 912 | .start_link = ks_pcie_start_link, | ||
| 913 | .stop_link = ks_pcie_stop_link, | ||
| 856 | .link_up = ks_pcie_link_up, | 914 | .link_up = ks_pcie_link_up, |
| 915 | .read_dbi2 = ks_pcie_am654_read_dbi2, | ||
| 916 | .write_dbi2 = ks_pcie_am654_write_dbi2, | ||
| 917 | }; | ||
| 918 | |||
| 919 | static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) | ||
| 920 | { | ||
| 921 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 922 | int flags; | ||
| 923 | |||
| 924 | ep->page_size = AM654_WIN_SIZE; | ||
| 925 | flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; | ||
| 926 | dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1); | ||
| 927 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); | ||
| 928 | } | ||
| 929 | |||
| 930 | static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) | ||
| 931 | { | ||
| 932 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 933 | u8 int_pin; | ||
| 934 | |||
| 935 | int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN); | ||
| 936 | if (int_pin == 0 || int_pin > 4) | ||
| 937 | return; | ||
| 938 | |||
| 939 | ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin), | ||
| 940 | INT_ENABLE); | ||
| 941 | ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE); | ||
| 942 | mdelay(1); | ||
| 943 | ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE); | ||
| 944 | ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin), | ||
| 945 | INT_ENABLE); | ||
| 946 | } | ||
| 947 | |||
| 948 | static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
| 949 | enum pci_epc_irq_type type, | ||
| 950 | u16 interrupt_num) | ||
| 951 | { | ||
| 952 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
| 953 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
| 954 | |||
| 955 | switch (type) { | ||
| 956 | case PCI_EPC_IRQ_LEGACY: | ||
| 957 | ks_pcie_am654_raise_legacy_irq(ks_pcie); | ||
| 958 | break; | ||
| 959 | case PCI_EPC_IRQ_MSI: | ||
| 960 | dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); | ||
| 961 | break; | ||
| 962 | default: | ||
| 963 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
| 964 | return -EINVAL; | ||
| 965 | } | ||
| 966 | |||
| 967 | return 0; | ||
| 968 | } | ||
| 969 | |||
| 970 | static const struct pci_epc_features ks_pcie_am654_epc_features = { | ||
| 971 | .linkup_notifier = false, | ||
| 972 | .msi_capable = true, | ||
| 973 | .msix_capable = false, | ||
| 974 | .reserved_bar = 1 << BAR_0 | 1 << BAR_1, | ||
| 975 | .bar_fixed_64bit = 1 << BAR_0, | ||
| 976 | .bar_fixed_size[2] = SZ_1M, | ||
| 977 | .bar_fixed_size[3] = SZ_64K, | ||
| 978 | .bar_fixed_size[4] = 256, | ||
| 979 | .bar_fixed_size[5] = SZ_1M, | ||
| 980 | .align = SZ_1M, | ||
| 857 | }; | 981 | }; |
| 858 | 982 | ||
| 983 | static const struct pci_epc_features* | ||
| 984 | ks_pcie_am654_get_features(struct dw_pcie_ep *ep) | ||
| 985 | { | ||
| 986 | return &ks_pcie_am654_epc_features; | ||
| 987 | } | ||
| 988 | |||
| 989 | static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = { | ||
| 990 | .ep_init = ks_pcie_am654_ep_init, | ||
| 991 | .raise_irq = ks_pcie_am654_raise_irq, | ||
| 992 | .get_features = &ks_pcie_am654_get_features, | ||
| 993 | }; | ||
| 994 | |||
| 995 | static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie, | ||
| 996 | struct platform_device *pdev) | ||
| 997 | { | ||
| 998 | int ret; | ||
| 999 | struct dw_pcie_ep *ep; | ||
| 1000 | struct resource *res; | ||
| 1001 | struct device *dev = &pdev->dev; | ||
| 1002 | struct dw_pcie *pci = ks_pcie->pci; | ||
| 1003 | |||
| 1004 | ep = &pci->ep; | ||
| 1005 | |||
| 1006 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
| 1007 | if (!res) | ||
| 1008 | return -EINVAL; | ||
| 1009 | |||
| 1010 | ep->phys_base = res->start; | ||
| 1011 | ep->addr_size = resource_size(res); | ||
| 1012 | |||
| 1013 | ret = dw_pcie_ep_init(ep); | ||
| 1014 | if (ret) { | ||
| 1015 | dev_err(dev, "failed to initialize endpoint\n"); | ||
| 1016 | return ret; | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | return 0; | ||
| 1020 | } | ||
| 1021 | |||
| 859 | static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) | 1022 | static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) |
| 860 | { | 1023 | { |
| 861 | int num_lanes = ks_pcie->num_lanes; | 1024 | int num_lanes = ks_pcie->num_lanes; |
| @@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) | |||
| 873 | int num_lanes = ks_pcie->num_lanes; | 1036 | int num_lanes = ks_pcie->num_lanes; |
| 874 | 1037 | ||
| 875 | for (i = 0; i < num_lanes; i++) { | 1038 | for (i = 0; i < num_lanes; i++) { |
| 1039 | ret = phy_reset(ks_pcie->phy[i]); | ||
| 1040 | if (ret < 0) | ||
| 1041 | goto err_phy; | ||
| 1042 | |||
| 876 | ret = phy_init(ks_pcie->phy[i]); | 1043 | ret = phy_init(ks_pcie->phy[i]); |
| 877 | if (ret < 0) | 1044 | if (ret < 0) |
| 878 | goto err_phy; | 1045 | goto err_phy; |
| @@ -895,20 +1062,161 @@ err_phy: | |||
| 895 | return ret; | 1062 | return ret; |
| 896 | } | 1063 | } |
| 897 | 1064 | ||
| 1065 | static int ks_pcie_set_mode(struct device *dev) | ||
| 1066 | { | ||
| 1067 | struct device_node *np = dev->of_node; | ||
| 1068 | struct regmap *syscon; | ||
| 1069 | u32 val; | ||
| 1070 | u32 mask; | ||
| 1071 | int ret = 0; | ||
| 1072 | |||
| 1073 | syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); | ||
| 1074 | if (IS_ERR(syscon)) | ||
| 1075 | return 0; | ||
| 1076 | |||
| 1077 | mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN; | ||
| 1078 | val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN; | ||
| 1079 | |||
| 1080 | ret = regmap_update_bits(syscon, 0, mask, val); | ||
| 1081 | if (ret) { | ||
| 1082 | dev_err(dev, "failed to set pcie mode\n"); | ||
| 1083 | return ret; | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | return 0; | ||
| 1087 | } | ||
| 1088 | |||
| 1089 | static int ks_pcie_am654_set_mode(struct device *dev, | ||
| 1090 | enum dw_pcie_device_mode mode) | ||
| 1091 | { | ||
| 1092 | struct device_node *np = dev->of_node; | ||
| 1093 | struct regmap *syscon; | ||
| 1094 | u32 val; | ||
| 1095 | u32 mask; | ||
| 1096 | int ret = 0; | ||
| 1097 | |||
| 1098 | syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); | ||
| 1099 | if (IS_ERR(syscon)) | ||
| 1100 | return 0; | ||
| 1101 | |||
| 1102 | mask = AM654_PCIE_DEV_TYPE_MASK; | ||
| 1103 | |||
| 1104 | switch (mode) { | ||
| 1105 | case DW_PCIE_RC_TYPE: | ||
| 1106 | val = RC; | ||
| 1107 | break; | ||
| 1108 | case DW_PCIE_EP_TYPE: | ||
| 1109 | val = EP; | ||
| 1110 | break; | ||
| 1111 | default: | ||
| 1112 | dev_err(dev, "INVALID device type %d\n", mode); | ||
| 1113 | return -EINVAL; | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | ret = regmap_update_bits(syscon, 0, mask, val); | ||
| 1117 | if (ret) { | ||
| 1118 | dev_err(dev, "failed to set pcie mode\n"); | ||
| 1119 | return ret; | ||
| 1120 | } | ||
| 1121 | |||
| 1122 | return 0; | ||
| 1123 | } | ||
| 1124 | |||
| 1125 | static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed) | ||
| 1126 | { | ||
| 1127 | u32 val; | ||
| 1128 | |||
| 1129 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 1130 | |||
| 1131 | val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP); | ||
| 1132 | if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { | ||
| 1133 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 1134 | val |= link_speed; | ||
| 1135 | dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP, | ||
| 1136 | val); | ||
| 1137 | } | ||
| 1138 | |||
| 1139 | val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2); | ||
| 1140 | if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { | ||
| 1141 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
| 1142 | val |= link_speed; | ||
| 1143 | dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2, | ||
| 1144 | val); | ||
| 1145 | } | ||
| 1146 | |||
| 1147 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | static const struct ks_pcie_of_data ks_pcie_rc_of_data = { | ||
| 1151 | .host_ops = &ks_pcie_host_ops, | ||
| 1152 | .version = 0x365A, | ||
| 1153 | }; | ||
| 1154 | |||
| 1155 | static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = { | ||
| 1156 | .host_ops = &ks_pcie_am654_host_ops, | ||
| 1157 | .mode = DW_PCIE_RC_TYPE, | ||
| 1158 | .version = 0x490A, | ||
| 1159 | }; | ||
| 1160 | |||
| 1161 | static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = { | ||
| 1162 | .ep_ops = &ks_pcie_am654_ep_ops, | ||
| 1163 | .mode = DW_PCIE_EP_TYPE, | ||
| 1164 | .version = 0x490A, | ||
| 1165 | }; | ||
| 1166 | |||
| 1167 | static const struct of_device_id ks_pcie_of_match[] = { | ||
| 1168 | { | ||
| 1169 | .type = "pci", | ||
| 1170 | .data = &ks_pcie_rc_of_data, | ||
| 1171 | .compatible = "ti,keystone-pcie", | ||
| 1172 | }, | ||
| 1173 | { | ||
| 1174 | .data = &ks_pcie_am654_rc_of_data, | ||
| 1175 | .compatible = "ti,am654-pcie-rc", | ||
| 1176 | }, | ||
| 1177 | { | ||
| 1178 | .data = &ks_pcie_am654_ep_of_data, | ||
| 1179 | .compatible = "ti,am654-pcie-ep", | ||
| 1180 | }, | ||
| 1181 | { }, | ||
| 1182 | }; | ||
| 1183 | |||
| 898 | static int __init ks_pcie_probe(struct platform_device *pdev) | 1184 | static int __init ks_pcie_probe(struct platform_device *pdev) |
| 899 | { | 1185 | { |
| 1186 | const struct dw_pcie_host_ops *host_ops; | ||
| 1187 | const struct dw_pcie_ep_ops *ep_ops; | ||
| 900 | struct device *dev = &pdev->dev; | 1188 | struct device *dev = &pdev->dev; |
| 901 | struct device_node *np = dev->of_node; | 1189 | struct device_node *np = dev->of_node; |
| 1190 | const struct ks_pcie_of_data *data; | ||
| 1191 | const struct of_device_id *match; | ||
| 1192 | enum dw_pcie_device_mode mode; | ||
| 902 | struct dw_pcie *pci; | 1193 | struct dw_pcie *pci; |
| 903 | struct keystone_pcie *ks_pcie; | 1194 | struct keystone_pcie *ks_pcie; |
| 904 | struct device_link **link; | 1195 | struct device_link **link; |
| 1196 | struct gpio_desc *gpiod; | ||
| 1197 | void __iomem *atu_base; | ||
| 1198 | struct resource *res; | ||
| 1199 | unsigned int version; | ||
| 1200 | void __iomem *base; | ||
| 905 | u32 num_viewport; | 1201 | u32 num_viewport; |
| 906 | struct phy **phy; | 1202 | struct phy **phy; |
| 1203 | int link_speed; | ||
| 907 | u32 num_lanes; | 1204 | u32 num_lanes; |
| 908 | char name[10]; | 1205 | char name[10]; |
| 909 | int ret; | 1206 | int ret; |
| 1207 | int irq; | ||
| 910 | int i; | 1208 | int i; |
| 911 | 1209 | ||
| 1210 | match = of_match_device(of_match_ptr(ks_pcie_of_match), dev); | ||
| 1211 | data = (struct ks_pcie_of_data *)match->data; | ||
| 1212 | if (!data) | ||
| 1213 | return -EINVAL; | ||
| 1214 | |||
| 1215 | version = data->version; | ||
| 1216 | host_ops = data->host_ops; | ||
| 1217 | ep_ops = data->ep_ops; | ||
| 1218 | mode = data->mode; | ||
| 1219 | |||
| 912 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); | 1220 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); |
| 913 | if (!ks_pcie) | 1221 | if (!ks_pcie) |
| 914 | return -ENOMEM; | 1222 | return -ENOMEM; |
| @@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
| 917 | if (!pci) | 1225 | if (!pci) |
| 918 | return -ENOMEM; | 1226 | return -ENOMEM; |
| 919 | 1227 | ||
| 1228 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); | ||
| 1229 | ks_pcie->va_app_base = devm_ioremap_resource(dev, res); | ||
| 1230 | if (IS_ERR(ks_pcie->va_app_base)) | ||
| 1231 | return PTR_ERR(ks_pcie->va_app_base); | ||
| 1232 | |||
| 1233 | ks_pcie->app = *res; | ||
| 1234 | |||
| 1235 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics"); | ||
| 1236 | base = devm_pci_remap_cfg_resource(dev, res); | ||
| 1237 | if (IS_ERR(base)) | ||
| 1238 | return PTR_ERR(base); | ||
| 1239 | |||
| 1240 | if (of_device_is_compatible(np, "ti,am654-pcie-rc")) | ||
| 1241 | ks_pcie->is_am6 = true; | ||
| 1242 | |||
| 1243 | pci->dbi_base = base; | ||
| 1244 | pci->dbi_base2 = base; | ||
| 920 | pci->dev = dev; | 1245 | pci->dev = dev; |
| 921 | pci->ops = &ks_pcie_dw_pcie_ops; | 1246 | pci->ops = &ks_pcie_dw_pcie_ops; |
| 1247 | pci->version = version; | ||
| 1248 | |||
| 1249 | irq = platform_get_irq(pdev, 0); | ||
| 1250 | if (irq < 0) { | ||
| 1251 | dev_err(dev, "missing IRQ resource: %d\n", irq); | ||
| 1252 | return irq; | ||
| 1253 | } | ||
| 922 | 1254 | ||
| 923 | ret = of_property_read_u32(np, "num-viewport", &num_viewport); | 1255 | ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, |
| 1256 | "ks-pcie-error-irq", ks_pcie); | ||
| 924 | if (ret < 0) { | 1257 | if (ret < 0) { |
| 925 | dev_err(dev, "unable to read *num-viewport* property\n"); | 1258 | dev_err(dev, "failed to request error IRQ %d\n", |
| 1259 | irq); | ||
| 926 | return ret; | 1260 | return ret; |
| 927 | } | 1261 | } |
| 928 | 1262 | ||
| @@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
| 960 | ks_pcie->pci = pci; | 1294 | ks_pcie->pci = pci; |
| 961 | ks_pcie->link = link; | 1295 | ks_pcie->link = link; |
| 962 | ks_pcie->num_lanes = num_lanes; | 1296 | ks_pcie->num_lanes = num_lanes; |
| 963 | ks_pcie->num_viewport = num_viewport; | ||
| 964 | ks_pcie->phy = phy; | 1297 | ks_pcie->phy = phy; |
| 965 | 1298 | ||
| 1299 | gpiod = devm_gpiod_get_optional(dev, "reset", | ||
| 1300 | GPIOD_OUT_LOW); | ||
| 1301 | if (IS_ERR(gpiod)) { | ||
| 1302 | ret = PTR_ERR(gpiod); | ||
| 1303 | if (ret != -EPROBE_DEFER) | ||
| 1304 | dev_err(dev, "Failed to get reset GPIO\n"); | ||
| 1305 | goto err_link; | ||
| 1306 | } | ||
| 1307 | |||
| 966 | ret = ks_pcie_enable_phy(ks_pcie); | 1308 | ret = ks_pcie_enable_phy(ks_pcie); |
| 967 | if (ret) { | 1309 | if (ret) { |
| 968 | dev_err(dev, "failed to enable phy\n"); | 1310 | dev_err(dev, "failed to enable phy\n"); |
| @@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
| 977 | goto err_get_sync; | 1319 | goto err_get_sync; |
| 978 | } | 1320 | } |
| 979 | 1321 | ||
| 980 | ret = ks_pcie_add_pcie_port(ks_pcie, pdev); | 1322 | if (pci->version >= 0x480A) { |
| 981 | if (ret < 0) | 1323 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); |
| 982 | goto err_get_sync; | 1324 | atu_base = devm_ioremap_resource(dev, res); |
| 1325 | if (IS_ERR(atu_base)) { | ||
| 1326 | ret = PTR_ERR(atu_base); | ||
| 1327 | goto err_get_sync; | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | pci->atu_base = atu_base; | ||
| 1331 | |||
| 1332 | ret = ks_pcie_am654_set_mode(dev, mode); | ||
| 1333 | if (ret < 0) | ||
| 1334 | goto err_get_sync; | ||
| 1335 | } else { | ||
| 1336 | ret = ks_pcie_set_mode(dev); | ||
| 1337 | if (ret < 0) | ||
| 1338 | goto err_get_sync; | ||
| 1339 | } | ||
| 1340 | |||
| 1341 | link_speed = of_pci_get_max_link_speed(np); | ||
| 1342 | if (link_speed < 0) | ||
| 1343 | link_speed = 2; | ||
| 1344 | |||
| 1345 | ks_pcie_set_link_speed(pci, link_speed); | ||
| 1346 | |||
| 1347 | switch (mode) { | ||
| 1348 | case DW_PCIE_RC_TYPE: | ||
| 1349 | if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) { | ||
| 1350 | ret = -ENODEV; | ||
| 1351 | goto err_get_sync; | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | ret = of_property_read_u32(np, "num-viewport", &num_viewport); | ||
| 1355 | if (ret < 0) { | ||
| 1356 | dev_err(dev, "unable to read *num-viewport* property\n"); | ||
| 1357 | return ret; | ||
| 1358 | } | ||
| 1359 | |||
| 1360 | /* | ||
| 1361 | * "Power Sequencing and Reset Signal Timings" table in | ||
| 1362 | * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0 | ||
| 1363 | * indicates PERST# should be deasserted after minimum of 100us | ||
| 1364 | * once REFCLK is stable. The REFCLK to the connector in RC | ||
| 1365 | * mode is selected while enabling the PHY. So deassert PERST# | ||
| 1366 | * after 100 us. | ||
| 1367 | */ | ||
| 1368 | if (gpiod) { | ||
| 1369 | usleep_range(100, 200); | ||
| 1370 | gpiod_set_value_cansleep(gpiod, 1); | ||
| 1371 | } | ||
| 1372 | |||
| 1373 | ks_pcie->num_viewport = num_viewport; | ||
| 1374 | pci->pp.ops = host_ops; | ||
| 1375 | ret = ks_pcie_add_pcie_port(ks_pcie, pdev); | ||
| 1376 | if (ret < 0) | ||
| 1377 | goto err_get_sync; | ||
| 1378 | break; | ||
| 1379 | case DW_PCIE_EP_TYPE: | ||
| 1380 | if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) { | ||
| 1381 | ret = -ENODEV; | ||
| 1382 | goto err_get_sync; | ||
| 1383 | } | ||
| 1384 | |||
| 1385 | pci->ep.ops = ep_ops; | ||
| 1386 | ret = ks_pcie_add_pcie_ep(ks_pcie, pdev); | ||
| 1387 | if (ret < 0) | ||
| 1388 | goto err_get_sync; | ||
| 1389 | break; | ||
| 1390 | default: | ||
| 1391 | dev_err(dev, "INVALID device type %d\n", mode); | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | ks_pcie_enable_error_irq(ks_pcie); | ||
| 983 | 1395 | ||
| 984 | return 0; | 1396 | return 0; |
| 985 | 1397 | ||
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index a42c9c3ae1cc..be61d96cc95e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c | |||
| @@ -79,7 +79,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 82 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
| 83 | .ep_init = ls_pcie_ep_init, | 83 | .ep_init = ls_pcie_ep_init, |
| 84 | .raise_irq = ls_pcie_ep_raise_irq, | 84 | .raise_irq = ls_pcie_ep_raise_irq, |
| 85 | .get_features = ls_pcie_ep_get_features, | 85 | .get_features = ls_pcie_ep_get_features, |
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c index ce45bde29bf8..3a5fa26d5e56 100644 --- a/drivers/pci/controller/dwc/pci-layerscape.c +++ b/drivers/pci/controller/dwc/pci-layerscape.c | |||
| @@ -201,6 +201,7 @@ static int ls_pcie_msi_host_init(struct pcie_port *pp) | |||
| 201 | return -EINVAL; | 201 | return -EINVAL; |
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | of_node_put(msi_node); | ||
| 204 | return 0; | 205 | return 0; |
| 205 | } | 206 | } |
| 206 | 207 | ||
diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c new file mode 100644 index 000000000000..3ab58f0584a8 --- /dev/null +++ b/drivers/pci/controller/dwc/pcie-al.c | |||
| @@ -0,0 +1,93 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | /* | ||
| 3 | * PCIe host controller driver for Amazon's Annapurna Labs IP (used in chips | ||
| 4 | * such as Graviton and Alpine) | ||
| 5 | * | ||
| 6 | * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. | ||
| 7 | * | ||
| 8 | * Author: Jonathan Chocron <jonnyc@amazon.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/pci.h> | ||
| 12 | #include <linux/pci-ecam.h> | ||
| 13 | #include <linux/pci-acpi.h> | ||
| 14 | #include "../../pci.h" | ||
| 15 | |||
| 16 | #if defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) | ||
| 17 | |||
| 18 | struct al_pcie_acpi { | ||
| 19 | void __iomem *dbi_base; | ||
| 20 | }; | ||
| 21 | |||
| 22 | static void __iomem *al_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, | ||
| 23 | int where) | ||
| 24 | { | ||
| 25 | struct pci_config_window *cfg = bus->sysdata; | ||
| 26 | struct al_pcie_acpi *pcie = cfg->priv; | ||
| 27 | void __iomem *dbi_base = pcie->dbi_base; | ||
| 28 | |||
| 29 | if (bus->number == cfg->busr.start) { | ||
| 30 | /* | ||
| 31 | * The DW PCIe core doesn't filter out transactions to other | ||
| 32 | * devices/functions on the root bus num, so we do this here. | ||
| 33 | */ | ||
| 34 | if (PCI_SLOT(devfn) > 0) | ||
| 35 | return NULL; | ||
| 36 | else | ||
| 37 | return dbi_base + where; | ||
| 38 | } | ||
| 39 | |||
| 40 | return pci_ecam_map_bus(bus, devfn, where); | ||
| 41 | } | ||
| 42 | |||
| 43 | static int al_pcie_init(struct pci_config_window *cfg) | ||
| 44 | { | ||
| 45 | struct device *dev = cfg->parent; | ||
| 46 | struct acpi_device *adev = to_acpi_device(dev); | ||
| 47 | struct acpi_pci_root *root = acpi_driver_data(adev); | ||
| 48 | struct al_pcie_acpi *al_pcie; | ||
| 49 | struct resource *res; | ||
| 50 | int ret; | ||
| 51 | |||
| 52 | al_pcie = devm_kzalloc(dev, sizeof(*al_pcie), GFP_KERNEL); | ||
| 53 | if (!al_pcie) | ||
| 54 | return -ENOMEM; | ||
| 55 | |||
| 56 | res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); | ||
| 57 | if (!res) | ||
| 58 | return -ENOMEM; | ||
| 59 | |||
| 60 | ret = acpi_get_rc_resources(dev, "AMZN0001", root->segment, res); | ||
| 61 | if (ret) { | ||
| 62 | dev_err(dev, "can't get rc dbi base address for SEG %d\n", | ||
| 63 | root->segment); | ||
| 64 | return ret; | ||
| 65 | } | ||
| 66 | |||
| 67 | dev_dbg(dev, "Root port dbi res: %pR\n", res); | ||
| 68 | |||
| 69 | al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
| 70 | if (IS_ERR(al_pcie->dbi_base)) { | ||
| 71 | long err = PTR_ERR(al_pcie->dbi_base); | ||
| 72 | |||
| 73 | dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n", | ||
| 74 | res, err); | ||
| 75 | return err; | ||
| 76 | } | ||
| 77 | |||
| 78 | cfg->priv = al_pcie; | ||
| 79 | |||
| 80 | return 0; | ||
| 81 | } | ||
| 82 | |||
| 83 | struct pci_ecam_ops al_pcie_ops = { | ||
| 84 | .bus_shift = 20, | ||
| 85 | .init = al_pcie_init, | ||
| 86 | .pci_ops = { | ||
| 87 | .map_bus = al_pcie_map_bus, | ||
| 88 | .read = pci_generic_config_read, | ||
| 89 | .write = pci_generic_config_write, | ||
| 90 | } | ||
| 91 | }; | ||
| 92 | |||
| 93 | #endif /* defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS) */ | ||
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index dba83abfe764..d00252bd8fae 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c | |||
| @@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
| 444 | return 0; | 444 | return 0; |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 447 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
| 448 | .ep_init = artpec6_pcie_ep_init, | 448 | .ep_init = artpec6_pcie_ep_init, |
| 449 | .raise_irq = artpec6_pcie_raise_irq, | 449 | .raise_irq = artpec6_pcie_raise_irq, |
| 450 | }; | 450 | }; |
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 24f5a775ad34..2bf5a35c0570 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c | |||
| @@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, | |||
| 46 | u8 cap_id, next_cap_ptr; | 46 | u8 cap_id, next_cap_ptr; |
| 47 | u16 reg; | 47 | u16 reg; |
| 48 | 48 | ||
| 49 | if (!cap_ptr) | ||
| 50 | return 0; | ||
| 51 | |||
| 49 | reg = dw_pcie_readw_dbi(pci, cap_ptr); | 52 | reg = dw_pcie_readw_dbi(pci, cap_ptr); |
| 50 | next_cap_ptr = (reg & 0xff00) >> 8; | ||
| 51 | cap_id = (reg & 0x00ff); | 53 | cap_id = (reg & 0x00ff); |
| 52 | 54 | ||
| 53 | if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) | 55 | if (cap_id > PCI_CAP_ID_MAX) |
| 54 | return 0; | 56 | return 0; |
| 55 | 57 | ||
| 56 | if (cap_id == cap) | 58 | if (cap_id == cap) |
| 57 | return cap_ptr; | 59 | return cap_ptr; |
| 58 | 60 | ||
| 61 | next_cap_ptr = (reg & 0xff00) >> 8; | ||
| 59 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | 62 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); |
| 60 | } | 63 | } |
| 61 | 64 | ||
| @@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) | |||
| 67 | reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); | 70 | reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); |
| 68 | next_cap_ptr = (reg & 0x00ff); | 71 | next_cap_ptr = (reg & 0x00ff); |
| 69 | 72 | ||
| 70 | if (!next_cap_ptr) | ||
| 71 | return 0; | ||
| 72 | |||
| 73 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | 73 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); |
| 74 | } | 74 | } |
| 75 | 75 | ||
| @@ -397,6 +397,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
| 397 | { | 397 | { |
| 398 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | 398 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); |
| 399 | struct pci_epc *epc = ep->epc; | 399 | struct pci_epc *epc = ep->epc; |
| 400 | unsigned int aligned_offset; | ||
| 400 | u16 msg_ctrl, msg_data; | 401 | u16 msg_ctrl, msg_data; |
| 401 | u32 msg_addr_lower, msg_addr_upper, reg; | 402 | u32 msg_addr_lower, msg_addr_upper, reg; |
| 402 | u64 msg_addr; | 403 | u64 msg_addr; |
| @@ -422,13 +423,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
| 422 | reg = ep->msi_cap + PCI_MSI_DATA_32; | 423 | reg = ep->msi_cap + PCI_MSI_DATA_32; |
| 423 | msg_data = dw_pcie_readw_dbi(pci, reg); | 424 | msg_data = dw_pcie_readw_dbi(pci, reg); |
| 424 | } | 425 | } |
| 425 | msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; | 426 | aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); |
| 427 | msg_addr = ((u64)msg_addr_upper) << 32 | | ||
| 428 | (msg_addr_lower & ~aligned_offset); | ||
| 426 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, | 429 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, |
| 427 | epc->mem->page_size); | 430 | epc->mem->page_size); |
| 428 | if (ret) | 431 | if (ret) |
| 429 | return ret; | 432 | return ret; |
| 430 | 433 | ||
| 431 | writel(msg_data | (interrupt_num - 1), ep->msi_mem); | 434 | writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); |
| 432 | 435 | ||
| 433 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); | 436 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); |
| 434 | 437 | ||
| @@ -504,10 +507,32 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | |||
| 504 | pci_epc_mem_exit(epc); | 507 | pci_epc_mem_exit(epc); |
| 505 | } | 508 | } |
| 506 | 509 | ||
| 510 | static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) | ||
| 511 | { | ||
| 512 | u32 header; | ||
| 513 | int pos = PCI_CFG_SPACE_SIZE; | ||
| 514 | |||
| 515 | while (pos) { | ||
| 516 | header = dw_pcie_readl_dbi(pci, pos); | ||
| 517 | if (PCI_EXT_CAP_ID(header) == cap) | ||
| 518 | return pos; | ||
| 519 | |||
| 520 | pos = PCI_EXT_CAP_NEXT(header); | ||
| 521 | if (!pos) | ||
| 522 | break; | ||
| 523 | } | ||
| 524 | |||
| 525 | return 0; | ||
| 526 | } | ||
| 527 | |||
| 507 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | 528 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) |
| 508 | { | 529 | { |
| 530 | int i; | ||
| 509 | int ret; | 531 | int ret; |
| 532 | u32 reg; | ||
| 510 | void *addr; | 533 | void *addr; |
| 534 | unsigned int nbars; | ||
| 535 | unsigned int offset; | ||
| 511 | struct pci_epc *epc; | 536 | struct pci_epc *epc; |
| 512 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | 537 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); |
| 513 | struct device *dev = pci->dev; | 538 | struct device *dev = pci->dev; |
| @@ -517,10 +542,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) | |||
| 517 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); | 542 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); |
| 518 | return -EINVAL; | 543 | return -EINVAL; |
| 519 | } | 544 | } |
| 520 | if (pci->iatu_unroll_enabled && !pci->atu_base) { | ||
| 521 | dev_err(dev, "atu_base is not populated\n"); | ||
| 522 | return -EINVAL; | ||
| 523 | } | ||
| 524 | 545 | ||
| 525 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | 546 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); |
| 526 | if (ret < 0) { | 547 | if (ret < 0) { |
| @@ -595,6 +616,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) | |||
| 595 | 616 | ||
| 596 | ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); | 617 | ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); |
| 597 | 618 | ||
| 619 | offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); | ||
| 620 | if (offset) { | ||
| 621 | reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); | ||
| 622 | nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> | ||
| 623 | PCI_REBAR_CTRL_NBAR_SHIFT; | ||
| 624 | |||
| 625 | dw_pcie_dbi_ro_wr_en(pci); | ||
| 626 | for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) | ||
| 627 | dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); | ||
| 628 | dw_pcie_dbi_ro_wr_dis(pci); | ||
| 629 | } | ||
| 630 | |||
| 598 | dw_pcie_setup(pci); | 631 | dw_pcie_setup(pci); |
| 599 | 632 | ||
| 600 | return 0; | 633 | return 0; |
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 25087d3c9a82..77db32529319 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c | |||
| @@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) | |||
| 126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 127 | u64 msi_target; | 127 | u64 msi_target; |
| 128 | 128 | ||
| 129 | if (pp->ops->get_msi_addr) | 129 | msi_target = (u64)pp->msi_data; |
| 130 | msi_target = pp->ops->get_msi_addr(pp); | ||
| 131 | else | ||
| 132 | msi_target = (u64)pp->msi_data; | ||
| 133 | 130 | ||
| 134 | msg->address_lo = lower_32_bits(msi_target); | 131 | msg->address_lo = lower_32_bits(msi_target); |
| 135 | msg->address_hi = upper_32_bits(msi_target); | 132 | msg->address_hi = upper_32_bits(msi_target); |
| 136 | 133 | ||
| 137 | if (pp->ops->get_msi_data) | 134 | msg->data = d->hwirq; |
| 138 | msg->data = pp->ops->get_msi_data(pp, d->hwirq); | ||
| 139 | else | ||
| 140 | msg->data = d->hwirq; | ||
| 141 | 135 | ||
| 142 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | 136 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", |
| 143 | (int)d->hwirq, msg->address_hi, msg->address_lo); | 137 | (int)d->hwirq, msg->address_hi, msg->address_lo); |
| @@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d) | |||
| 157 | 151 | ||
| 158 | raw_spin_lock_irqsave(&pp->lock, flags); | 152 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 159 | 153 | ||
| 160 | if (pp->ops->msi_clear_irq) { | 154 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 161 | pp->ops->msi_clear_irq(pp, d->hwirq); | 155 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 162 | } else { | 156 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
| 163 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
| 164 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
| 165 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
| 166 | 157 | ||
| 167 | pp->irq_mask[ctrl] |= BIT(bit); | 158 | pp->irq_mask[ctrl] |= BIT(bit); |
| 168 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 159 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
| 169 | pp->irq_mask[ctrl]); | 160 | pp->irq_mask[ctrl]); |
| 170 | } | ||
| 171 | 161 | ||
| 172 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 162 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 173 | } | 163 | } |
| @@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d) | |||
| 180 | 170 | ||
| 181 | raw_spin_lock_irqsave(&pp->lock, flags); | 171 | raw_spin_lock_irqsave(&pp->lock, flags); |
| 182 | 172 | ||
| 183 | if (pp->ops->msi_set_irq) { | 173 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 184 | pp->ops->msi_set_irq(pp, d->hwirq); | 174 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 185 | } else { | 175 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
| 186 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
| 187 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
| 188 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
| 189 | 176 | ||
| 190 | pp->irq_mask[ctrl] &= ~BIT(bit); | 177 | pp->irq_mask[ctrl] &= ~BIT(bit); |
| 191 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 178 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
| 192 | pp->irq_mask[ctrl]); | 179 | pp->irq_mask[ctrl]); |
| 193 | } | ||
| 194 | 180 | ||
| 195 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 181 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
| 196 | } | 182 | } |
| @@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d) | |||
| 199 | { | 185 | { |
| 200 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); | 186 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
| 201 | unsigned int res, bit, ctrl; | 187 | unsigned int res, bit, ctrl; |
| 202 | unsigned long flags; | ||
| 203 | 188 | ||
| 204 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | 189 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
| 205 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | 190 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
| 206 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | 191 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
| 207 | 192 | ||
| 208 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
| 209 | |||
| 210 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); | 193 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); |
| 211 | |||
| 212 | if (pp->ops->msi_irq_ack) | ||
| 213 | pp->ops->msi_irq_ack(d->hwirq, pp); | ||
| 214 | |||
| 215 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
| 216 | } | 194 | } |
| 217 | 195 | ||
| 218 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { | 196 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { |
| @@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, | |||
| 245 | 223 | ||
| 246 | for (i = 0; i < nr_irqs; i++) | 224 | for (i = 0; i < nr_irqs; i++) |
| 247 | irq_domain_set_info(domain, virq + i, bit + i, | 225 | irq_domain_set_info(domain, virq + i, bit + i, |
| 248 | &dw_pci_msi_bottom_irq_chip, | 226 | pp->msi_irq_chip, |
| 249 | pp, handle_edge_irq, | 227 | pp, handle_edge_irq, |
| 250 | NULL, NULL); | 228 | NULL, NULL); |
| 251 | 229 | ||
| @@ -298,25 +276,31 @@ int dw_pcie_allocate_domains(struct pcie_port *pp) | |||
| 298 | 276 | ||
| 299 | void dw_pcie_free_msi(struct pcie_port *pp) | 277 | void dw_pcie_free_msi(struct pcie_port *pp) |
| 300 | { | 278 | { |
| 301 | irq_set_chained_handler(pp->msi_irq, NULL); | 279 | if (pp->msi_irq) { |
| 302 | irq_set_handler_data(pp->msi_irq, NULL); | 280 | irq_set_chained_handler(pp->msi_irq, NULL); |
| 281 | irq_set_handler_data(pp->msi_irq, NULL); | ||
| 282 | } | ||
| 303 | 283 | ||
| 304 | irq_domain_remove(pp->msi_domain); | 284 | irq_domain_remove(pp->msi_domain); |
| 305 | irq_domain_remove(pp->irq_domain); | 285 | irq_domain_remove(pp->irq_domain); |
| 286 | |||
| 287 | if (pp->msi_page) | ||
| 288 | __free_page(pp->msi_page); | ||
| 306 | } | 289 | } |
| 307 | 290 | ||
| 308 | void dw_pcie_msi_init(struct pcie_port *pp) | 291 | void dw_pcie_msi_init(struct pcie_port *pp) |
| 309 | { | 292 | { |
| 310 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 293 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
| 311 | struct device *dev = pci->dev; | 294 | struct device *dev = pci->dev; |
| 312 | struct page *page; | ||
| 313 | u64 msi_target; | 295 | u64 msi_target; |
| 314 | 296 | ||
| 315 | page = alloc_page(GFP_KERNEL); | 297 | pp->msi_page = alloc_page(GFP_KERNEL); |
| 316 | pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); | 298 | pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE, |
| 299 | DMA_FROM_DEVICE); | ||
| 317 | if (dma_mapping_error(dev, pp->msi_data)) { | 300 | if (dma_mapping_error(dev, pp->msi_data)) { |
| 318 | dev_err(dev, "Failed to map MSI data\n"); | 301 | dev_err(dev, "Failed to map MSI data\n"); |
| 319 | __free_page(page); | 302 | __free_page(pp->msi_page); |
| 303 | pp->msi_page = NULL; | ||
| 320 | return; | 304 | return; |
| 321 | } | 305 | } |
| 322 | msi_target = (u64)pp->msi_data; | 306 | msi_target = (u64)pp->msi_data; |
| @@ -335,7 +319,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 335 | struct device_node *np = dev->of_node; | 319 | struct device_node *np = dev->of_node; |
| 336 | struct platform_device *pdev = to_platform_device(dev); | 320 | struct platform_device *pdev = to_platform_device(dev); |
| 337 | struct resource_entry *win, *tmp; | 321 | struct resource_entry *win, *tmp; |
| 338 | struct pci_bus *bus, *child; | 322 | struct pci_bus *child; |
| 339 | struct pci_host_bridge *bridge; | 323 | struct pci_host_bridge *bridge; |
| 340 | struct resource *cfg_res; | 324 | struct resource *cfg_res; |
| 341 | int ret; | 325 | int ret; |
| @@ -352,7 +336,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 352 | dev_err(dev, "Missing *config* reg space\n"); | 336 | dev_err(dev, "Missing *config* reg space\n"); |
| 353 | } | 337 | } |
| 354 | 338 | ||
| 355 | bridge = pci_alloc_host_bridge(0); | 339 | bridge = devm_pci_alloc_host_bridge(dev, 0); |
| 356 | if (!bridge) | 340 | if (!bridge) |
| 357 | return -ENOMEM; | 341 | return -ENOMEM; |
| 358 | 342 | ||
| @@ -363,7 +347,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 363 | 347 | ||
| 364 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); | 348 | ret = devm_request_pci_bus_resources(dev, &bridge->windows); |
| 365 | if (ret) | 349 | if (ret) |
| 366 | goto error; | 350 | return ret; |
| 367 | 351 | ||
| 368 | /* Get the I/O and memory ranges from DT */ | 352 | /* Get the I/O and memory ranges from DT */ |
| 369 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { | 353 | resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { |
| @@ -407,8 +391,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 407 | resource_size(pp->cfg)); | 391 | resource_size(pp->cfg)); |
| 408 | if (!pci->dbi_base) { | 392 | if (!pci->dbi_base) { |
| 409 | dev_err(dev, "Error with ioremap\n"); | 393 | dev_err(dev, "Error with ioremap\n"); |
| 410 | ret = -ENOMEM; | 394 | return -ENOMEM; |
| 411 | goto error; | ||
| 412 | } | 395 | } |
| 413 | } | 396 | } |
| 414 | 397 | ||
| @@ -419,8 +402,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 419 | pp->cfg0_base, pp->cfg0_size); | 402 | pp->cfg0_base, pp->cfg0_size); |
| 420 | if (!pp->va_cfg0_base) { | 403 | if (!pp->va_cfg0_base) { |
| 421 | dev_err(dev, "Error with ioremap in function\n"); | 404 | dev_err(dev, "Error with ioremap in function\n"); |
| 422 | ret = -ENOMEM; | 405 | return -ENOMEM; |
| 423 | goto error; | ||
| 424 | } | 406 | } |
| 425 | } | 407 | } |
| 426 | 408 | ||
| @@ -430,8 +412,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 430 | pp->cfg1_size); | 412 | pp->cfg1_size); |
| 431 | if (!pp->va_cfg1_base) { | 413 | if (!pp->va_cfg1_base) { |
| 432 | dev_err(dev, "Error with ioremap\n"); | 414 | dev_err(dev, "Error with ioremap\n"); |
| 433 | ret = -ENOMEM; | 415 | return -ENOMEM; |
| 434 | goto error; | ||
| 435 | } | 416 | } |
| 436 | } | 417 | } |
| 437 | 418 | ||
| @@ -439,7 +420,7 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 439 | if (ret) | 420 | if (ret) |
| 440 | pci->num_viewport = 2; | 421 | pci->num_viewport = 2; |
| 441 | 422 | ||
| 442 | if (IS_ENABLED(CONFIG_PCI_MSI) && pci_msi_enabled()) { | 423 | if (pci_msi_enabled()) { |
| 443 | /* | 424 | /* |
| 444 | * If a specific SoC driver needs to change the | 425 | * If a specific SoC driver needs to change the |
| 445 | * default number of vectors, it needs to implement | 426 | * default number of vectors, it needs to implement |
| @@ -454,14 +435,16 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 454 | pp->num_vectors == 0) { | 435 | pp->num_vectors == 0) { |
| 455 | dev_err(dev, | 436 | dev_err(dev, |
| 456 | "Invalid number of vectors\n"); | 437 | "Invalid number of vectors\n"); |
| 457 | goto error; | 438 | return -EINVAL; |
| 458 | } | 439 | } |
| 459 | } | 440 | } |
| 460 | 441 | ||
| 461 | if (!pp->ops->msi_host_init) { | 442 | if (!pp->ops->msi_host_init) { |
| 443 | pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; | ||
| 444 | |||
| 462 | ret = dw_pcie_allocate_domains(pp); | 445 | ret = dw_pcie_allocate_domains(pp); |
| 463 | if (ret) | 446 | if (ret) |
| 464 | goto error; | 447 | return ret; |
| 465 | 448 | ||
| 466 | if (pp->msi_irq) | 449 | if (pp->msi_irq) |
| 467 | irq_set_chained_handler_and_data(pp->msi_irq, | 450 | irq_set_chained_handler_and_data(pp->msi_irq, |
| @@ -470,14 +453,14 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 470 | } else { | 453 | } else { |
| 471 | ret = pp->ops->msi_host_init(pp); | 454 | ret = pp->ops->msi_host_init(pp); |
| 472 | if (ret < 0) | 455 | if (ret < 0) |
| 473 | goto error; | 456 | return ret; |
| 474 | } | 457 | } |
| 475 | } | 458 | } |
| 476 | 459 | ||
| 477 | if (pp->ops->host_init) { | 460 | if (pp->ops->host_init) { |
| 478 | ret = pp->ops->host_init(pp); | 461 | ret = pp->ops->host_init(pp); |
| 479 | if (ret) | 462 | if (ret) |
| 480 | goto error; | 463 | goto err_free_msi; |
| 481 | } | 464 | } |
| 482 | 465 | ||
| 483 | pp->root_bus_nr = pp->busn->start; | 466 | pp->root_bus_nr = pp->busn->start; |
| @@ -491,24 +474,25 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
| 491 | 474 | ||
| 492 | ret = pci_scan_root_bus_bridge(bridge); | 475 | ret = pci_scan_root_bus_bridge(bridge); |
| 493 | if (ret) | 476 | if (ret) |
| 494 | goto error; | 477 | goto err_free_msi; |
| 495 | 478 | ||
| 496 | bus = bridge->bus; | 479 | pp->root_bus = bridge->bus; |
| 497 | 480 | ||
| 498 | if (pp->ops->scan_bus) | 481 | if (pp->ops->scan_bus) |
| 499 | pp->ops->scan_bus(pp); | 482 | pp->ops->scan_bus(pp); |
| 500 | 483 | ||
| 501 | pci_bus_size_bridges(bus); | 484 | pci_bus_size_bridges(pp->root_bus); |
| 502 | pci_bus_assign_resources(bus); | 485 | pci_bus_assign_resources(pp->root_bus); |
| 503 | 486 | ||
| 504 | list_for_each_entry(child, &bus->children, node) | 487 | list_for_each_entry(child, &pp->root_bus->children, node) |
| 505 | pcie_bus_configure_settings(child); | 488 | pcie_bus_configure_settings(child); |
| 506 | 489 | ||
| 507 | pci_bus_add_devices(bus); | 490 | pci_bus_add_devices(pp->root_bus); |
| 508 | return 0; | 491 | return 0; |
| 509 | 492 | ||
| 510 | error: | 493 | err_free_msi: |
| 511 | pci_free_host_bridge(bridge); | 494 | if (pci_msi_enabled() && !pp->ops->msi_host_init) |
| 495 | dw_pcie_free_msi(pp); | ||
| 512 | return ret; | 496 | return ret; |
| 513 | } | 497 | } |
| 514 | 498 | ||
| @@ -628,17 +612,6 @@ static struct pci_ops dw_pcie_ops = { | |||
| 628 | .write = dw_pcie_wr_conf, | 612 | .write = dw_pcie_wr_conf, |
| 629 | }; | 613 | }; |
| 630 | 614 | ||
| 631 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
| 632 | { | ||
| 633 | u32 val; | ||
| 634 | |||
| 635 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
| 636 | if (val == 0xffffffff) | ||
| 637 | return 1; | ||
| 638 | |||
| 639 | return 0; | ||
| 640 | } | ||
| 641 | |||
| 642 | void dw_pcie_setup_rc(struct pcie_port *pp) | 615 | void dw_pcie_setup_rc(struct pcie_port *pp) |
| 643 | { | 616 | { |
| 644 | u32 val, ctrl, num_ctrls; | 617 | u32 val, ctrl, num_ctrls; |
| @@ -646,17 +619,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
| 646 | 619 | ||
| 647 | dw_pcie_setup(pci); | 620 | dw_pcie_setup(pci); |
| 648 | 621 | ||
| 649 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | 622 | if (!pp->ops->msi_host_init) { |
| 650 | 623 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | |
| 651 | /* Initialize IRQ Status array */ | 624 | |
| 652 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { | 625 | /* Initialize IRQ Status array */ |
| 653 | pp->irq_mask[ctrl] = ~0; | 626 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { |
| 654 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + | 627 | pp->irq_mask[ctrl] = ~0; |
| 655 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 628 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + |
| 656 | 4, pp->irq_mask[ctrl]); | 629 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
| 657 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + | 630 | 4, pp->irq_mask[ctrl]); |
| 658 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 631 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + |
| 659 | 4, ~0); | 632 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
| 633 | 4, ~0); | ||
| 634 | } | ||
| 660 | } | 635 | } |
| 661 | 636 | ||
| 662 | /* Setup RC BARs */ | 637 | /* Setup RC BARs */ |
| @@ -690,14 +665,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
| 690 | * we should not program the ATU here. | 665 | * we should not program the ATU here. |
| 691 | */ | 666 | */ |
| 692 | if (!pp->ops->rd_other_conf) { | 667 | if (!pp->ops->rd_other_conf) { |
| 693 | /* Get iATU unroll support */ | ||
| 694 | pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); | ||
| 695 | dev_dbg(pci->dev, "iATU unroll: %s\n", | ||
| 696 | pci->iatu_unroll_enabled ? "enabled" : "disabled"); | ||
| 697 | |||
| 698 | if (pci->iatu_unroll_enabled && !pci->atu_base) | ||
| 699 | pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; | ||
| 700 | |||
| 701 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, | 668 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, |
| 702 | PCIE_ATU_TYPE_MEM, pp->mem_base, | 669 | PCIE_ATU_TYPE_MEM, pp->mem_base, |
| 703 | pp->mem_bus_addr, pp->mem_size); | 670 | pp->mem_bus_addr, pp->mem_size); |
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 932dbd0b34b6..b58fdcbc664b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c | |||
| @@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep) | |||
| 106 | return &dw_plat_pcie_epc_features; | 106 | return &dw_plat_pcie_epc_features; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 109 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
| 110 | .ep_init = dw_plat_pcie_ep_init, | 110 | .ep_init = dw_plat_pcie_ep_init, |
| 111 | .raise_irq = dw_plat_pcie_ep_raise_irq, | 111 | .raise_irq = dw_plat_pcie_ep_raise_irq, |
| 112 | .get_features = dw_plat_pcie_get_features, | 112 | .get_features = dw_plat_pcie_get_features, |
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 31f6331ca46f..9d7c51c32b3b 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
| @@ -14,12 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | #include "pcie-designware.h" | 15 | #include "pcie-designware.h" |
| 16 | 16 | ||
| 17 | /* PCIe Port Logic registers */ | ||
| 18 | #define PLR_OFFSET 0x700 | ||
| 19 | #define PCIE_PHY_DEBUG_R1 (PLR_OFFSET + 0x2c) | ||
| 20 | #define PCIE_PHY_DEBUG_R1_LINK_UP (0x1 << 4) | ||
| 21 | #define PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING (0x1 << 29) | ||
| 22 | |||
| 23 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) | 17 | int dw_pcie_read(void __iomem *addr, int size, u32 *val) |
| 24 | { | 18 | { |
| 25 | if (!IS_ALIGNED((uintptr_t)addr, size)) { | 19 | if (!IS_ALIGNED((uintptr_t)addr, size)) { |
| @@ -89,6 +83,37 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | |||
| 89 | dev_err(pci->dev, "Write DBI address failed\n"); | 83 | dev_err(pci->dev, "Write DBI address failed\n"); |
| 90 | } | 84 | } |
| 91 | 85 | ||
| 86 | u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 87 | size_t size) | ||
| 88 | { | ||
| 89 | int ret; | ||
| 90 | u32 val; | ||
| 91 | |||
| 92 | if (pci->ops->read_dbi2) | ||
| 93 | return pci->ops->read_dbi2(pci, base, reg, size); | ||
| 94 | |||
| 95 | ret = dw_pcie_read(base + reg, size, &val); | ||
| 96 | if (ret) | ||
| 97 | dev_err(pci->dev, "read DBI address failed\n"); | ||
| 98 | |||
| 99 | return val; | ||
| 100 | } | ||
| 101 | |||
| 102 | void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 103 | size_t size, u32 val) | ||
| 104 | { | ||
| 105 | int ret; | ||
| 106 | |||
| 107 | if (pci->ops->write_dbi2) { | ||
| 108 | pci->ops->write_dbi2(pci, base, reg, size, val); | ||
| 109 | return; | ||
| 110 | } | ||
| 111 | |||
| 112 | ret = dw_pcie_write(base + reg, size, val); | ||
| 113 | if (ret) | ||
| 114 | dev_err(pci->dev, "write DBI address failed\n"); | ||
| 115 | } | ||
| 116 | |||
| 92 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) | 117 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) |
| 93 | { | 118 | { |
| 94 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | 119 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
| @@ -334,9 +359,20 @@ int dw_pcie_link_up(struct dw_pcie *pci) | |||
| 334 | if (pci->ops->link_up) | 359 | if (pci->ops->link_up) |
| 335 | return pci->ops->link_up(pci); | 360 | return pci->ops->link_up(pci); |
| 336 | 361 | ||
| 337 | val = readl(pci->dbi_base + PCIE_PHY_DEBUG_R1); | 362 | val = readl(pci->dbi_base + PCIE_PORT_DEBUG1); |
| 338 | return ((val & PCIE_PHY_DEBUG_R1_LINK_UP) && | 363 | return ((val & PCIE_PORT_DEBUG1_LINK_UP) && |
| 339 | (!(val & PCIE_PHY_DEBUG_R1_LINK_IN_TRAINING))); | 364 | (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); |
| 365 | } | ||
| 366 | |||
| 367 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
| 368 | { | ||
| 369 | u32 val; | ||
| 370 | |||
| 371 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
| 372 | if (val == 0xffffffff) | ||
| 373 | return 1; | ||
| 374 | |||
| 375 | return 0; | ||
| 340 | } | 376 | } |
| 341 | 377 | ||
| 342 | void dw_pcie_setup(struct dw_pcie *pci) | 378 | void dw_pcie_setup(struct dw_pcie *pci) |
| @@ -347,6 +383,16 @@ void dw_pcie_setup(struct dw_pcie *pci) | |||
| 347 | struct device *dev = pci->dev; | 383 | struct device *dev = pci->dev; |
| 348 | struct device_node *np = dev->of_node; | 384 | struct device_node *np = dev->of_node; |
| 349 | 385 | ||
| 386 | if (pci->version >= 0x480A || (!pci->version && | ||
| 387 | dw_pcie_iatu_unroll_enabled(pci))) { | ||
| 388 | pci->iatu_unroll_enabled = true; | ||
| 389 | if (!pci->atu_base) | ||
| 390 | pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; | ||
| 391 | } | ||
| 392 | dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? | ||
| 393 | "enabled" : "disabled"); | ||
| 394 | |||
| 395 | |||
| 350 | ret = of_property_read_u32(np, "num-lanes", &lanes); | 396 | ret = of_property_read_u32(np, "num-lanes", &lanes); |
| 351 | if (ret) | 397 | if (ret) |
| 352 | lanes = 0; | 398 | lanes = 0; |
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 377f4c0b52da..b8993f2b78df 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
| @@ -41,6 +41,9 @@ | |||
| 41 | #define PCIE_PORT_DEBUG0 0x728 | 41 | #define PCIE_PORT_DEBUG0 0x728 |
| 42 | #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f | 42 | #define PORT_LOGIC_LTSSM_STATE_MASK 0x1f |
| 43 | #define PORT_LOGIC_LTSSM_STATE_L0 0x11 | 43 | #define PORT_LOGIC_LTSSM_STATE_L0 0x11 |
| 44 | #define PCIE_PORT_DEBUG1 0x72C | ||
| 45 | #define PCIE_PORT_DEBUG1_LINK_UP BIT(4) | ||
| 46 | #define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29) | ||
| 44 | 47 | ||
| 45 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C | 48 | #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C |
| 46 | #define PORT_LOGIC_SPEED_CHANGE BIT(17) | 49 | #define PORT_LOGIC_SPEED_CHANGE BIT(17) |
| @@ -145,14 +148,9 @@ struct dw_pcie_host_ops { | |||
| 145 | int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, | 148 | int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, |
| 146 | unsigned int devfn, int where, int size, u32 val); | 149 | unsigned int devfn, int where, int size, u32 val); |
| 147 | int (*host_init)(struct pcie_port *pp); | 150 | int (*host_init)(struct pcie_port *pp); |
| 148 | void (*msi_set_irq)(struct pcie_port *pp, int irq); | ||
| 149 | void (*msi_clear_irq)(struct pcie_port *pp, int irq); | ||
| 150 | phys_addr_t (*get_msi_addr)(struct pcie_port *pp); | ||
| 151 | u32 (*get_msi_data)(struct pcie_port *pp, int pos); | ||
| 152 | void (*scan_bus)(struct pcie_port *pp); | 151 | void (*scan_bus)(struct pcie_port *pp); |
| 153 | void (*set_num_vectors)(struct pcie_port *pp); | 152 | void (*set_num_vectors)(struct pcie_port *pp); |
| 154 | int (*msi_host_init)(struct pcie_port *pp); | 153 | int (*msi_host_init)(struct pcie_port *pp); |
| 155 | void (*msi_irq_ack)(int irq, struct pcie_port *pp); | ||
| 156 | }; | 154 | }; |
| 157 | 155 | ||
| 158 | struct pcie_port { | 156 | struct pcie_port { |
| @@ -179,8 +177,11 @@ struct pcie_port { | |||
| 179 | struct irq_domain *irq_domain; | 177 | struct irq_domain *irq_domain; |
| 180 | struct irq_domain *msi_domain; | 178 | struct irq_domain *msi_domain; |
| 181 | dma_addr_t msi_data; | 179 | dma_addr_t msi_data; |
| 180 | struct page *msi_page; | ||
| 181 | struct irq_chip *msi_irq_chip; | ||
| 182 | u32 num_vectors; | 182 | u32 num_vectors; |
| 183 | u32 irq_mask[MAX_MSI_CTRLS]; | 183 | u32 irq_mask[MAX_MSI_CTRLS]; |
| 184 | struct pci_bus *root_bus; | ||
| 184 | raw_spinlock_t lock; | 185 | raw_spinlock_t lock; |
| 185 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); | 186 | DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS); |
| 186 | }; | 187 | }; |
| @@ -200,7 +201,7 @@ struct dw_pcie_ep_ops { | |||
| 200 | 201 | ||
| 201 | struct dw_pcie_ep { | 202 | struct dw_pcie_ep { |
| 202 | struct pci_epc *epc; | 203 | struct pci_epc *epc; |
| 203 | struct dw_pcie_ep_ops *ops; | 204 | const struct dw_pcie_ep_ops *ops; |
| 204 | phys_addr_t phys_base; | 205 | phys_addr_t phys_base; |
| 205 | size_t addr_size; | 206 | size_t addr_size; |
| 206 | size_t page_size; | 207 | size_t page_size; |
| @@ -222,6 +223,10 @@ struct dw_pcie_ops { | |||
| 222 | size_t size); | 223 | size_t size); |
| 223 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | 224 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, |
| 224 | size_t size, u32 val); | 225 | size_t size, u32 val); |
| 226 | u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
| 227 | size_t size); | ||
| 228 | void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
| 229 | size_t size, u32 val); | ||
| 225 | int (*link_up)(struct dw_pcie *pcie); | 230 | int (*link_up)(struct dw_pcie *pcie); |
| 226 | int (*start_link)(struct dw_pcie *pcie); | 231 | int (*start_link)(struct dw_pcie *pcie); |
| 227 | void (*stop_link)(struct dw_pcie *pcie); | 232 | void (*stop_link)(struct dw_pcie *pcie); |
| @@ -238,6 +243,7 @@ struct dw_pcie { | |||
| 238 | struct pcie_port pp; | 243 | struct pcie_port pp; |
| 239 | struct dw_pcie_ep ep; | 244 | struct dw_pcie_ep ep; |
| 240 | const struct dw_pcie_ops *ops; | 245 | const struct dw_pcie_ops *ops; |
| 246 | unsigned int version; | ||
| 241 | }; | 247 | }; |
| 242 | 248 | ||
| 243 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) | 249 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) |
| @@ -252,6 +258,10 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | |||
| 252 | size_t size); | 258 | size_t size); |
| 253 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | 259 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, |
| 254 | size_t size, u32 val); | 260 | size_t size, u32 val); |
| 261 | u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 262 | size_t size); | ||
| 263 | void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
| 264 | size_t size, u32 val); | ||
| 255 | int dw_pcie_link_up(struct dw_pcie *pci); | 265 | int dw_pcie_link_up(struct dw_pcie *pci); |
| 256 | int dw_pcie_wait_for_link(struct dw_pcie *pci); | 266 | int dw_pcie_wait_for_link(struct dw_pcie *pci); |
| 257 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, | 267 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, |
| @@ -295,12 +305,12 @@ static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) | |||
| 295 | 305 | ||
| 296 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) | 306 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) |
| 297 | { | 307 | { |
| 298 | __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); | 308 | __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val); |
| 299 | } | 309 | } |
| 300 | 310 | ||
| 301 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) | 311 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) |
| 302 | { | 312 | { |
| 303 | return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); | 313 | return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4); |
| 304 | } | 314 | } |
| 305 | 315 | ||
| 306 | static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) | 316 | static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) |
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index a7f703556790..0ed235d560e3 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c | |||
| @@ -1129,25 +1129,8 @@ err_deinit: | |||
| 1129 | return ret; | 1129 | return ret; |
| 1130 | } | 1130 | } |
| 1131 | 1131 | ||
| 1132 | static int qcom_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, | ||
| 1133 | u32 *val) | ||
| 1134 | { | ||
| 1135 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
| 1136 | |||
| 1137 | /* the device class is not reported correctly from the register */ | ||
| 1138 | if (where == PCI_CLASS_REVISION && size == 4) { | ||
| 1139 | *val = readl(pci->dbi_base + PCI_CLASS_REVISION); | ||
| 1140 | *val &= 0xff; /* keep revision id */ | ||
| 1141 | *val |= PCI_CLASS_BRIDGE_PCI << 16; | ||
| 1142 | return PCIBIOS_SUCCESSFUL; | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | return dw_pcie_read(pci->dbi_base + where, size, val); | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { | 1132 | static const struct dw_pcie_host_ops qcom_pcie_dw_ops = { |
| 1149 | .host_init = qcom_pcie_host_init, | 1133 | .host_init = qcom_pcie_host_init, |
| 1150 | .rd_own_conf = qcom_pcie_rd_own_conf, | ||
| 1151 | }; | 1134 | }; |
| 1152 | 1135 | ||
| 1153 | /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ | 1136 | /* Qcom IP rev.: 2.1.0 Synopsys IP rev.: 4.01a */ |
| @@ -1309,6 +1292,12 @@ static const struct of_device_id qcom_pcie_match[] = { | |||
| 1309 | { } | 1292 | { } |
| 1310 | }; | 1293 | }; |
| 1311 | 1294 | ||
| 1295 | static void qcom_fixup_class(struct pci_dev *dev) | ||
| 1296 | { | ||
| 1297 | dev->class = PCI_CLASS_BRIDGE_PCI << 8; | ||
| 1298 | } | ||
| 1299 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_QCOM, PCI_ANY_ID, qcom_fixup_class); | ||
| 1300 | |||
| 1312 | static struct platform_driver qcom_pcie_driver = { | 1301 | static struct platform_driver qcom_pcie_driver = { |
| 1313 | .probe = qcom_pcie_probe, | 1302 | .probe = qcom_pcie_probe, |
| 1314 | .driver = { | 1303 | .driver = { |
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index d5dc40289cce..3f30ee4a00b3 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c | |||
| @@ -270,6 +270,7 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) | |||
| 270 | struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); | 270 | struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci); |
| 271 | struct device_node *np = pci->dev->of_node; | 271 | struct device_node *np = pci->dev->of_node; |
| 272 | struct device_node *np_intc; | 272 | struct device_node *np_intc; |
| 273 | int ret = 0; | ||
| 273 | 274 | ||
| 274 | np_intc = of_get_child_by_name(np, "legacy-interrupt-controller"); | 275 | np_intc = of_get_child_by_name(np, "legacy-interrupt-controller"); |
| 275 | if (!np_intc) { | 276 | if (!np_intc) { |
| @@ -280,20 +281,24 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp) | |||
| 280 | pp->irq = irq_of_parse_and_map(np_intc, 0); | 281 | pp->irq = irq_of_parse_and_map(np_intc, 0); |
| 281 | if (!pp->irq) { | 282 | if (!pp->irq) { |
| 282 | dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n"); | 283 | dev_err(pci->dev, "Failed to get an IRQ entry in legacy-interrupt-controller\n"); |
| 283 | return -EINVAL; | 284 | ret = -EINVAL; |
| 285 | goto out_put_node; | ||
| 284 | } | 286 | } |
| 285 | 287 | ||
| 286 | priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, | 288 | priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX, |
| 287 | &uniphier_intx_domain_ops, pp); | 289 | &uniphier_intx_domain_ops, pp); |
| 288 | if (!priv->legacy_irq_domain) { | 290 | if (!priv->legacy_irq_domain) { |
| 289 | dev_err(pci->dev, "Failed to get INTx domain\n"); | 291 | dev_err(pci->dev, "Failed to get INTx domain\n"); |
| 290 | return -ENODEV; | 292 | ret = -ENODEV; |
| 293 | goto out_put_node; | ||
| 291 | } | 294 | } |
| 292 | 295 | ||
| 293 | irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler, | 296 | irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler, |
| 294 | pp); | 297 | pp); |
| 295 | 298 | ||
| 296 | return 0; | 299 | out_put_node: |
| 300 | of_node_put(np_intc); | ||
| 301 | return ret; | ||
| 297 | } | 302 | } |
| 298 | 303 | ||
| 299 | static int uniphier_pcie_host_init(struct pcie_port *pp) | 304 | static int uniphier_pcie_host_init(struct pcie_port *pp) |
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index eb58dfdaba1b..134e0306ff00 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c | |||
| @@ -794,6 +794,7 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) | |||
| 794 | struct device_node *node = dev->of_node; | 794 | struct device_node *node = dev->of_node; |
| 795 | struct device_node *pcie_intc_node; | 795 | struct device_node *pcie_intc_node; |
| 796 | struct irq_chip *irq_chip; | 796 | struct irq_chip *irq_chip; |
| 797 | int ret = 0; | ||
| 797 | 798 | ||
| 798 | pcie_intc_node = of_get_next_child(node, NULL); | 799 | pcie_intc_node = of_get_next_child(node, NULL); |
| 799 | if (!pcie_intc_node) { | 800 | if (!pcie_intc_node) { |
| @@ -806,8 +807,8 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) | |||
| 806 | irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", | 807 | irq_chip->name = devm_kasprintf(dev, GFP_KERNEL, "%s-irq", |
| 807 | dev_name(dev)); | 808 | dev_name(dev)); |
| 808 | if (!irq_chip->name) { | 809 | if (!irq_chip->name) { |
| 809 | of_node_put(pcie_intc_node); | 810 | ret = -ENOMEM; |
| 810 | return -ENOMEM; | 811 | goto out_put_node; |
| 811 | } | 812 | } |
| 812 | 813 | ||
| 813 | irq_chip->irq_mask = advk_pcie_irq_mask; | 814 | irq_chip->irq_mask = advk_pcie_irq_mask; |
| @@ -819,11 +820,13 @@ static int advk_pcie_init_irq_domain(struct advk_pcie *pcie) | |||
| 819 | &advk_pcie_irq_domain_ops, pcie); | 820 | &advk_pcie_irq_domain_ops, pcie); |
| 820 | if (!pcie->irq_domain) { | 821 | if (!pcie->irq_domain) { |
| 821 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); | 822 | dev_err(dev, "Failed to get a INTx IRQ domain\n"); |
| 822 | of_node_put(pcie_intc_node); | 823 | ret = -ENOMEM; |
| 823 | return -ENOMEM; | 824 | goto out_put_node; |
| 824 | } | 825 | } |
| 825 | 826 | ||
| 826 | return 0; | 827 | out_put_node: |
| 828 | of_node_put(pcie_intc_node); | ||
| 829 | return ret; | ||
| 827 | } | 830 | } |
| 828 | 831 | ||
| 829 | static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) | 832 | static void advk_pcie_remove_irq_domain(struct advk_pcie *pcie) |
diff --git a/drivers/pci/controller/pci-host-generic.c b/drivers/pci/controller/pci-host-generic.c index dea3ec7592a2..75a2fb930d4b 100644 --- a/drivers/pci/controller/pci-host-generic.c +++ b/drivers/pci/controller/pci-host-generic.c | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* | 2 | /* |
| 3 | * Simple, generic PCI host controller driver targetting firmware-initialised | 3 | * Simple, generic PCI host controller driver targeting firmware-initialised |
| 4 | * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). | 4 | * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). |
| 5 | * | 5 | * |
| 6 | * Copyright (C) 2014 ARM Limited | 6 | * Copyright (C) 2014 ARM Limited |
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index 95441a35eceb..82acd6155adf 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c | |||
| @@ -1486,6 +1486,21 @@ static void hv_pci_assign_slots(struct hv_pcibus_device *hbus) | |||
| 1486 | } | 1486 | } |
| 1487 | } | 1487 | } |
| 1488 | 1488 | ||
| 1489 | /* | ||
| 1490 | * Remove entries in sysfs pci slot directory. | ||
| 1491 | */ | ||
| 1492 | static void hv_pci_remove_slots(struct hv_pcibus_device *hbus) | ||
| 1493 | { | ||
| 1494 | struct hv_pci_dev *hpdev; | ||
| 1495 | |||
| 1496 | list_for_each_entry(hpdev, &hbus->children, list_entry) { | ||
| 1497 | if (!hpdev->pci_slot) | ||
| 1498 | continue; | ||
| 1499 | pci_destroy_slot(hpdev->pci_slot); | ||
| 1500 | hpdev->pci_slot = NULL; | ||
| 1501 | } | ||
| 1502 | } | ||
| 1503 | |||
| 1489 | /** | 1504 | /** |
| 1490 | * create_root_hv_pci_bus() - Expose a new root PCI bus | 1505 | * create_root_hv_pci_bus() - Expose a new root PCI bus |
| 1491 | * @hbus: Root PCI bus, as understood by this driver | 1506 | * @hbus: Root PCI bus, as understood by this driver |
| @@ -1761,6 +1776,10 @@ static void pci_devices_present_work(struct work_struct *work) | |||
| 1761 | hpdev = list_first_entry(&removed, struct hv_pci_dev, | 1776 | hpdev = list_first_entry(&removed, struct hv_pci_dev, |
| 1762 | list_entry); | 1777 | list_entry); |
| 1763 | list_del(&hpdev->list_entry); | 1778 | list_del(&hpdev->list_entry); |
| 1779 | |||
| 1780 | if (hpdev->pci_slot) | ||
| 1781 | pci_destroy_slot(hpdev->pci_slot); | ||
| 1782 | |||
| 1764 | put_pcichild(hpdev); | 1783 | put_pcichild(hpdev); |
| 1765 | } | 1784 | } |
| 1766 | 1785 | ||
| @@ -1900,6 +1919,9 @@ static void hv_eject_device_work(struct work_struct *work) | |||
| 1900 | sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, | 1919 | sizeof(*ejct_pkt), (unsigned long)&ctxt.pkt, |
| 1901 | VM_PKT_DATA_INBAND, 0); | 1920 | VM_PKT_DATA_INBAND, 0); |
| 1902 | 1921 | ||
| 1922 | /* For the get_pcichild() in hv_pci_eject_device() */ | ||
| 1923 | put_pcichild(hpdev); | ||
| 1924 | /* For the two refs got in new_pcichild_device() */ | ||
| 1903 | put_pcichild(hpdev); | 1925 | put_pcichild(hpdev); |
| 1904 | put_pcichild(hpdev); | 1926 | put_pcichild(hpdev); |
| 1905 | put_hvpcibus(hpdev->hbus); | 1927 | put_hvpcibus(hpdev->hbus); |
| @@ -2677,6 +2699,7 @@ static int hv_pci_remove(struct hv_device *hdev) | |||
| 2677 | pci_lock_rescan_remove(); | 2699 | pci_lock_rescan_remove(); |
| 2678 | pci_stop_root_bus(hbus->pci_bus); | 2700 | pci_stop_root_bus(hbus->pci_bus); |
| 2679 | pci_remove_root_bus(hbus->pci_bus); | 2701 | pci_remove_root_bus(hbus->pci_bus); |
| 2702 | hv_pci_remove_slots(hbus); | ||
| 2680 | pci_unlock_rescan_remove(); | 2703 | pci_unlock_rescan_remove(); |
| 2681 | hbus->state = hv_pcibus_removed; | 2704 | hbus->state = hv_pcibus_removed; |
| 2682 | } | 2705 | } |
diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index f4f53d092e00..464ba2538d52 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c | |||
| @@ -231,9 +231,9 @@ struct tegra_msi { | |||
| 231 | struct msi_controller chip; | 231 | struct msi_controller chip; |
| 232 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 232 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
| 233 | struct irq_domain *domain; | 233 | struct irq_domain *domain; |
| 234 | unsigned long pages; | ||
| 235 | struct mutex lock; | 234 | struct mutex lock; |
| 236 | u64 phys; | 235 | void *virt; |
| 236 | dma_addr_t phys; | ||
| 237 | int irq; | 237 | int irq; |
| 238 | }; | 238 | }; |
| 239 | 239 | ||
| @@ -1536,7 +1536,7 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) | |||
| 1536 | err = platform_get_irq_byname(pdev, "msi"); | 1536 | err = platform_get_irq_byname(pdev, "msi"); |
| 1537 | if (err < 0) { | 1537 | if (err < 0) { |
| 1538 | dev_err(dev, "failed to get IRQ: %d\n", err); | 1538 | dev_err(dev, "failed to get IRQ: %d\n", err); |
| 1539 | goto err; | 1539 | goto free_irq_domain; |
| 1540 | } | 1540 | } |
| 1541 | 1541 | ||
| 1542 | msi->irq = err; | 1542 | msi->irq = err; |
| @@ -1545,17 +1545,35 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) | |||
| 1545 | tegra_msi_irq_chip.name, pcie); | 1545 | tegra_msi_irq_chip.name, pcie); |
| 1546 | if (err < 0) { | 1546 | if (err < 0) { |
| 1547 | dev_err(dev, "failed to request IRQ: %d\n", err); | 1547 | dev_err(dev, "failed to request IRQ: %d\n", err); |
| 1548 | goto err; | 1548 | goto free_irq_domain; |
| 1549 | } | ||
| 1550 | |||
| 1551 | /* Though the PCIe controller can address >32-bit address space, to | ||
| 1552 | * facilitate endpoints that support only 32-bit MSI target address, | ||
| 1553 | * the mask is set to 32-bit to make sure that MSI target address is | ||
| 1554 | * always a 32-bit address | ||
| 1555 | */ | ||
| 1556 | err = dma_set_coherent_mask(dev, DMA_BIT_MASK(32)); | ||
| 1557 | if (err < 0) { | ||
| 1558 | dev_err(dev, "failed to set DMA coherent mask: %d\n", err); | ||
| 1559 | goto free_irq; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | msi->virt = dma_alloc_attrs(dev, PAGE_SIZE, &msi->phys, GFP_KERNEL, | ||
| 1563 | DMA_ATTR_NO_KERNEL_MAPPING); | ||
| 1564 | if (!msi->virt) { | ||
| 1565 | dev_err(dev, "failed to allocate DMA memory for MSI\n"); | ||
| 1566 | err = -ENOMEM; | ||
| 1567 | goto free_irq; | ||
| 1549 | } | 1568 | } |
| 1550 | 1569 | ||
| 1551 | /* setup AFI/FPCI range */ | ||
| 1552 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | ||
| 1553 | msi->phys = virt_to_phys((void *)msi->pages); | ||
| 1554 | host->msi = &msi->chip; | 1570 | host->msi = &msi->chip; |
| 1555 | 1571 | ||
| 1556 | return 0; | 1572 | return 0; |
| 1557 | 1573 | ||
| 1558 | err: | 1574 | free_irq: |
| 1575 | free_irq(msi->irq, pcie); | ||
| 1576 | free_irq_domain: | ||
| 1559 | irq_domain_remove(msi->domain); | 1577 | irq_domain_remove(msi->domain); |
| 1560 | return err; | 1578 | return err; |
| 1561 | } | 1579 | } |
| @@ -1592,7 +1610,8 @@ static void tegra_pcie_msi_teardown(struct tegra_pcie *pcie) | |||
| 1592 | struct tegra_msi *msi = &pcie->msi; | 1610 | struct tegra_msi *msi = &pcie->msi; |
| 1593 | unsigned int i, irq; | 1611 | unsigned int i, irq; |
| 1594 | 1612 | ||
| 1595 | free_pages(msi->pages, 0); | 1613 | dma_free_attrs(pcie->dev, PAGE_SIZE, msi->virt, msi->phys, |
| 1614 | DMA_ATTR_NO_KERNEL_MAPPING); | ||
| 1596 | 1615 | ||
| 1597 | if (msi->irq > 0) | 1616 | if (msi->irq > 0) |
| 1598 | free_irq(msi->irq, pcie); | 1617 | free_irq(msi->irq, pcie); |
diff --git a/drivers/pci/controller/pcie-iproc-msi.c b/drivers/pci/controller/pcie-iproc-msi.c index cb3401a931f8..0a3f61be5625 100644 --- a/drivers/pci/controller/pcie-iproc-msi.c +++ b/drivers/pci/controller/pcie-iproc-msi.c | |||
| @@ -367,7 +367,7 @@ static void iproc_msi_handler(struct irq_desc *desc) | |||
| 367 | 367 | ||
| 368 | /* | 368 | /* |
| 369 | * Now go read the tail pointer again to see if there are new | 369 | * Now go read the tail pointer again to see if there are new |
| 370 | * oustanding events that came in during the above window. | 370 | * outstanding events that came in during the above window. |
| 371 | */ | 371 | */ |
| 372 | } while (true); | 372 | } while (true); |
| 373 | 373 | ||
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index c20fd6bd68fd..e3ca46497470 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c | |||
| @@ -60,6 +60,10 @@ | |||
| 60 | #define APB_ERR_EN_SHIFT 0 | 60 | #define APB_ERR_EN_SHIFT 0 |
| 61 | #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) | 61 | #define APB_ERR_EN BIT(APB_ERR_EN_SHIFT) |
| 62 | 62 | ||
| 63 | #define CFG_RD_SUCCESS 0 | ||
| 64 | #define CFG_RD_UR 1 | ||
| 65 | #define CFG_RD_CRS 2 | ||
| 66 | #define CFG_RD_CA 3 | ||
| 63 | #define CFG_RETRY_STATUS 0xffff0001 | 67 | #define CFG_RETRY_STATUS 0xffff0001 |
| 64 | #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ | 68 | #define CFG_RETRY_STATUS_TIMEOUT_US 500000 /* 500 milliseconds */ |
| 65 | 69 | ||
| @@ -289,6 +293,9 @@ enum iproc_pcie_reg { | |||
| 289 | IPROC_PCIE_IARR4, | 293 | IPROC_PCIE_IARR4, |
| 290 | IPROC_PCIE_IMAP4, | 294 | IPROC_PCIE_IMAP4, |
| 291 | 295 | ||
| 296 | /* config read status */ | ||
| 297 | IPROC_PCIE_CFG_RD_STATUS, | ||
| 298 | |||
| 292 | /* link status */ | 299 | /* link status */ |
| 293 | IPROC_PCIE_LINK_STATUS, | 300 | IPROC_PCIE_LINK_STATUS, |
| 294 | 301 | ||
| @@ -350,6 +357,7 @@ static const u16 iproc_pcie_reg_paxb_v2[] = { | |||
| 350 | [IPROC_PCIE_IMAP3] = 0xe08, | 357 | [IPROC_PCIE_IMAP3] = 0xe08, |
| 351 | [IPROC_PCIE_IARR4] = 0xe68, | 358 | [IPROC_PCIE_IARR4] = 0xe68, |
| 352 | [IPROC_PCIE_IMAP4] = 0xe70, | 359 | [IPROC_PCIE_IMAP4] = 0xe70, |
| 360 | [IPROC_PCIE_CFG_RD_STATUS] = 0xee0, | ||
| 353 | [IPROC_PCIE_LINK_STATUS] = 0xf0c, | 361 | [IPROC_PCIE_LINK_STATUS] = 0xf0c, |
| 354 | [IPROC_PCIE_APB_ERR_EN] = 0xf40, | 362 | [IPROC_PCIE_APB_ERR_EN] = 0xf40, |
| 355 | }; | 363 | }; |
| @@ -474,10 +482,12 @@ static void __iomem *iproc_pcie_map_ep_cfg_reg(struct iproc_pcie *pcie, | |||
| 474 | return (pcie->base + offset); | 482 | return (pcie->base + offset); |
| 475 | } | 483 | } |
| 476 | 484 | ||
| 477 | static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) | 485 | static unsigned int iproc_pcie_cfg_retry(struct iproc_pcie *pcie, |
| 486 | void __iomem *cfg_data_p) | ||
| 478 | { | 487 | { |
| 479 | int timeout = CFG_RETRY_STATUS_TIMEOUT_US; | 488 | int timeout = CFG_RETRY_STATUS_TIMEOUT_US; |
| 480 | unsigned int data; | 489 | unsigned int data; |
| 490 | u32 status; | ||
| 481 | 491 | ||
| 482 | /* | 492 | /* |
| 483 | * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only | 493 | * As per PCIe spec r3.1, sec 2.3.2, CRS Software Visibility only |
| @@ -498,6 +508,15 @@ static unsigned int iproc_pcie_cfg_retry(void __iomem *cfg_data_p) | |||
| 498 | */ | 508 | */ |
| 499 | data = readl(cfg_data_p); | 509 | data = readl(cfg_data_p); |
| 500 | while (data == CFG_RETRY_STATUS && timeout--) { | 510 | while (data == CFG_RETRY_STATUS && timeout--) { |
| 511 | /* | ||
| 512 | * CRS state is set in CFG_RD status register | ||
| 513 | * This will handle the case where CFG_RETRY_STATUS is | ||
| 514 | * valid config data. | ||
| 515 | */ | ||
| 516 | status = iproc_pcie_read_reg(pcie, IPROC_PCIE_CFG_RD_STATUS); | ||
| 517 | if (status != CFG_RD_CRS) | ||
| 518 | return data; | ||
| 519 | |||
| 501 | udelay(1); | 520 | udelay(1); |
| 502 | data = readl(cfg_data_p); | 521 | data = readl(cfg_data_p); |
| 503 | } | 522 | } |
| @@ -576,7 +595,7 @@ static int iproc_pcie_config_read(struct pci_bus *bus, unsigned int devfn, | |||
| 576 | if (!cfg_data_p) | 595 | if (!cfg_data_p) |
| 577 | return PCIBIOS_DEVICE_NOT_FOUND; | 596 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 578 | 597 | ||
| 579 | data = iproc_pcie_cfg_retry(cfg_data_p); | 598 | data = iproc_pcie_cfg_retry(pcie, cfg_data_p); |
| 580 | 599 | ||
| 581 | *val = data; | 600 | *val = data; |
| 582 | if (size <= 2) | 601 | if (size <= 2) |
| @@ -936,8 +955,25 @@ static int iproc_pcie_setup_ob(struct iproc_pcie *pcie, u64 axi_addr, | |||
| 936 | resource_size_t window_size = | 955 | resource_size_t window_size = |
| 937 | ob_map->window_sizes[size_idx] * SZ_1M; | 956 | ob_map->window_sizes[size_idx] * SZ_1M; |
| 938 | 957 | ||
| 939 | if (size < window_size) | 958 | /* |
| 940 | continue; | 959 | * Keep iterating until we reach the last window and |
| 960 | * with the minimal window size at index zero. In this | ||
| 961 | * case, we take a compromise by mapping it using the | ||
| 962 | * minimum window size that can be supported | ||
| 963 | */ | ||
| 964 | if (size < window_size) { | ||
| 965 | if (size_idx > 0 || window_idx > 0) | ||
| 966 | continue; | ||
| 967 | |||
| 968 | /* | ||
| 969 | * For the corner case of reaching the minimal | ||
| 970 | * window size that can be supported on the | ||
| 971 | * last window | ||
| 972 | */ | ||
| 973 | axi_addr = ALIGN_DOWN(axi_addr, window_size); | ||
| 974 | pci_addr = ALIGN_DOWN(pci_addr, window_size); | ||
| 975 | size = window_size; | ||
| 976 | } | ||
| 941 | 977 | ||
| 942 | if (!IS_ALIGNED(axi_addr, window_size) || | 978 | if (!IS_ALIGNED(axi_addr, window_size) || |
| 943 | !IS_ALIGNED(pci_addr, window_size)) { | 979 | !IS_ALIGNED(pci_addr, window_size)) { |
| @@ -1146,11 +1182,43 @@ err_ib: | |||
| 1146 | return ret; | 1182 | return ret; |
| 1147 | } | 1183 | } |
| 1148 | 1184 | ||
| 1185 | static int iproc_pcie_add_dma_range(struct device *dev, | ||
| 1186 | struct list_head *resources, | ||
| 1187 | struct of_pci_range *range) | ||
| 1188 | { | ||
| 1189 | struct resource *res; | ||
| 1190 | struct resource_entry *entry, *tmp; | ||
| 1191 | struct list_head *head = resources; | ||
| 1192 | |||
| 1193 | res = devm_kzalloc(dev, sizeof(struct resource), GFP_KERNEL); | ||
| 1194 | if (!res) | ||
| 1195 | return -ENOMEM; | ||
| 1196 | |||
| 1197 | resource_list_for_each_entry(tmp, resources) { | ||
| 1198 | if (tmp->res->start < range->cpu_addr) | ||
| 1199 | head = &tmp->node; | ||
| 1200 | } | ||
| 1201 | |||
| 1202 | res->start = range->cpu_addr; | ||
| 1203 | res->end = res->start + range->size - 1; | ||
| 1204 | |||
| 1205 | entry = resource_list_create_entry(res, 0); | ||
| 1206 | if (!entry) | ||
| 1207 | return -ENOMEM; | ||
| 1208 | |||
| 1209 | entry->offset = res->start - range->cpu_addr; | ||
| 1210 | resource_list_add(entry, head); | ||
| 1211 | |||
| 1212 | return 0; | ||
| 1213 | } | ||
| 1214 | |||
| 1149 | static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) | 1215 | static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) |
| 1150 | { | 1216 | { |
| 1217 | struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); | ||
| 1151 | struct of_pci_range range; | 1218 | struct of_pci_range range; |
| 1152 | struct of_pci_range_parser parser; | 1219 | struct of_pci_range_parser parser; |
| 1153 | int ret; | 1220 | int ret; |
| 1221 | LIST_HEAD(resources); | ||
| 1154 | 1222 | ||
| 1155 | /* Get the dma-ranges from DT */ | 1223 | /* Get the dma-ranges from DT */ |
| 1156 | ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); | 1224 | ret = of_pci_dma_range_parser_init(&parser, pcie->dev->of_node); |
| @@ -1158,13 +1226,23 @@ static int iproc_pcie_map_dma_ranges(struct iproc_pcie *pcie) | |||
| 1158 | return ret; | 1226 | return ret; |
| 1159 | 1227 | ||
| 1160 | for_each_of_pci_range(&parser, &range) { | 1228 | for_each_of_pci_range(&parser, &range) { |
| 1229 | ret = iproc_pcie_add_dma_range(pcie->dev, | ||
| 1230 | &resources, | ||
| 1231 | &range); | ||
| 1232 | if (ret) | ||
| 1233 | goto out; | ||
| 1161 | /* Each range entry corresponds to an inbound mapping region */ | 1234 | /* Each range entry corresponds to an inbound mapping region */ |
| 1162 | ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); | 1235 | ret = iproc_pcie_setup_ib(pcie, &range, IPROC_PCIE_IB_MAP_MEM); |
| 1163 | if (ret) | 1236 | if (ret) |
| 1164 | return ret; | 1237 | goto out; |
| 1165 | } | 1238 | } |
| 1166 | 1239 | ||
| 1240 | list_splice_init(&resources, &host->dma_ranges); | ||
| 1241 | |||
| 1167 | return 0; | 1242 | return 0; |
| 1243 | out: | ||
| 1244 | pci_free_resource_list(&resources); | ||
| 1245 | return ret; | ||
| 1168 | } | 1246 | } |
| 1169 | 1247 | ||
| 1170 | static int iproce_pcie_get_msi(struct iproc_pcie *pcie, | 1248 | static int iproce_pcie_get_msi(struct iproc_pcie *pcie, |
| @@ -1320,14 +1398,18 @@ static int iproc_pcie_msi_enable(struct iproc_pcie *pcie) | |||
| 1320 | if (pcie->need_msi_steer) { | 1398 | if (pcie->need_msi_steer) { |
| 1321 | ret = iproc_pcie_msi_steer(pcie, msi_node); | 1399 | ret = iproc_pcie_msi_steer(pcie, msi_node); |
| 1322 | if (ret) | 1400 | if (ret) |
| 1323 | return ret; | 1401 | goto out_put_node; |
| 1324 | } | 1402 | } |
| 1325 | 1403 | ||
| 1326 | /* | 1404 | /* |
| 1327 | * If another MSI controller is being used, the call below should fail | 1405 | * If another MSI controller is being used, the call below should fail |
| 1328 | * but that is okay | 1406 | * but that is okay |
| 1329 | */ | 1407 | */ |
| 1330 | return iproc_msi_init(pcie, msi_node); | 1408 | ret = iproc_msi_init(pcie, msi_node); |
| 1409 | |||
| 1410 | out_put_node: | ||
| 1411 | of_node_put(msi_node); | ||
| 1412 | return ret; | ||
| 1331 | } | 1413 | } |
| 1332 | 1414 | ||
| 1333 | static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) | 1415 | static void iproc_pcie_msi_disable(struct iproc_pcie *pcie) |
| @@ -1347,7 +1429,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) | |||
| 1347 | break; | 1429 | break; |
| 1348 | case IPROC_PCIE_PAXB: | 1430 | case IPROC_PCIE_PAXB: |
| 1349 | regs = iproc_pcie_reg_paxb; | 1431 | regs = iproc_pcie_reg_paxb; |
| 1350 | pcie->iproc_cfg_read = true; | ||
| 1351 | pcie->has_apb_err_disable = true; | 1432 | pcie->has_apb_err_disable = true; |
| 1352 | if (pcie->need_ob_cfg) { | 1433 | if (pcie->need_ob_cfg) { |
| 1353 | pcie->ob_map = paxb_ob_map; | 1434 | pcie->ob_map = paxb_ob_map; |
| @@ -1356,6 +1437,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie) | |||
| 1356 | break; | 1437 | break; |
| 1357 | case IPROC_PCIE_PAXB_V2: | 1438 | case IPROC_PCIE_PAXB_V2: |
| 1358 | regs = iproc_pcie_reg_paxb_v2; | 1439 | regs = iproc_pcie_reg_paxb_v2; |
| 1440 | pcie->iproc_cfg_read = true; | ||
| 1359 | pcie->has_apb_err_disable = true; | 1441 | pcie->has_apb_err_disable = true; |
| 1360 | if (pcie->need_ob_cfg) { | 1442 | if (pcie->need_ob_cfg) { |
| 1361 | pcie->ob_map = paxb_v2_ob_map; | 1443 | pcie->ob_map = paxb_v2_ob_map; |
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index 0b6c72804e03..80601e1b939e 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c | |||
| @@ -578,6 +578,7 @@ static int mtk_pcie_init_irq_domain(struct mtk_pcie_port *port, | |||
| 578 | 578 | ||
| 579 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, | 579 | port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, |
| 580 | &intx_domain_ops, port); | 580 | &intx_domain_ops, port); |
| 581 | of_node_put(pcie_intc_node); | ||
| 581 | if (!port->irq_domain) { | 582 | if (!port->irq_domain) { |
| 582 | dev_err(dev, "failed to get INTx IRQ domain\n"); | 583 | dev_err(dev, "failed to get INTx IRQ domain\n"); |
| 583 | return -ENODEV; | 584 | return -ENODEV; |
| @@ -915,49 +916,29 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, | |||
| 915 | 916 | ||
| 916 | /* sys_ck might be divided into the following parts in some chips */ | 917 | /* sys_ck might be divided into the following parts in some chips */ |
| 917 | snprintf(name, sizeof(name), "ahb_ck%d", slot); | 918 | snprintf(name, sizeof(name), "ahb_ck%d", slot); |
| 918 | port->ahb_ck = devm_clk_get(dev, name); | 919 | port->ahb_ck = devm_clk_get_optional(dev, name); |
| 919 | if (IS_ERR(port->ahb_ck)) { | 920 | if (IS_ERR(port->ahb_ck)) |
| 920 | if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER) | 921 | return PTR_ERR(port->ahb_ck); |
| 921 | return -EPROBE_DEFER; | ||
| 922 | |||
| 923 | port->ahb_ck = NULL; | ||
| 924 | } | ||
| 925 | 922 | ||
| 926 | snprintf(name, sizeof(name), "axi_ck%d", slot); | 923 | snprintf(name, sizeof(name), "axi_ck%d", slot); |
| 927 | port->axi_ck = devm_clk_get(dev, name); | 924 | port->axi_ck = devm_clk_get_optional(dev, name); |
| 928 | if (IS_ERR(port->axi_ck)) { | 925 | if (IS_ERR(port->axi_ck)) |
| 929 | if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER) | 926 | return PTR_ERR(port->axi_ck); |
| 930 | return -EPROBE_DEFER; | ||
| 931 | |||
| 932 | port->axi_ck = NULL; | ||
| 933 | } | ||
| 934 | 927 | ||
| 935 | snprintf(name, sizeof(name), "aux_ck%d", slot); | 928 | snprintf(name, sizeof(name), "aux_ck%d", slot); |
| 936 | port->aux_ck = devm_clk_get(dev, name); | 929 | port->aux_ck = devm_clk_get_optional(dev, name); |
| 937 | if (IS_ERR(port->aux_ck)) { | 930 | if (IS_ERR(port->aux_ck)) |
| 938 | if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER) | 931 | return PTR_ERR(port->aux_ck); |
| 939 | return -EPROBE_DEFER; | ||
| 940 | |||
| 941 | port->aux_ck = NULL; | ||
| 942 | } | ||
| 943 | 932 | ||
| 944 | snprintf(name, sizeof(name), "obff_ck%d", slot); | 933 | snprintf(name, sizeof(name), "obff_ck%d", slot); |
| 945 | port->obff_ck = devm_clk_get(dev, name); | 934 | port->obff_ck = devm_clk_get_optional(dev, name); |
| 946 | if (IS_ERR(port->obff_ck)) { | 935 | if (IS_ERR(port->obff_ck)) |
| 947 | if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER) | 936 | return PTR_ERR(port->obff_ck); |
| 948 | return -EPROBE_DEFER; | ||
| 949 | |||
| 950 | port->obff_ck = NULL; | ||
| 951 | } | ||
| 952 | 937 | ||
| 953 | snprintf(name, sizeof(name), "pipe_ck%d", slot); | 938 | snprintf(name, sizeof(name), "pipe_ck%d", slot); |
| 954 | port->pipe_ck = devm_clk_get(dev, name); | 939 | port->pipe_ck = devm_clk_get_optional(dev, name); |
| 955 | if (IS_ERR(port->pipe_ck)) { | 940 | if (IS_ERR(port->pipe_ck)) |
| 956 | if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER) | 941 | return PTR_ERR(port->pipe_ck); |
| 957 | return -EPROBE_DEFER; | ||
| 958 | |||
| 959 | port->pipe_ck = NULL; | ||
| 960 | } | ||
| 961 | 942 | ||
| 962 | snprintf(name, sizeof(name), "pcie-rst%d", slot); | 943 | snprintf(name, sizeof(name), "pcie-rst%d", slot); |
| 963 | port->reset = devm_reset_control_get_optional_exclusive(dev, name); | 944 | port->reset = devm_reset_control_get_optional_exclusive(dev, name); |
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c index c8febb009454..f6a669a9af41 100644 --- a/drivers/pci/controller/pcie-rcar.c +++ b/drivers/pci/controller/pcie-rcar.c | |||
| @@ -46,14 +46,15 @@ | |||
| 46 | 46 | ||
| 47 | /* Transfer control */ | 47 | /* Transfer control */ |
| 48 | #define PCIETCTLR 0x02000 | 48 | #define PCIETCTLR 0x02000 |
| 49 | #define CFINIT 1 | 49 | #define DL_DOWN BIT(3) |
| 50 | #define CFINIT BIT(0) | ||
| 50 | #define PCIETSTR 0x02004 | 51 | #define PCIETSTR 0x02004 |
| 51 | #define DATA_LINK_ACTIVE 1 | 52 | #define DATA_LINK_ACTIVE BIT(0) |
| 52 | #define PCIEERRFR 0x02020 | 53 | #define PCIEERRFR 0x02020 |
| 53 | #define UNSUPPORTED_REQUEST BIT(4) | 54 | #define UNSUPPORTED_REQUEST BIT(4) |
| 54 | #define PCIEMSIFR 0x02044 | 55 | #define PCIEMSIFR 0x02044 |
| 55 | #define PCIEMSIALR 0x02048 | 56 | #define PCIEMSIALR 0x02048 |
| 56 | #define MSIFE 1 | 57 | #define MSIFE BIT(0) |
| 57 | #define PCIEMSIAUR 0x0204c | 58 | #define PCIEMSIAUR 0x0204c |
| 58 | #define PCIEMSIIER 0x02050 | 59 | #define PCIEMSIIER 0x02050 |
| 59 | 60 | ||
| @@ -94,6 +95,7 @@ | |||
| 94 | #define MACCTLR 0x011058 | 95 | #define MACCTLR 0x011058 |
| 95 | #define SPEED_CHANGE BIT(24) | 96 | #define SPEED_CHANGE BIT(24) |
| 96 | #define SCRAMBLE_DISABLE BIT(27) | 97 | #define SCRAMBLE_DISABLE BIT(27) |
| 98 | #define PMSR 0x01105c | ||
| 97 | #define MACS2R 0x011078 | 99 | #define MACS2R 0x011078 |
| 98 | #define MACCGSPSETR 0x011084 | 100 | #define MACCGSPSETR 0x011084 |
| 99 | #define SPCNGRSN BIT(31) | 101 | #define SPCNGRSN BIT(31) |
| @@ -152,14 +154,13 @@ struct rcar_pcie { | |||
| 152 | struct rcar_msi msi; | 154 | struct rcar_msi msi; |
| 153 | }; | 155 | }; |
| 154 | 156 | ||
| 155 | static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val, | 157 | static void rcar_pci_write_reg(struct rcar_pcie *pcie, u32 val, |
| 156 | unsigned long reg) | 158 | unsigned int reg) |
| 157 | { | 159 | { |
| 158 | writel(val, pcie->base + reg); | 160 | writel(val, pcie->base + reg); |
| 159 | } | 161 | } |
| 160 | 162 | ||
| 161 | static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie, | 163 | static u32 rcar_pci_read_reg(struct rcar_pcie *pcie, unsigned int reg) |
| 162 | unsigned long reg) | ||
| 163 | { | 164 | { |
| 164 | return readl(pcie->base + reg); | 165 | return readl(pcie->base + reg); |
| 165 | } | 166 | } |
| @@ -171,7 +172,7 @@ enum { | |||
| 171 | 172 | ||
| 172 | static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) | 173 | static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) |
| 173 | { | 174 | { |
| 174 | int shift = 8 * (where & 3); | 175 | unsigned int shift = BITS_PER_BYTE * (where & 3); |
| 175 | u32 val = rcar_pci_read_reg(pcie, where & ~3); | 176 | u32 val = rcar_pci_read_reg(pcie, where & ~3); |
| 176 | 177 | ||
| 177 | val &= ~(mask << shift); | 178 | val &= ~(mask << shift); |
| @@ -181,7 +182,7 @@ static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data) | |||
| 181 | 182 | ||
| 182 | static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) | 183 | static u32 rcar_read_conf(struct rcar_pcie *pcie, int where) |
| 183 | { | 184 | { |
| 184 | int shift = 8 * (where & 3); | 185 | unsigned int shift = BITS_PER_BYTE * (where & 3); |
| 185 | u32 val = rcar_pci_read_reg(pcie, where & ~3); | 186 | u32 val = rcar_pci_read_reg(pcie, where & ~3); |
| 186 | 187 | ||
| 187 | return val >> shift; | 188 | return val >> shift; |
| @@ -192,7 +193,7 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie, | |||
| 192 | unsigned char access_type, struct pci_bus *bus, | 193 | unsigned char access_type, struct pci_bus *bus, |
| 193 | unsigned int devfn, int where, u32 *data) | 194 | unsigned int devfn, int where, u32 *data) |
| 194 | { | 195 | { |
| 195 | int dev, func, reg, index; | 196 | unsigned int dev, func, reg, index; |
| 196 | 197 | ||
| 197 | dev = PCI_SLOT(devfn); | 198 | dev = PCI_SLOT(devfn); |
| 198 | func = PCI_FUNC(devfn); | 199 | func = PCI_FUNC(devfn); |
| @@ -281,12 +282,12 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn, | |||
| 281 | } | 282 | } |
| 282 | 283 | ||
| 283 | if (size == 1) | 284 | if (size == 1) |
| 284 | *val = (*val >> (8 * (where & 3))) & 0xff; | 285 | *val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff; |
| 285 | else if (size == 2) | 286 | else if (size == 2) |
| 286 | *val = (*val >> (8 * (where & 2))) & 0xffff; | 287 | *val = (*val >> (BITS_PER_BYTE * (where & 2))) & 0xffff; |
| 287 | 288 | ||
| 288 | dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", | 289 | dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", |
| 289 | bus->number, devfn, where, size, (unsigned long)*val); | 290 | bus->number, devfn, where, size, *val); |
| 290 | 291 | ||
| 291 | return ret; | 292 | return ret; |
| 292 | } | 293 | } |
| @@ -296,23 +297,24 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn, | |||
| 296 | int where, int size, u32 val) | 297 | int where, int size, u32 val) |
| 297 | { | 298 | { |
| 298 | struct rcar_pcie *pcie = bus->sysdata; | 299 | struct rcar_pcie *pcie = bus->sysdata; |
| 299 | int shift, ret; | 300 | unsigned int shift; |
| 300 | u32 data; | 301 | u32 data; |
| 302 | int ret; | ||
| 301 | 303 | ||
| 302 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, | 304 | ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ, |
| 303 | bus, devfn, where, &data); | 305 | bus, devfn, where, &data); |
| 304 | if (ret != PCIBIOS_SUCCESSFUL) | 306 | if (ret != PCIBIOS_SUCCESSFUL) |
| 305 | return ret; | 307 | return ret; |
| 306 | 308 | ||
| 307 | dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n", | 309 | dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08x\n", |
| 308 | bus->number, devfn, where, size, (unsigned long)val); | 310 | bus->number, devfn, where, size, val); |
| 309 | 311 | ||
| 310 | if (size == 1) { | 312 | if (size == 1) { |
| 311 | shift = 8 * (where & 3); | 313 | shift = BITS_PER_BYTE * (where & 3); |
| 312 | data &= ~(0xff << shift); | 314 | data &= ~(0xff << shift); |
| 313 | data |= ((val & 0xff) << shift); | 315 | data |= ((val & 0xff) << shift); |
| 314 | } else if (size == 2) { | 316 | } else if (size == 2) { |
| 315 | shift = 8 * (where & 2); | 317 | shift = BITS_PER_BYTE * (where & 2); |
| 316 | data &= ~(0xffff << shift); | 318 | data &= ~(0xffff << shift); |
| 317 | data |= ((val & 0xffff) << shift); | 319 | data |= ((val & 0xffff) << shift); |
| 318 | } else | 320 | } else |
| @@ -507,10 +509,10 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie) | |||
| 507 | } | 509 | } |
| 508 | 510 | ||
| 509 | static void phy_write_reg(struct rcar_pcie *pcie, | 511 | static void phy_write_reg(struct rcar_pcie *pcie, |
| 510 | unsigned int rate, unsigned int addr, | 512 | unsigned int rate, u32 addr, |
| 511 | unsigned int lane, unsigned int data) | 513 | unsigned int lane, u32 data) |
| 512 | { | 514 | { |
| 513 | unsigned long phyaddr; | 515 | u32 phyaddr; |
| 514 | 516 | ||
| 515 | phyaddr = WRITE_CMD | | 517 | phyaddr = WRITE_CMD | |
| 516 | ((rate & 1) << RATE_POS) | | 518 | ((rate & 1) << RATE_POS) | |
| @@ -738,15 +740,15 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) | |||
| 738 | 740 | ||
| 739 | while (reg) { | 741 | while (reg) { |
| 740 | unsigned int index = find_first_bit(®, 32); | 742 | unsigned int index = find_first_bit(®, 32); |
| 741 | unsigned int irq; | 743 | unsigned int msi_irq; |
| 742 | 744 | ||
| 743 | /* clear the interrupt */ | 745 | /* clear the interrupt */ |
| 744 | rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); | 746 | rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR); |
| 745 | 747 | ||
| 746 | irq = irq_find_mapping(msi->domain, index); | 748 | msi_irq = irq_find_mapping(msi->domain, index); |
| 747 | if (irq) { | 749 | if (msi_irq) { |
| 748 | if (test_bit(index, msi->used)) | 750 | if (test_bit(index, msi->used)) |
| 749 | generic_handle_irq(irq); | 751 | generic_handle_irq(msi_irq); |
| 750 | else | 752 | else |
| 751 | dev_info(dev, "unhandled MSI\n"); | 753 | dev_info(dev, "unhandled MSI\n"); |
| 752 | } else { | 754 | } else { |
| @@ -890,7 +892,7 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) | |||
| 890 | { | 892 | { |
| 891 | struct device *dev = pcie->dev; | 893 | struct device *dev = pcie->dev; |
| 892 | struct rcar_msi *msi = &pcie->msi; | 894 | struct rcar_msi *msi = &pcie->msi; |
| 893 | unsigned long base; | 895 | phys_addr_t base; |
| 894 | int err, i; | 896 | int err, i; |
| 895 | 897 | ||
| 896 | mutex_init(&msi->lock); | 898 | mutex_init(&msi->lock); |
| @@ -929,10 +931,14 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie) | |||
| 929 | 931 | ||
| 930 | /* setup MSI data target */ | 932 | /* setup MSI data target */ |
| 931 | msi->pages = __get_free_pages(GFP_KERNEL, 0); | 933 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
| 934 | if (!msi->pages) { | ||
| 935 | err = -ENOMEM; | ||
| 936 | goto err; | ||
| 937 | } | ||
| 932 | base = virt_to_phys((void *)msi->pages); | 938 | base = virt_to_phys((void *)msi->pages); |
| 933 | 939 | ||
| 934 | rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR); | 940 | rcar_pci_write_reg(pcie, lower_32_bits(base) | MSIFE, PCIEMSIALR); |
| 935 | rcar_pci_write_reg(pcie, 0, PCIEMSIAUR); | 941 | rcar_pci_write_reg(pcie, upper_32_bits(base), PCIEMSIAUR); |
| 936 | 942 | ||
| 937 | /* enable all MSI interrupts */ | 943 | /* enable all MSI interrupts */ |
| 938 | rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); | 944 | rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER); |
| @@ -1118,7 +1124,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1118 | { | 1124 | { |
| 1119 | struct device *dev = &pdev->dev; | 1125 | struct device *dev = &pdev->dev; |
| 1120 | struct rcar_pcie *pcie; | 1126 | struct rcar_pcie *pcie; |
| 1121 | unsigned int data; | 1127 | u32 data; |
| 1122 | int err; | 1128 | int err; |
| 1123 | int (*phy_init_fn)(struct rcar_pcie *); | 1129 | int (*phy_init_fn)(struct rcar_pcie *); |
| 1124 | struct pci_host_bridge *bridge; | 1130 | struct pci_host_bridge *bridge; |
| @@ -1130,6 +1136,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1130 | pcie = pci_host_bridge_priv(bridge); | 1136 | pcie = pci_host_bridge_priv(bridge); |
| 1131 | 1137 | ||
| 1132 | pcie->dev = dev; | 1138 | pcie->dev = dev; |
| 1139 | platform_set_drvdata(pdev, pcie); | ||
| 1133 | 1140 | ||
| 1134 | err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); | 1141 | err = pci_parse_request_of_pci_ranges(dev, &pcie->resources, NULL); |
| 1135 | if (err) | 1142 | if (err) |
| @@ -1221,10 +1228,28 @@ err_free_bridge: | |||
| 1221 | return err; | 1228 | return err; |
| 1222 | } | 1229 | } |
| 1223 | 1230 | ||
| 1231 | static int rcar_pcie_resume_noirq(struct device *dev) | ||
| 1232 | { | ||
| 1233 | struct rcar_pcie *pcie = dev_get_drvdata(dev); | ||
| 1234 | |||
| 1235 | if (rcar_pci_read_reg(pcie, PMSR) && | ||
| 1236 | !(rcar_pci_read_reg(pcie, PCIETCTLR) & DL_DOWN)) | ||
| 1237 | return 0; | ||
| 1238 | |||
| 1239 | /* Re-establish the PCIe link */ | ||
| 1240 | rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR); | ||
| 1241 | return rcar_pcie_wait_for_dl(pcie); | ||
| 1242 | } | ||
| 1243 | |||
| 1244 | static const struct dev_pm_ops rcar_pcie_pm_ops = { | ||
| 1245 | .resume_noirq = rcar_pcie_resume_noirq, | ||
| 1246 | }; | ||
| 1247 | |||
| 1224 | static struct platform_driver rcar_pcie_driver = { | 1248 | static struct platform_driver rcar_pcie_driver = { |
| 1225 | .driver = { | 1249 | .driver = { |
| 1226 | .name = "rcar-pcie", | 1250 | .name = "rcar-pcie", |
| 1227 | .of_match_table = rcar_pcie_of_match, | 1251 | .of_match_table = rcar_pcie_of_match, |
| 1252 | .pm = &rcar_pcie_pm_ops, | ||
| 1228 | .suppress_bind_attrs = true, | 1253 | .suppress_bind_attrs = true, |
| 1229 | }, | 1254 | }, |
| 1230 | .probe = rcar_pcie_probe, | 1255 | .probe = rcar_pcie_probe, |
diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index a5d799e2dff2..d743b0a48988 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c | |||
| @@ -350,7 +350,7 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn, | |||
| 350 | struct rockchip_pcie *rockchip = &ep->rockchip; | 350 | struct rockchip_pcie *rockchip = &ep->rockchip; |
| 351 | u32 r = ep->max_regions - 1; | 351 | u32 r = ep->max_regions - 1; |
| 352 | u32 offset; | 352 | u32 offset; |
| 353 | u16 status; | 353 | u32 status; |
| 354 | u8 msg_code; | 354 | u8 msg_code; |
| 355 | 355 | ||
| 356 | if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || | 356 | if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR || |
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 1372d270764f..8d20f1793a61 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c | |||
| @@ -724,6 +724,7 @@ static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip) | |||
| 724 | 724 | ||
| 725 | rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, | 725 | rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX, |
| 726 | &intx_domain_ops, rockchip); | 726 | &intx_domain_ops, rockchip); |
| 727 | of_node_put(intc); | ||
| 727 | if (!rockchip->irq_domain) { | 728 | if (!rockchip->irq_domain) { |
| 728 | dev_err(dev, "failed to get a INTx IRQ domain\n"); | 729 | dev_err(dev, "failed to get a INTx IRQ domain\n"); |
| 729 | return -EINVAL; | 730 | return -EINVAL; |
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 81538d77f790..3b031f00a94a 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c | |||
| @@ -438,11 +438,10 @@ static const struct irq_domain_ops legacy_domain_ops = { | |||
| 438 | #ifdef CONFIG_PCI_MSI | 438 | #ifdef CONFIG_PCI_MSI |
| 439 | static struct irq_chip nwl_msi_irq_chip = { | 439 | static struct irq_chip nwl_msi_irq_chip = { |
| 440 | .name = "nwl_pcie:msi", | 440 | .name = "nwl_pcie:msi", |
| 441 | .irq_enable = unmask_msi_irq, | 441 | .irq_enable = pci_msi_unmask_irq, |
| 442 | .irq_disable = mask_msi_irq, | 442 | .irq_disable = pci_msi_mask_irq, |
| 443 | .irq_mask = mask_msi_irq, | 443 | .irq_mask = pci_msi_mask_irq, |
| 444 | .irq_unmask = unmask_msi_irq, | 444 | .irq_unmask = pci_msi_unmask_irq, |
| 445 | |||
| 446 | }; | 445 | }; |
| 447 | 446 | ||
| 448 | static struct msi_domain_info nwl_msi_domain_info = { | 447 | static struct msi_domain_info nwl_msi_domain_info = { |
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 9bd1a35cd5d8..5bf3af3b28e6 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c | |||
| @@ -336,14 +336,19 @@ static const struct irq_domain_ops msi_domain_ops = { | |||
| 336 | * xilinx_pcie_enable_msi - Enable MSI support | 336 | * xilinx_pcie_enable_msi - Enable MSI support |
| 337 | * @port: PCIe port information | 337 | * @port: PCIe port information |
| 338 | */ | 338 | */ |
| 339 | static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) | 339 | static int xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) |
| 340 | { | 340 | { |
| 341 | phys_addr_t msg_addr; | 341 | phys_addr_t msg_addr; |
| 342 | 342 | ||
| 343 | port->msi_pages = __get_free_pages(GFP_KERNEL, 0); | 343 | port->msi_pages = __get_free_pages(GFP_KERNEL, 0); |
| 344 | if (!port->msi_pages) | ||
| 345 | return -ENOMEM; | ||
| 346 | |||
| 344 | msg_addr = virt_to_phys((void *)port->msi_pages); | 347 | msg_addr = virt_to_phys((void *)port->msi_pages); |
| 345 | pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); | 348 | pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1); |
| 346 | pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); | 349 | pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); |
| 350 | |||
| 351 | return 0; | ||
| 347 | } | 352 | } |
| 348 | 353 | ||
| 349 | /* INTx Functions */ | 354 | /* INTx Functions */ |
| @@ -498,6 +503,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | |||
| 498 | struct device *dev = port->dev; | 503 | struct device *dev = port->dev; |
| 499 | struct device_node *node = dev->of_node; | 504 | struct device_node *node = dev->of_node; |
| 500 | struct device_node *pcie_intc_node; | 505 | struct device_node *pcie_intc_node; |
| 506 | int ret; | ||
| 501 | 507 | ||
| 502 | /* Setup INTx */ | 508 | /* Setup INTx */ |
| 503 | pcie_intc_node = of_get_next_child(node, NULL); | 509 | pcie_intc_node = of_get_next_child(node, NULL); |
| @@ -526,7 +532,9 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port) | |||
| 526 | return -ENODEV; | 532 | return -ENODEV; |
| 527 | } | 533 | } |
| 528 | 534 | ||
| 529 | xilinx_pcie_enable_msi(port); | 535 | ret = xilinx_pcie_enable_msi(port); |
| 536 | if (ret) | ||
| 537 | return ret; | ||
| 530 | } | 538 | } |
| 531 | 539 | ||
| 532 | return 0; | 540 | return 0; |
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index d0b91da49bf4..27806987e93b 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c | |||
| @@ -438,7 +438,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) | |||
| 438 | epc_features = epf_test->epc_features; | 438 | epc_features = epf_test->epc_features; |
| 439 | 439 | ||
| 440 | base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), | 440 | base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), |
| 441 | test_reg_bar); | 441 | test_reg_bar, epc_features->align); |
| 442 | if (!base) { | 442 | if (!base) { |
| 443 | dev_err(dev, "Failed to allocated register space\n"); | 443 | dev_err(dev, "Failed to allocated register space\n"); |
| 444 | return -ENOMEM; | 444 | return -ENOMEM; |
| @@ -453,7 +453,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) | |||
| 453 | if (!!(epc_features->reserved_bar & (1 << bar))) | 453 | if (!!(epc_features->reserved_bar & (1 << bar))) |
| 454 | continue; | 454 | continue; |
| 455 | 455 | ||
| 456 | base = pci_epf_alloc_space(epf, bar_size[bar], bar); | 456 | base = pci_epf_alloc_space(epf, bar_size[bar], bar, |
| 457 | epc_features->align); | ||
| 457 | if (!base) | 458 | if (!base) |
| 458 | dev_err(dev, "Failed to allocate space for BAR%d\n", | 459 | dev_err(dev, "Failed to allocate space for BAR%d\n", |
| 459 | bar); | 460 | bar); |
| @@ -591,6 +592,11 @@ static int __init pci_epf_test_init(void) | |||
| 591 | 592 | ||
| 592 | kpcitest_workqueue = alloc_workqueue("kpcitest", | 593 | kpcitest_workqueue = alloc_workqueue("kpcitest", |
| 593 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | 594 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); |
| 595 | if (!kpcitest_workqueue) { | ||
| 596 | pr_err("Failed to allocate the kpcitest work queue\n"); | ||
| 597 | return -ENOMEM; | ||
| 598 | } | ||
| 599 | |||
| 594 | ret = pci_epf_register_driver(&test_driver); | 600 | ret = pci_epf_register_driver(&test_driver); |
| 595 | if (ret) { | 601 | if (ret) { |
| 596 | pr_err("Failed to register pci epf test driver --> %d\n", ret); | 602 | pr_err("Failed to register pci epf test driver --> %d\n", ret); |
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 8bfdcd291196..fb1306de8f40 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c | |||
| @@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space); | |||
| 109 | * pci_epf_alloc_space() - allocate memory for the PCI EPF register space | 109 | * pci_epf_alloc_space() - allocate memory for the PCI EPF register space |
| 110 | * @size: the size of the memory that has to be allocated | 110 | * @size: the size of the memory that has to be allocated |
| 111 | * @bar: the BAR number corresponding to the allocated register space | 111 | * @bar: the BAR number corresponding to the allocated register space |
| 112 | * @align: alignment size for the allocation region | ||
| 112 | * | 113 | * |
| 113 | * Invoke to allocate memory for the PCI EPF register space. | 114 | * Invoke to allocate memory for the PCI EPF register space. |
| 114 | */ | 115 | */ |
| 115 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | 116 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, |
| 117 | size_t align) | ||
| 116 | { | 118 | { |
| 117 | void *space; | 119 | void *space; |
| 118 | struct device *dev = epf->epc->dev.parent; | 120 | struct device *dev = epf->epc->dev.parent; |
| @@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | |||
| 120 | 122 | ||
| 121 | if (size < 128) | 123 | if (size < 128) |
| 122 | size = 128; | 124 | size = 128; |
| 123 | size = roundup_pow_of_two(size); | 125 | |
| 126 | if (align) | ||
| 127 | size = ALIGN(size, align); | ||
| 128 | else | ||
| 129 | size = roundup_pow_of_two(size); | ||
| 124 | 130 | ||
| 125 | space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); | 131 | space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); |
| 126 | if (!space) { | 132 | if (!space) { |
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index 506e1d923a1f..8c51a04b8083 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h | |||
| @@ -25,36 +25,21 @@ | |||
| 25 | 25 | ||
| 26 | #include "../pcie/portdrv.h" | 26 | #include "../pcie/portdrv.h" |
| 27 | 27 | ||
| 28 | #define MY_NAME "pciehp" | ||
| 29 | |||
| 30 | extern bool pciehp_poll_mode; | 28 | extern bool pciehp_poll_mode; |
| 31 | extern int pciehp_poll_time; | 29 | extern int pciehp_poll_time; |
| 32 | extern bool pciehp_debug; | ||
| 33 | |||
| 34 | #define dbg(format, arg...) \ | ||
| 35 | do { \ | ||
| 36 | if (pciehp_debug) \ | ||
| 37 | printk(KERN_DEBUG "%s: " format, MY_NAME, ## arg); \ | ||
| 38 | } while (0) | ||
| 39 | #define err(format, arg...) \ | ||
| 40 | printk(KERN_ERR "%s: " format, MY_NAME, ## arg) | ||
| 41 | #define info(format, arg...) \ | ||
| 42 | printk(KERN_INFO "%s: " format, MY_NAME, ## arg) | ||
| 43 | #define warn(format, arg...) \ | ||
| 44 | printk(KERN_WARNING "%s: " format, MY_NAME, ## arg) | ||
| 45 | 30 | ||
| 31 | /* | ||
| 32 | * Set CONFIG_DYNAMIC_DEBUG=y and boot with 'dyndbg="file pciehp* +p"' to | ||
| 33 | * enable debug messages. | ||
| 34 | */ | ||
| 46 | #define ctrl_dbg(ctrl, format, arg...) \ | 35 | #define ctrl_dbg(ctrl, format, arg...) \ |
| 47 | do { \ | 36 | pci_dbg(ctrl->pcie->port, format, ## arg) |
| 48 | if (pciehp_debug) \ | ||
| 49 | dev_printk(KERN_DEBUG, &ctrl->pcie->device, \ | ||
| 50 | format, ## arg); \ | ||
| 51 | } while (0) | ||
| 52 | #define ctrl_err(ctrl, format, arg...) \ | 37 | #define ctrl_err(ctrl, format, arg...) \ |
| 53 | dev_err(&ctrl->pcie->device, format, ## arg) | 38 | pci_err(ctrl->pcie->port, format, ## arg) |
| 54 | #define ctrl_info(ctrl, format, arg...) \ | 39 | #define ctrl_info(ctrl, format, arg...) \ |
| 55 | dev_info(&ctrl->pcie->device, format, ## arg) | 40 | pci_info(ctrl->pcie->port, format, ## arg) |
| 56 | #define ctrl_warn(ctrl, format, arg...) \ | 41 | #define ctrl_warn(ctrl, format, arg...) \ |
| 57 | dev_warn(&ctrl->pcie->device, format, ## arg) | 42 | pci_warn(ctrl->pcie->port, format, ## arg) |
| 58 | 43 | ||
| 59 | #define SLOT_NAME_SIZE 10 | 44 | #define SLOT_NAME_SIZE 10 |
| 60 | 45 | ||
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index fc5366b50e95..6ad0d86762cb 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c | |||
| @@ -17,6 +17,9 @@ | |||
| 17 | * Dely Sy <dely.l.sy@intel.com>" | 17 | * Dely Sy <dely.l.sy@intel.com>" |
| 18 | */ | 18 | */ |
| 19 | 19 | ||
| 20 | #define pr_fmt(fmt) "pciehp: " fmt | ||
| 21 | #define dev_fmt pr_fmt | ||
| 22 | |||
| 20 | #include <linux/moduleparam.h> | 23 | #include <linux/moduleparam.h> |
| 21 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
| 22 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| @@ -27,7 +30,6 @@ | |||
| 27 | #include "../pci.h" | 30 | #include "../pci.h" |
| 28 | 31 | ||
| 29 | /* Global variables */ | 32 | /* Global variables */ |
| 30 | bool pciehp_debug; | ||
| 31 | bool pciehp_poll_mode; | 33 | bool pciehp_poll_mode; |
| 32 | int pciehp_poll_time; | 34 | int pciehp_poll_time; |
| 33 | 35 | ||
| @@ -35,15 +37,11 @@ int pciehp_poll_time; | |||
| 35 | * not really modular, but the easiest way to keep compat with existing | 37 | * not really modular, but the easiest way to keep compat with existing |
| 36 | * bootargs behaviour is to continue using module_param here. | 38 | * bootargs behaviour is to continue using module_param here. |
| 37 | */ | 39 | */ |
| 38 | module_param(pciehp_debug, bool, 0644); | ||
| 39 | module_param(pciehp_poll_mode, bool, 0644); | 40 | module_param(pciehp_poll_mode, bool, 0644); |
| 40 | module_param(pciehp_poll_time, int, 0644); | 41 | module_param(pciehp_poll_time, int, 0644); |
| 41 | MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); | ||
| 42 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); | 42 | MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); |
| 43 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); | 43 | MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); |
| 44 | 44 | ||
| 45 | #define PCIE_MODULE_NAME "pciehp" | ||
| 46 | |||
| 47 | static int set_attention_status(struct hotplug_slot *slot, u8 value); | 45 | static int set_attention_status(struct hotplug_slot *slot, u8 value); |
| 48 | static int get_power_status(struct hotplug_slot *slot, u8 *value); | 46 | static int get_power_status(struct hotplug_slot *slot, u8 *value); |
| 49 | static int get_latch_status(struct hotplug_slot *slot, u8 *value); | 47 | static int get_latch_status(struct hotplug_slot *slot, u8 *value); |
| @@ -182,14 +180,14 @@ static int pciehp_probe(struct pcie_device *dev) | |||
| 182 | 180 | ||
| 183 | if (!dev->port->subordinate) { | 181 | if (!dev->port->subordinate) { |
| 184 | /* Can happen if we run out of bus numbers during probe */ | 182 | /* Can happen if we run out of bus numbers during probe */ |
| 185 | dev_err(&dev->device, | 183 | pci_err(dev->port, |
| 186 | "Hotplug bridge without secondary bus, ignoring\n"); | 184 | "Hotplug bridge without secondary bus, ignoring\n"); |
| 187 | return -ENODEV; | 185 | return -ENODEV; |
| 188 | } | 186 | } |
| 189 | 187 | ||
| 190 | ctrl = pcie_init(dev); | 188 | ctrl = pcie_init(dev); |
| 191 | if (!ctrl) { | 189 | if (!ctrl) { |
| 192 | dev_err(&dev->device, "Controller initialization failed\n"); | 190 | pci_err(dev->port, "Controller initialization failed\n"); |
| 193 | return -ENODEV; | 191 | return -ENODEV; |
| 194 | } | 192 | } |
| 195 | set_service_data(dev, ctrl); | 193 | set_service_data(dev, ctrl); |
| @@ -307,7 +305,7 @@ static int pciehp_runtime_resume(struct pcie_device *dev) | |||
| 307 | #endif /* PM */ | 305 | #endif /* PM */ |
| 308 | 306 | ||
| 309 | static struct pcie_port_service_driver hpdriver_portdrv = { | 307 | static struct pcie_port_service_driver hpdriver_portdrv = { |
| 310 | .name = PCIE_MODULE_NAME, | 308 | .name = "pciehp", |
| 311 | .port_type = PCIE_ANY_PORT, | 309 | .port_type = PCIE_ANY_PORT, |
| 312 | .service = PCIE_PORT_SERVICE_HP, | 310 | .service = PCIE_PORT_SERVICE_HP, |
| 313 | 311 | ||
| @@ -328,9 +326,9 @@ int __init pcie_hp_init(void) | |||
| 328 | int retval = 0; | 326 | int retval = 0; |
| 329 | 327 | ||
| 330 | retval = pcie_port_service_register(&hpdriver_portdrv); | 328 | retval = pcie_port_service_register(&hpdriver_portdrv); |
| 331 | dbg("pcie_port_service_register = %d\n", retval); | 329 | pr_debug("pcie_port_service_register = %d\n", retval); |
| 332 | if (retval) | 330 | if (retval) |
| 333 | dbg("Failure to register service\n"); | 331 | pr_debug("Failure to register service\n"); |
| 334 | 332 | ||
| 335 | return retval; | 333 | return retval; |
| 336 | } | 334 | } |
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 905282a8ddaa..631ced0ab28a 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | * | 13 | * |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #define dev_fmt(fmt) "pciehp: " fmt | ||
| 17 | |||
| 16 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 17 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 18 | #include <linux/pm_runtime.h> | 20 | #include <linux/pm_runtime.h> |
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 6a2365cd794e..bd990e3371e3 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com> | 12 | * Send feedback to <greg@kroah.com>,<kristen.c.accardi@intel.com> |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #define dev_fmt(fmt) "pciehp: " fmt | ||
| 16 | |||
| 15 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
| 16 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 17 | #include <linux/jiffies.h> | 19 | #include <linux/jiffies.h> |
| @@ -46,7 +48,7 @@ static inline int pciehp_request_irq(struct controller *ctrl) | |||
| 46 | 48 | ||
| 47 | /* Installs the interrupt handler */ | 49 | /* Installs the interrupt handler */ |
| 48 | retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist, | 50 | retval = request_threaded_irq(irq, pciehp_isr, pciehp_ist, |
| 49 | IRQF_SHARED, MY_NAME, ctrl); | 51 | IRQF_SHARED, "pciehp", ctrl); |
| 50 | if (retval) | 52 | if (retval) |
| 51 | ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", | 53 | ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", |
| 52 | irq); | 54 | irq); |
| @@ -232,8 +234,8 @@ static bool pci_bus_check_dev(struct pci_bus *bus, int devfn) | |||
| 232 | delay -= step; | 234 | delay -= step; |
| 233 | } while (delay > 0); | 235 | } while (delay > 0); |
| 234 | 236 | ||
| 235 | if (count > 1 && pciehp_debug) | 237 | if (count > 1) |
| 236 | printk(KERN_DEBUG "pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n", | 238 | pr_debug("pci %04x:%02x:%02x.%d id reading try %d times with interval %d ms to get %08x\n", |
| 237 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), | 239 | pci_domain_nr(bus), bus->number, PCI_SLOT(devfn), |
| 238 | PCI_FUNC(devfn), count, step, l); | 240 | PCI_FUNC(devfn), count, step, l); |
| 239 | 241 | ||
| @@ -822,14 +824,11 @@ static inline void dbg_ctrl(struct controller *ctrl) | |||
| 822 | struct pci_dev *pdev = ctrl->pcie->port; | 824 | struct pci_dev *pdev = ctrl->pcie->port; |
| 823 | u16 reg16; | 825 | u16 reg16; |
| 824 | 826 | ||
| 825 | if (!pciehp_debug) | 827 | ctrl_dbg(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); |
| 826 | return; | ||
| 827 | |||
| 828 | ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); | ||
| 829 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16); | 828 | pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, ®16); |
| 830 | ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); | 829 | ctrl_dbg(ctrl, "Slot Status : 0x%04x\n", reg16); |
| 831 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16); | 830 | pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, ®16); |
| 832 | ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); | 831 | ctrl_dbg(ctrl, "Slot Control : 0x%04x\n", reg16); |
| 833 | } | 832 | } |
| 834 | 833 | ||
| 835 | #define FLAG(x, y) (((x) & (y)) ? '+' : '-') | 834 | #define FLAG(x, y) (((x) & (y)) ? '+' : '-') |
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index b9c1396db6fe..d17f3bf36f70 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | * | 13 | * |
| 14 | */ | 14 | */ |
| 15 | 15 | ||
| 16 | #define dev_fmt(fmt) "pciehp: " fmt | ||
| 17 | |||
| 16 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 17 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 18 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index e2356a9c7088..182f9e3443ee 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
| @@ -51,6 +51,7 @@ static struct device_node *find_vio_slot_node(char *drc_name) | |||
| 51 | if (rc == 0) | 51 | if (rc == 0) |
| 52 | break; | 52 | break; |
| 53 | } | 53 | } |
| 54 | of_node_put(parent); | ||
| 54 | 55 | ||
| 55 | return dn; | 56 | return dn; |
| 56 | } | 57 | } |
| @@ -71,6 +72,7 @@ static struct device_node *find_php_slot_pci_node(char *drc_name, | |||
| 71 | return np; | 72 | return np; |
| 72 | } | 73 | } |
| 73 | 74 | ||
| 75 | /* Returns a device_node with its reference count incremented */ | ||
| 74 | static struct device_node *find_dlpar_node(char *drc_name, int *node_type) | 76 | static struct device_node *find_dlpar_node(char *drc_name, int *node_type) |
| 75 | { | 77 | { |
| 76 | struct device_node *dn; | 78 | struct device_node *dn; |
| @@ -306,6 +308,7 @@ int dlpar_add_slot(char *drc_name) | |||
| 306 | rc = dlpar_add_phb(drc_name, dn); | 308 | rc = dlpar_add_phb(drc_name, dn); |
| 307 | break; | 309 | break; |
| 308 | } | 310 | } |
| 311 | of_node_put(dn); | ||
| 309 | 312 | ||
| 310 | printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); | 313 | printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); |
| 311 | exit: | 314 | exit: |
| @@ -439,6 +442,7 @@ int dlpar_remove_slot(char *drc_name) | |||
| 439 | rc = dlpar_remove_pci_slot(drc_name, dn); | 442 | rc = dlpar_remove_pci_slot(drc_name, dn); |
| 440 | break; | 443 | break; |
| 441 | } | 444 | } |
| 445 | of_node_put(dn); | ||
| 442 | vm_unmap_aliases(); | 446 | vm_unmap_aliases(); |
| 443 | 447 | ||
| 444 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); | 448 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); |
diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 5282aa3e33c5..93b4a945c55d 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | /* free up the memory used by a slot */ | 21 | /* free up the memory used by a slot */ |
| 22 | void dealloc_slot_struct(struct slot *slot) | 22 | void dealloc_slot_struct(struct slot *slot) |
| 23 | { | 23 | { |
| 24 | of_node_put(slot->dn); | ||
| 24 | kfree(slot->name); | 25 | kfree(slot->name); |
| 25 | kfree(slot); | 26 | kfree(slot); |
| 26 | } | 27 | } |
| @@ -36,7 +37,7 @@ struct slot *alloc_slot_struct(struct device_node *dn, | |||
| 36 | slot->name = kstrdup(drc_name, GFP_KERNEL); | 37 | slot->name = kstrdup(drc_name, GFP_KERNEL); |
| 37 | if (!slot->name) | 38 | if (!slot->name) |
| 38 | goto error_slot; | 39 | goto error_slot; |
| 39 | slot->dn = dn; | 40 | slot->dn = of_node_get(dn); |
| 40 | slot->index = drc_index; | 41 | slot->index = drc_index; |
| 41 | slot->power_domain = power_domain; | 42 | slot->power_domain = power_domain; |
| 42 | slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops; | 43 | slot->hotplug_slot.ops = &rpaphp_hotplug_slot_ops; |
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 73986825d221..e039b740fe74 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -1338,7 +1338,7 @@ irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, | |||
| 1338 | struct msi_desc *desc) | 1338 | struct msi_desc *desc) |
| 1339 | { | 1339 | { |
| 1340 | return (irq_hw_number_t)desc->msi_attrib.entry_nr | | 1340 | return (irq_hw_number_t)desc->msi_attrib.entry_nr | |
| 1341 | PCI_DEVID(dev->bus->number, dev->devfn) << 11 | | 1341 | pci_dev_id(dev) << 11 | |
| 1342 | (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; | 1342 | (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; |
| 1343 | } | 1343 | } |
| 1344 | 1344 | ||
| @@ -1508,7 +1508,7 @@ static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data) | |||
| 1508 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) | 1508 | u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) |
| 1509 | { | 1509 | { |
| 1510 | struct device_node *of_node; | 1510 | struct device_node *of_node; |
| 1511 | u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1511 | u32 rid = pci_dev_id(pdev); |
| 1512 | 1512 | ||
| 1513 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); | 1513 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); |
| 1514 | 1514 | ||
| @@ -1531,7 +1531,7 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev) | |||
| 1531 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) | 1531 | struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) |
| 1532 | { | 1532 | { |
| 1533 | struct irq_domain *dom; | 1533 | struct irq_domain *dom; |
| 1534 | u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn); | 1534 | u32 rid = pci_dev_id(pdev); |
| 1535 | 1535 | ||
| 1536 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); | 1536 | pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); |
| 1537 | dom = of_msi_map_get_device_domain(&pdev->dev, rid); | 1537 | dom = of_msi_map_get_device_domain(&pdev->dev, rid); |
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 3d32da15c215..73d5adec0a28 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/of_pci.h> | 15 | #include <linux/of_pci.h> |
| 16 | #include "pci.h" | 16 | #include "pci.h" |
| 17 | 17 | ||
| 18 | #ifdef CONFIG_PCI | ||
| 18 | void pci_set_of_node(struct pci_dev *dev) | 19 | void pci_set_of_node(struct pci_dev *dev) |
| 19 | { | 20 | { |
| 20 | if (!dev->bus->dev.of_node) | 21 | if (!dev->bus->dev.of_node) |
| @@ -31,10 +32,16 @@ void pci_release_of_node(struct pci_dev *dev) | |||
| 31 | 32 | ||
| 32 | void pci_set_bus_of_node(struct pci_bus *bus) | 33 | void pci_set_bus_of_node(struct pci_bus *bus) |
| 33 | { | 34 | { |
| 34 | if (bus->self == NULL) | 35 | struct device_node *node; |
| 35 | bus->dev.of_node = pcibios_get_phb_of_node(bus); | 36 | |
| 36 | else | 37 | if (bus->self == NULL) { |
| 37 | bus->dev.of_node = of_node_get(bus->self->dev.of_node); | 38 | node = pcibios_get_phb_of_node(bus); |
| 39 | } else { | ||
| 40 | node = of_node_get(bus->self->dev.of_node); | ||
| 41 | if (node && of_property_read_bool(node, "external-facing")) | ||
| 42 | bus->self->untrusted = true; | ||
| 43 | } | ||
| 44 | bus->dev.of_node = node; | ||
| 38 | } | 45 | } |
| 39 | 46 | ||
| 40 | void pci_release_bus_of_node(struct pci_bus *bus) | 47 | void pci_release_bus_of_node(struct pci_bus *bus) |
| @@ -197,27 +204,6 @@ int of_get_pci_domain_nr(struct device_node *node) | |||
| 197 | EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); | 204 | EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); |
| 198 | 205 | ||
| 199 | /** | 206 | /** |
| 200 | * This function will try to find the limitation of link speed by finding | ||
| 201 | * a property called "max-link-speed" of the given device node. | ||
| 202 | * | ||
| 203 | * @node: device tree node with the max link speed information | ||
| 204 | * | ||
| 205 | * Returns the associated max link speed from DT, or a negative value if the | ||
| 206 | * required property is not found or is invalid. | ||
| 207 | */ | ||
| 208 | int of_pci_get_max_link_speed(struct device_node *node) | ||
| 209 | { | ||
| 210 | u32 max_link_speed; | ||
| 211 | |||
| 212 | if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || | ||
| 213 | max_link_speed > 4) | ||
| 214 | return -EINVAL; | ||
| 215 | |||
| 216 | return max_link_speed; | ||
| 217 | } | ||
| 218 | EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); | ||
| 219 | |||
| 220 | /** | ||
| 221 | * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only | 207 | * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only |
| 222 | * is present and valid | 208 | * is present and valid |
| 223 | */ | 209 | */ |
| @@ -537,3 +523,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev, | |||
| 537 | return err; | 523 | return err; |
| 538 | } | 524 | } |
| 539 | 525 | ||
| 526 | #endif /* CONFIG_PCI */ | ||
| 527 | |||
| 528 | /** | ||
| 529 | * This function will try to find the limitation of link speed by finding | ||
| 530 | * a property called "max-link-speed" of the given device node. | ||
| 531 | * | ||
| 532 | * @node: device tree node with the max link speed information | ||
| 533 | * | ||
| 534 | * Returns the associated max link speed from DT, or a negative value if the | ||
| 535 | * required property is not found or is invalid. | ||
| 536 | */ | ||
| 537 | int of_pci_get_max_link_speed(struct device_node *node) | ||
| 538 | { | ||
| 539 | u32 max_link_speed; | ||
| 540 | |||
| 541 | if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || | ||
| 542 | max_link_speed > 4) | ||
| 543 | return -EINVAL; | ||
| 544 | |||
| 545 | return max_link_speed; | ||
| 546 | } | ||
| 547 | EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); | ||
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index c52298d76e64..742928d0053e 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c | |||
| @@ -275,6 +275,30 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) | |||
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | /* | 277 | /* |
| 278 | * If we can't find a common upstream bridge take a look at the root | ||
| 279 | * complex and compare it to a whitelist of known good hardware. | ||
| 280 | */ | ||
| 281 | static bool root_complex_whitelist(struct pci_dev *dev) | ||
| 282 | { | ||
| 283 | struct pci_host_bridge *host = pci_find_host_bridge(dev->bus); | ||
| 284 | struct pci_dev *root = pci_get_slot(host->bus, PCI_DEVFN(0, 0)); | ||
| 285 | unsigned short vendor, device; | ||
| 286 | |||
| 287 | if (!root) | ||
| 288 | return false; | ||
| 289 | |||
| 290 | vendor = root->vendor; | ||
| 291 | device = root->device; | ||
| 292 | pci_dev_put(root); | ||
| 293 | |||
| 294 | /* AMD ZEN host bridges can do peer to peer */ | ||
| 295 | if (vendor == PCI_VENDOR_ID_AMD && device == 0x1450) | ||
| 296 | return true; | ||
| 297 | |||
| 298 | return false; | ||
| 299 | } | ||
| 300 | |||
| 301 | /* | ||
| 278 | * Find the distance through the nearest common upstream bridge between | 302 | * Find the distance through the nearest common upstream bridge between |
| 279 | * two PCI devices. | 303 | * two PCI devices. |
| 280 | * | 304 | * |
| @@ -317,13 +341,13 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) | |||
| 317 | * In this case, a list of all infringing bridge addresses will be | 341 | * In this case, a list of all infringing bridge addresses will be |
| 318 | * populated in acs_list (assuming it's non-null) for printk purposes. | 342 | * populated in acs_list (assuming it's non-null) for printk purposes. |
| 319 | */ | 343 | */ |
| 320 | static int upstream_bridge_distance(struct pci_dev *a, | 344 | static int upstream_bridge_distance(struct pci_dev *provider, |
| 321 | struct pci_dev *b, | 345 | struct pci_dev *client, |
| 322 | struct seq_buf *acs_list) | 346 | struct seq_buf *acs_list) |
| 323 | { | 347 | { |
| 348 | struct pci_dev *a = provider, *b = client, *bb; | ||
| 324 | int dist_a = 0; | 349 | int dist_a = 0; |
| 325 | int dist_b = 0; | 350 | int dist_b = 0; |
| 326 | struct pci_dev *bb = NULL; | ||
| 327 | int acs_cnt = 0; | 351 | int acs_cnt = 0; |
| 328 | 352 | ||
| 329 | /* | 353 | /* |
| @@ -354,6 +378,14 @@ static int upstream_bridge_distance(struct pci_dev *a, | |||
| 354 | dist_a++; | 378 | dist_a++; |
| 355 | } | 379 | } |
| 356 | 380 | ||
| 381 | /* | ||
| 382 | * Allow the connection if both devices are on a whitelisted root | ||
| 383 | * complex, but add an arbitary large value to the distance. | ||
| 384 | */ | ||
| 385 | if (root_complex_whitelist(provider) && | ||
| 386 | root_complex_whitelist(client)) | ||
| 387 | return 0x1000 + dist_a + dist_b; | ||
| 388 | |||
| 357 | return -1; | 389 | return -1; |
| 358 | 390 | ||
| 359 | check_b_path_acs: | 391 | check_b_path_acs: |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index e1949f7efd9c..c5e1a097d7e3 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
| @@ -119,7 +119,7 @@ phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle) | |||
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static acpi_status decode_type0_hpx_record(union acpi_object *record, | 121 | static acpi_status decode_type0_hpx_record(union acpi_object *record, |
| 122 | struct hotplug_params *hpx) | 122 | struct hpp_type0 *hpx0) |
| 123 | { | 123 | { |
| 124 | int i; | 124 | int i; |
| 125 | union acpi_object *fields = record->package.elements; | 125 | union acpi_object *fields = record->package.elements; |
| @@ -132,16 +132,14 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record, | |||
| 132 | for (i = 2; i < 6; i++) | 132 | for (i = 2; i < 6; i++) |
| 133 | if (fields[i].type != ACPI_TYPE_INTEGER) | 133 | if (fields[i].type != ACPI_TYPE_INTEGER) |
| 134 | return AE_ERROR; | 134 | return AE_ERROR; |
| 135 | hpx->t0 = &hpx->type0_data; | 135 | hpx0->revision = revision; |
| 136 | hpx->t0->revision = revision; | 136 | hpx0->cache_line_size = fields[2].integer.value; |
| 137 | hpx->t0->cache_line_size = fields[2].integer.value; | 137 | hpx0->latency_timer = fields[3].integer.value; |
| 138 | hpx->t0->latency_timer = fields[3].integer.value; | 138 | hpx0->enable_serr = fields[4].integer.value; |
| 139 | hpx->t0->enable_serr = fields[4].integer.value; | 139 | hpx0->enable_perr = fields[5].integer.value; |
| 140 | hpx->t0->enable_perr = fields[5].integer.value; | ||
| 141 | break; | 140 | break; |
| 142 | default: | 141 | default: |
| 143 | printk(KERN_WARNING | 142 | pr_warn("%s: Type 0 Revision %d record not supported\n", |
| 144 | "%s: Type 0 Revision %d record not supported\n", | ||
| 145 | __func__, revision); | 143 | __func__, revision); |
| 146 | return AE_ERROR; | 144 | return AE_ERROR; |
| 147 | } | 145 | } |
| @@ -149,7 +147,7 @@ static acpi_status decode_type0_hpx_record(union acpi_object *record, | |||
| 149 | } | 147 | } |
| 150 | 148 | ||
| 151 | static acpi_status decode_type1_hpx_record(union acpi_object *record, | 149 | static acpi_status decode_type1_hpx_record(union acpi_object *record, |
| 152 | struct hotplug_params *hpx) | 150 | struct hpp_type1 *hpx1) |
| 153 | { | 151 | { |
| 154 | int i; | 152 | int i; |
| 155 | union acpi_object *fields = record->package.elements; | 153 | union acpi_object *fields = record->package.elements; |
| @@ -162,15 +160,13 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record, | |||
| 162 | for (i = 2; i < 5; i++) | 160 | for (i = 2; i < 5; i++) |
| 163 | if (fields[i].type != ACPI_TYPE_INTEGER) | 161 | if (fields[i].type != ACPI_TYPE_INTEGER) |
| 164 | return AE_ERROR; | 162 | return AE_ERROR; |
| 165 | hpx->t1 = &hpx->type1_data; | 163 | hpx1->revision = revision; |
| 166 | hpx->t1->revision = revision; | 164 | hpx1->max_mem_read = fields[2].integer.value; |
| 167 | hpx->t1->max_mem_read = fields[2].integer.value; | 165 | hpx1->avg_max_split = fields[3].integer.value; |
| 168 | hpx->t1->avg_max_split = fields[3].integer.value; | 166 | hpx1->tot_max_split = fields[4].integer.value; |
| 169 | hpx->t1->tot_max_split = fields[4].integer.value; | ||
| 170 | break; | 167 | break; |
| 171 | default: | 168 | default: |
| 172 | printk(KERN_WARNING | 169 | pr_warn("%s: Type 1 Revision %d record not supported\n", |
| 173 | "%s: Type 1 Revision %d record not supported\n", | ||
| 174 | __func__, revision); | 170 | __func__, revision); |
| 175 | return AE_ERROR; | 171 | return AE_ERROR; |
| 176 | } | 172 | } |
| @@ -178,7 +174,7 @@ static acpi_status decode_type1_hpx_record(union acpi_object *record, | |||
| 178 | } | 174 | } |
| 179 | 175 | ||
| 180 | static acpi_status decode_type2_hpx_record(union acpi_object *record, | 176 | static acpi_status decode_type2_hpx_record(union acpi_object *record, |
| 181 | struct hotplug_params *hpx) | 177 | struct hpp_type2 *hpx2) |
| 182 | { | 178 | { |
| 183 | int i; | 179 | int i; |
| 184 | union acpi_object *fields = record->package.elements; | 180 | union acpi_object *fields = record->package.elements; |
| @@ -191,45 +187,102 @@ static acpi_status decode_type2_hpx_record(union acpi_object *record, | |||
| 191 | for (i = 2; i < 18; i++) | 187 | for (i = 2; i < 18; i++) |
| 192 | if (fields[i].type != ACPI_TYPE_INTEGER) | 188 | if (fields[i].type != ACPI_TYPE_INTEGER) |
| 193 | return AE_ERROR; | 189 | return AE_ERROR; |
| 194 | hpx->t2 = &hpx->type2_data; | 190 | hpx2->revision = revision; |
| 195 | hpx->t2->revision = revision; | 191 | hpx2->unc_err_mask_and = fields[2].integer.value; |
| 196 | hpx->t2->unc_err_mask_and = fields[2].integer.value; | 192 | hpx2->unc_err_mask_or = fields[3].integer.value; |
| 197 | hpx->t2->unc_err_mask_or = fields[3].integer.value; | 193 | hpx2->unc_err_sever_and = fields[4].integer.value; |
| 198 | hpx->t2->unc_err_sever_and = fields[4].integer.value; | 194 | hpx2->unc_err_sever_or = fields[5].integer.value; |
| 199 | hpx->t2->unc_err_sever_or = fields[5].integer.value; | 195 | hpx2->cor_err_mask_and = fields[6].integer.value; |
| 200 | hpx->t2->cor_err_mask_and = fields[6].integer.value; | 196 | hpx2->cor_err_mask_or = fields[7].integer.value; |
| 201 | hpx->t2->cor_err_mask_or = fields[7].integer.value; | 197 | hpx2->adv_err_cap_and = fields[8].integer.value; |
| 202 | hpx->t2->adv_err_cap_and = fields[8].integer.value; | 198 | hpx2->adv_err_cap_or = fields[9].integer.value; |
| 203 | hpx->t2->adv_err_cap_or = fields[9].integer.value; | 199 | hpx2->pci_exp_devctl_and = fields[10].integer.value; |
| 204 | hpx->t2->pci_exp_devctl_and = fields[10].integer.value; | 200 | hpx2->pci_exp_devctl_or = fields[11].integer.value; |
| 205 | hpx->t2->pci_exp_devctl_or = fields[11].integer.value; | 201 | hpx2->pci_exp_lnkctl_and = fields[12].integer.value; |
| 206 | hpx->t2->pci_exp_lnkctl_and = fields[12].integer.value; | 202 | hpx2->pci_exp_lnkctl_or = fields[13].integer.value; |
| 207 | hpx->t2->pci_exp_lnkctl_or = fields[13].integer.value; | 203 | hpx2->sec_unc_err_sever_and = fields[14].integer.value; |
| 208 | hpx->t2->sec_unc_err_sever_and = fields[14].integer.value; | 204 | hpx2->sec_unc_err_sever_or = fields[15].integer.value; |
| 209 | hpx->t2->sec_unc_err_sever_or = fields[15].integer.value; | 205 | hpx2->sec_unc_err_mask_and = fields[16].integer.value; |
| 210 | hpx->t2->sec_unc_err_mask_and = fields[16].integer.value; | 206 | hpx2->sec_unc_err_mask_or = fields[17].integer.value; |
| 211 | hpx->t2->sec_unc_err_mask_or = fields[17].integer.value; | ||
| 212 | break; | 207 | break; |
| 213 | default: | 208 | default: |
| 214 | printk(KERN_WARNING | 209 | pr_warn("%s: Type 2 Revision %d record not supported\n", |
| 215 | "%s: Type 2 Revision %d record not supported\n", | ||
| 216 | __func__, revision); | 210 | __func__, revision); |
| 217 | return AE_ERROR; | 211 | return AE_ERROR; |
| 218 | } | 212 | } |
| 219 | return AE_OK; | 213 | return AE_OK; |
| 220 | } | 214 | } |
| 221 | 215 | ||
| 222 | static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | 216 | static void parse_hpx3_register(struct hpx_type3 *hpx3_reg, |
| 217 | union acpi_object *reg_fields) | ||
| 218 | { | ||
| 219 | hpx3_reg->device_type = reg_fields[0].integer.value; | ||
| 220 | hpx3_reg->function_type = reg_fields[1].integer.value; | ||
| 221 | hpx3_reg->config_space_location = reg_fields[2].integer.value; | ||
| 222 | hpx3_reg->pci_exp_cap_id = reg_fields[3].integer.value; | ||
| 223 | hpx3_reg->pci_exp_cap_ver = reg_fields[4].integer.value; | ||
| 224 | hpx3_reg->pci_exp_vendor_id = reg_fields[5].integer.value; | ||
| 225 | hpx3_reg->dvsec_id = reg_fields[6].integer.value; | ||
| 226 | hpx3_reg->dvsec_rev = reg_fields[7].integer.value; | ||
| 227 | hpx3_reg->match_offset = reg_fields[8].integer.value; | ||
| 228 | hpx3_reg->match_mask_and = reg_fields[9].integer.value; | ||
| 229 | hpx3_reg->match_value = reg_fields[10].integer.value; | ||
| 230 | hpx3_reg->reg_offset = reg_fields[11].integer.value; | ||
| 231 | hpx3_reg->reg_mask_and = reg_fields[12].integer.value; | ||
| 232 | hpx3_reg->reg_mask_or = reg_fields[13].integer.value; | ||
| 233 | } | ||
| 234 | |||
| 235 | static acpi_status program_type3_hpx_record(struct pci_dev *dev, | ||
| 236 | union acpi_object *record, | ||
| 237 | const struct hotplug_program_ops *hp_ops) | ||
| 238 | { | ||
| 239 | union acpi_object *fields = record->package.elements; | ||
| 240 | u32 desc_count, expected_length, revision; | ||
| 241 | union acpi_object *reg_fields; | ||
| 242 | struct hpx_type3 hpx3; | ||
| 243 | int i; | ||
| 244 | |||
| 245 | revision = fields[1].integer.value; | ||
| 246 | switch (revision) { | ||
| 247 | case 1: | ||
| 248 | desc_count = fields[2].integer.value; | ||
| 249 | expected_length = 3 + desc_count * 14; | ||
| 250 | |||
| 251 | if (record->package.count != expected_length) | ||
| 252 | return AE_ERROR; | ||
| 253 | |||
| 254 | for (i = 2; i < expected_length; i++) | ||
| 255 | if (fields[i].type != ACPI_TYPE_INTEGER) | ||
| 256 | return AE_ERROR; | ||
| 257 | |||
| 258 | for (i = 0; i < desc_count; i++) { | ||
| 259 | reg_fields = fields + 3 + i * 14; | ||
| 260 | parse_hpx3_register(&hpx3, reg_fields); | ||
| 261 | hp_ops->program_type3(dev, &hpx3); | ||
| 262 | } | ||
| 263 | |||
| 264 | break; | ||
| 265 | default: | ||
| 266 | printk(KERN_WARNING | ||
| 267 | "%s: Type 3 Revision %d record not supported\n", | ||
| 268 | __func__, revision); | ||
| 269 | return AE_ERROR; | ||
| 270 | } | ||
| 271 | return AE_OK; | ||
| 272 | } | ||
| 273 | |||
| 274 | static acpi_status acpi_run_hpx(struct pci_dev *dev, acpi_handle handle, | ||
| 275 | const struct hotplug_program_ops *hp_ops) | ||
| 223 | { | 276 | { |
| 224 | acpi_status status; | 277 | acpi_status status; |
| 225 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 278 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
| 226 | union acpi_object *package, *record, *fields; | 279 | union acpi_object *package, *record, *fields; |
| 280 | struct hpp_type0 hpx0; | ||
| 281 | struct hpp_type1 hpx1; | ||
| 282 | struct hpp_type2 hpx2; | ||
| 227 | u32 type; | 283 | u32 type; |
| 228 | int i; | 284 | int i; |
| 229 | 285 | ||
| 230 | /* Clear the return buffer with zeros */ | ||
| 231 | memset(hpx, 0, sizeof(struct hotplug_params)); | ||
| 232 | |||
| 233 | status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); | 286 | status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer); |
| 234 | if (ACPI_FAILURE(status)) | 287 | if (ACPI_FAILURE(status)) |
| 235 | return status; | 288 | return status; |
| @@ -257,22 +310,33 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | |||
| 257 | type = fields[0].integer.value; | 310 | type = fields[0].integer.value; |
| 258 | switch (type) { | 311 | switch (type) { |
| 259 | case 0: | 312 | case 0: |
| 260 | status = decode_type0_hpx_record(record, hpx); | 313 | memset(&hpx0, 0, sizeof(hpx0)); |
| 314 | status = decode_type0_hpx_record(record, &hpx0); | ||
| 261 | if (ACPI_FAILURE(status)) | 315 | if (ACPI_FAILURE(status)) |
| 262 | goto exit; | 316 | goto exit; |
| 317 | hp_ops->program_type0(dev, &hpx0); | ||
| 263 | break; | 318 | break; |
| 264 | case 1: | 319 | case 1: |
| 265 | status = decode_type1_hpx_record(record, hpx); | 320 | memset(&hpx1, 0, sizeof(hpx1)); |
| 321 | status = decode_type1_hpx_record(record, &hpx1); | ||
| 266 | if (ACPI_FAILURE(status)) | 322 | if (ACPI_FAILURE(status)) |
| 267 | goto exit; | 323 | goto exit; |
| 324 | hp_ops->program_type1(dev, &hpx1); | ||
| 268 | break; | 325 | break; |
| 269 | case 2: | 326 | case 2: |
| 270 | status = decode_type2_hpx_record(record, hpx); | 327 | memset(&hpx2, 0, sizeof(hpx2)); |
| 328 | status = decode_type2_hpx_record(record, &hpx2); | ||
| 329 | if (ACPI_FAILURE(status)) | ||
| 330 | goto exit; | ||
| 331 | hp_ops->program_type2(dev, &hpx2); | ||
| 332 | break; | ||
| 333 | case 3: | ||
| 334 | status = program_type3_hpx_record(dev, record, hp_ops); | ||
| 271 | if (ACPI_FAILURE(status)) | 335 | if (ACPI_FAILURE(status)) |
| 272 | goto exit; | 336 | goto exit; |
| 273 | break; | 337 | break; |
| 274 | default: | 338 | default: |
| 275 | printk(KERN_ERR "%s: Type %d record not supported\n", | 339 | pr_err("%s: Type %d record not supported\n", |
| 276 | __func__, type); | 340 | __func__, type); |
| 277 | status = AE_ERROR; | 341 | status = AE_ERROR; |
| 278 | goto exit; | 342 | goto exit; |
| @@ -283,14 +347,16 @@ static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx) | |||
| 283 | return status; | 347 | return status; |
| 284 | } | 348 | } |
| 285 | 349 | ||
| 286 | static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) | 350 | static acpi_status acpi_run_hpp(struct pci_dev *dev, acpi_handle handle, |
| 351 | const struct hotplug_program_ops *hp_ops) | ||
| 287 | { | 352 | { |
| 288 | acpi_status status; | 353 | acpi_status status; |
| 289 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 354 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 290 | union acpi_object *package, *fields; | 355 | union acpi_object *package, *fields; |
| 356 | struct hpp_type0 hpp0; | ||
| 291 | int i; | 357 | int i; |
| 292 | 358 | ||
| 293 | memset(hpp, 0, sizeof(struct hotplug_params)); | 359 | memset(&hpp0, 0, sizeof(hpp0)); |
| 294 | 360 | ||
| 295 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); | 361 | status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer); |
| 296 | if (ACPI_FAILURE(status)) | 362 | if (ACPI_FAILURE(status)) |
| @@ -311,12 +377,13 @@ static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp) | |||
| 311 | } | 377 | } |
| 312 | } | 378 | } |
| 313 | 379 | ||
| 314 | hpp->t0 = &hpp->type0_data; | 380 | hpp0.revision = 1; |
| 315 | hpp->t0->revision = 1; | 381 | hpp0.cache_line_size = fields[0].integer.value; |
| 316 | hpp->t0->cache_line_size = fields[0].integer.value; | 382 | hpp0.latency_timer = fields[1].integer.value; |
| 317 | hpp->t0->latency_timer = fields[1].integer.value; | 383 | hpp0.enable_serr = fields[2].integer.value; |
| 318 | hpp->t0->enable_serr = fields[2].integer.value; | 384 | hpp0.enable_perr = fields[3].integer.value; |
| 319 | hpp->t0->enable_perr = fields[3].integer.value; | 385 | |
| 386 | hp_ops->program_type0(dev, &hpp0); | ||
| 320 | 387 | ||
| 321 | exit: | 388 | exit: |
| 322 | kfree(buffer.pointer); | 389 | kfree(buffer.pointer); |
| @@ -328,7 +395,8 @@ exit: | |||
| 328 | * @dev - the pci_dev for which we want parameters | 395 | * @dev - the pci_dev for which we want parameters |
| 329 | * @hpp - allocated by the caller | 396 | * @hpp - allocated by the caller |
| 330 | */ | 397 | */ |
| 331 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) | 398 | int pci_acpi_program_hp_params(struct pci_dev *dev, |
| 399 | const struct hotplug_program_ops *hp_ops) | ||
| 332 | { | 400 | { |
| 333 | acpi_status status; | 401 | acpi_status status; |
| 334 | acpi_handle handle, phandle; | 402 | acpi_handle handle, phandle; |
| @@ -351,10 +419,10 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) | |||
| 351 | * this pci dev. | 419 | * this pci dev. |
| 352 | */ | 420 | */ |
| 353 | while (handle) { | 421 | while (handle) { |
| 354 | status = acpi_run_hpx(handle, hpp); | 422 | status = acpi_run_hpx(dev, handle, hp_ops); |
| 355 | if (ACPI_SUCCESS(status)) | 423 | if (ACPI_SUCCESS(status)) |
| 356 | return 0; | 424 | return 0; |
| 357 | status = acpi_run_hpp(handle, hpp); | 425 | status = acpi_run_hpp(dev, handle, hp_ops); |
| 358 | if (ACPI_SUCCESS(status)) | 426 | if (ACPI_SUCCESS(status)) |
| 359 | return 0; | 427 | return 0; |
| 360 | if (acpi_is_root_bridge(handle)) | 428 | if (acpi_is_root_bridge(handle)) |
| @@ -366,7 +434,6 @@ int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp) | |||
| 366 | } | 434 | } |
| 367 | return -ENODEV; | 435 | return -ENODEV; |
| 368 | } | 436 | } |
| 369 | EXPORT_SYMBOL_GPL(pci_get_hp_params); | ||
| 370 | 437 | ||
| 371 | /** | 438 | /** |
| 372 | * pciehp_is_native - Check whether a hotplug port is handled by the OS | 439 | * pciehp_is_native - Check whether a hotplug port is handled by the OS |
diff --git a/drivers/pci/pci-stub.c b/drivers/pci/pci-stub.c index 66f8a59fadbd..e408099fea52 100644 --- a/drivers/pci/pci-stub.c +++ b/drivers/pci/pci-stub.c | |||
| @@ -66,20 +66,18 @@ static int __init pci_stub_init(void) | |||
| 66 | &class, &class_mask); | 66 | &class, &class_mask); |
| 67 | 67 | ||
| 68 | if (fields < 2) { | 68 | if (fields < 2) { |
| 69 | printk(KERN_WARNING | 69 | pr_warn("pci-stub: invalid ID string \"%s\"\n", id); |
| 70 | "pci-stub: invalid id string \"%s\"\n", id); | ||
| 71 | continue; | 70 | continue; |
| 72 | } | 71 | } |
| 73 | 72 | ||
| 74 | printk(KERN_INFO | 73 | pr_info("pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", |
| 75 | "pci-stub: add %04X:%04X sub=%04X:%04X cls=%08X/%08X\n", | ||
| 76 | vendor, device, subvendor, subdevice, class, class_mask); | 74 | vendor, device, subvendor, subdevice, class, class_mask); |
| 77 | 75 | ||
| 78 | rc = pci_add_dynid(&stub_driver, vendor, device, | 76 | rc = pci_add_dynid(&stub_driver, vendor, device, |
| 79 | subvendor, subdevice, class, class_mask, 0); | 77 | subvendor, subdevice, class, class_mask, 0); |
| 80 | if (rc) | 78 | if (rc) |
| 81 | printk(KERN_WARNING | 79 | pr_warn("pci-stub: failed to add dynamic ID (%d)\n", |
| 82 | "pci-stub: failed to add dynamic id (%d)\n", rc); | 80 | rc); |
| 83 | } | 81 | } |
| 84 | 82 | ||
| 85 | return 0; | 83 | return 0; |
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 25794c27c7a4..6d27475e39b2 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c | |||
| @@ -1111,8 +1111,7 @@ legacy_io_err: | |||
| 1111 | kfree(b->legacy_io); | 1111 | kfree(b->legacy_io); |
| 1112 | b->legacy_io = NULL; | 1112 | b->legacy_io = NULL; |
| 1113 | kzalloc_err: | 1113 | kzalloc_err: |
| 1114 | printk(KERN_WARNING "pci: warning: could not create legacy I/O port and ISA memory resources to sysfs\n"); | 1114 | dev_warn(&b->dev, "could not create legacy I/O port and ISA memory resources in sysfs\n"); |
| 1115 | return; | ||
| 1116 | } | 1115 | } |
| 1117 | 1116 | ||
| 1118 | void pci_remove_legacy_files(struct pci_bus *b) | 1117 | void pci_remove_legacy_files(struct pci_bus *b) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 766f5779db92..8abc843b1615 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
| @@ -197,8 +197,8 @@ EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar); | |||
| 197 | 197 | ||
| 198 | /** | 198 | /** |
| 199 | * pci_dev_str_match_path - test if a path string matches a device | 199 | * pci_dev_str_match_path - test if a path string matches a device |
| 200 | * @dev: the PCI device to test | 200 | * @dev: the PCI device to test |
| 201 | * @path: string to match the device against | 201 | * @path: string to match the device against |
| 202 | * @endptr: pointer to the string after the match | 202 | * @endptr: pointer to the string after the match |
| 203 | * | 203 | * |
| 204 | * Test if a string (typically from a kernel parameter) formatted as a | 204 | * Test if a string (typically from a kernel parameter) formatted as a |
| @@ -280,8 +280,8 @@ free_and_exit: | |||
| 280 | 280 | ||
| 281 | /** | 281 | /** |
| 282 | * pci_dev_str_match - test if a string matches a device | 282 | * pci_dev_str_match - test if a string matches a device |
| 283 | * @dev: the PCI device to test | 283 | * @dev: the PCI device to test |
| 284 | * @p: string to match the device against | 284 | * @p: string to match the device against |
| 285 | * @endptr: pointer to the string after the match | 285 | * @endptr: pointer to the string after the match |
| 286 | * | 286 | * |
| 287 | * Test if a string (typically from a kernel parameter) matches a specified | 287 | * Test if a string (typically from a kernel parameter) matches a specified |
| @@ -341,7 +341,7 @@ static int pci_dev_str_match(struct pci_dev *dev, const char *p, | |||
| 341 | } else { | 341 | } else { |
| 342 | /* | 342 | /* |
| 343 | * PCI Bus, Device, Function IDs are specified | 343 | * PCI Bus, Device, Function IDs are specified |
| 344 | * (optionally, may include a path of devfns following it) | 344 | * (optionally, may include a path of devfns following it) |
| 345 | */ | 345 | */ |
| 346 | ret = pci_dev_str_match_path(dev, p, &p); | 346 | ret = pci_dev_str_match_path(dev, p, &p); |
| 347 | if (ret < 0) | 347 | if (ret < 0) |
| @@ -425,7 +425,7 @@ static int __pci_bus_find_cap_start(struct pci_bus *bus, | |||
| 425 | * Tell if a device supports a given PCI capability. | 425 | * Tell if a device supports a given PCI capability. |
| 426 | * Returns the address of the requested capability structure within the | 426 | * Returns the address of the requested capability structure within the |
| 427 | * device's PCI configuration space or 0 in case the device does not | 427 | * device's PCI configuration space or 0 in case the device does not |
| 428 | * support it. Possible values for @cap: | 428 | * support it. Possible values for @cap include: |
| 429 | * | 429 | * |
| 430 | * %PCI_CAP_ID_PM Power Management | 430 | * %PCI_CAP_ID_PM Power Management |
| 431 | * %PCI_CAP_ID_AGP Accelerated Graphics Port | 431 | * %PCI_CAP_ID_AGP Accelerated Graphics Port |
| @@ -450,11 +450,11 @@ EXPORT_SYMBOL(pci_find_capability); | |||
| 450 | 450 | ||
| 451 | /** | 451 | /** |
| 452 | * pci_bus_find_capability - query for devices' capabilities | 452 | * pci_bus_find_capability - query for devices' capabilities |
| 453 | * @bus: the PCI bus to query | 453 | * @bus: the PCI bus to query |
| 454 | * @devfn: PCI device to query | 454 | * @devfn: PCI device to query |
| 455 | * @cap: capability code | 455 | * @cap: capability code |
| 456 | * | 456 | * |
| 457 | * Like pci_find_capability() but works for pci devices that do not have a | 457 | * Like pci_find_capability() but works for PCI devices that do not have a |
| 458 | * pci_dev structure set up yet. | 458 | * pci_dev structure set up yet. |
| 459 | * | 459 | * |
| 460 | * Returns the address of the requested capability structure within the | 460 | * Returns the address of the requested capability structure within the |
| @@ -535,7 +535,7 @@ EXPORT_SYMBOL_GPL(pci_find_next_ext_capability); | |||
| 535 | * | 535 | * |
| 536 | * Returns the address of the requested extended capability structure | 536 | * Returns the address of the requested extended capability structure |
| 537 | * within the device's PCI configuration space or 0 if the device does | 537 | * within the device's PCI configuration space or 0 if the device does |
| 538 | * not support it. Possible values for @cap: | 538 | * not support it. Possible values for @cap include: |
| 539 | * | 539 | * |
| 540 | * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting | 540 | * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting |
| 541 | * %PCI_EXT_CAP_ID_VC Virtual Channel | 541 | * %PCI_EXT_CAP_ID_VC Virtual Channel |
| @@ -618,12 +618,13 @@ int pci_find_ht_capability(struct pci_dev *dev, int ht_cap) | |||
| 618 | EXPORT_SYMBOL_GPL(pci_find_ht_capability); | 618 | EXPORT_SYMBOL_GPL(pci_find_ht_capability); |
| 619 | 619 | ||
| 620 | /** | 620 | /** |
| 621 | * pci_find_parent_resource - return resource region of parent bus of given region | 621 | * pci_find_parent_resource - return resource region of parent bus of given |
| 622 | * region | ||
| 622 | * @dev: PCI device structure contains resources to be searched | 623 | * @dev: PCI device structure contains resources to be searched |
| 623 | * @res: child resource record for which parent is sought | 624 | * @res: child resource record for which parent is sought |
| 624 | * | 625 | * |
| 625 | * For given resource region of given device, return the resource | 626 | * For given resource region of given device, return the resource region of |
| 626 | * region of parent bus the given region is contained in. | 627 | * parent bus the given region is contained in. |
| 627 | */ | 628 | */ |
| 628 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, | 629 | struct resource *pci_find_parent_resource(const struct pci_dev *dev, |
| 629 | struct resource *res) | 630 | struct resource *res) |
| @@ -800,7 +801,7 @@ static inline bool platform_pci_bridge_d3(struct pci_dev *dev) | |||
| 800 | 801 | ||
| 801 | /** | 802 | /** |
| 802 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of | 803 | * pci_raw_set_power_state - Use PCI PM registers to set the power state of |
| 803 | * given PCI device | 804 | * given PCI device |
| 804 | * @dev: PCI device to handle. | 805 | * @dev: PCI device to handle. |
| 805 | * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. | 806 | * @state: PCI power state (D0, D1, D2, D3hot) to put the device into. |
| 806 | * | 807 | * |
| @@ -826,7 +827,8 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 826 | if (state < PCI_D0 || state > PCI_D3hot) | 827 | if (state < PCI_D0 || state > PCI_D3hot) |
| 827 | return -EINVAL; | 828 | return -EINVAL; |
| 828 | 829 | ||
| 829 | /* Validate current state: | 830 | /* |
| 831 | * Validate current state: | ||
| 830 | * Can enter D0 from any state, but if we can only go deeper | 832 | * Can enter D0 from any state, but if we can only go deeper |
| 831 | * to sleep if we're already in a low power state | 833 | * to sleep if we're already in a low power state |
| 832 | */ | 834 | */ |
| @@ -837,14 +839,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 837 | return -EINVAL; | 839 | return -EINVAL; |
| 838 | } | 840 | } |
| 839 | 841 | ||
| 840 | /* check if this device supports the desired state */ | 842 | /* Check if this device supports the desired state */ |
| 841 | if ((state == PCI_D1 && !dev->d1_support) | 843 | if ((state == PCI_D1 && !dev->d1_support) |
| 842 | || (state == PCI_D2 && !dev->d2_support)) | 844 | || (state == PCI_D2 && !dev->d2_support)) |
| 843 | return -EIO; | 845 | return -EIO; |
| 844 | 846 | ||
| 845 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); | 847 | pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); |
| 846 | 848 | ||
| 847 | /* If we're (effectively) in D3, force entire word to 0. | 849 | /* |
| 850 | * If we're (effectively) in D3, force entire word to 0. | ||
| 848 | * This doesn't affect PME_Status, disables PME_En, and | 851 | * This doesn't affect PME_Status, disables PME_En, and |
| 849 | * sets PowerState to 0. | 852 | * sets PowerState to 0. |
| 850 | */ | 853 | */ |
| @@ -867,11 +870,13 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 867 | break; | 870 | break; |
| 868 | } | 871 | } |
| 869 | 872 | ||
| 870 | /* enter specified state */ | 873 | /* Enter specified state */ |
| 871 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); | 874 | pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); |
| 872 | 875 | ||
| 873 | /* Mandatory power management transition delays */ | 876 | /* |
| 874 | /* see PCI PM 1.1 5.6.1 table 18 */ | 877 | * Mandatory power management transition delays; see PCI PM 1.1 |
| 878 | * 5.6.1 table 18 | ||
| 879 | */ | ||
| 875 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) | 880 | if (state == PCI_D3hot || dev->current_state == PCI_D3hot) |
| 876 | pci_dev_d3_sleep(dev); | 881 | pci_dev_d3_sleep(dev); |
| 877 | else if (state == PCI_D2 || dev->current_state == PCI_D2) | 882 | else if (state == PCI_D2 || dev->current_state == PCI_D2) |
| @@ -1085,16 +1090,18 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 1085 | { | 1090 | { |
| 1086 | int error; | 1091 | int error; |
| 1087 | 1092 | ||
| 1088 | /* bound the state we're entering */ | 1093 | /* Bound the state we're entering */ |
| 1089 | if (state > PCI_D3cold) | 1094 | if (state > PCI_D3cold) |
| 1090 | state = PCI_D3cold; | 1095 | state = PCI_D3cold; |
| 1091 | else if (state < PCI_D0) | 1096 | else if (state < PCI_D0) |
| 1092 | state = PCI_D0; | 1097 | state = PCI_D0; |
| 1093 | else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) | 1098 | else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev)) |
| 1099 | |||
| 1094 | /* | 1100 | /* |
| 1095 | * If the device or the parent bridge do not support PCI PM, | 1101 | * If the device or the parent bridge do not support PCI |
| 1096 | * ignore the request if we're doing anything other than putting | 1102 | * PM, ignore the request if we're doing anything other |
| 1097 | * it into D0 (which would only happen on boot). | 1103 | * than putting it into D0 (which would only happen on |
| 1104 | * boot). | ||
| 1098 | */ | 1105 | */ |
| 1099 | return 0; | 1106 | return 0; |
| 1100 | 1107 | ||
| @@ -1104,8 +1111,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) | |||
| 1104 | 1111 | ||
| 1105 | __pci_start_power_transition(dev, state); | 1112 | __pci_start_power_transition(dev, state); |
| 1106 | 1113 | ||
| 1107 | /* This device is quirked not to be put into D3, so | 1114 | /* |
| 1108 | don't put it in D3 */ | 1115 | * This device is quirked not to be put into D3, so don't put it in |
| 1116 | * D3 | ||
| 1117 | */ | ||
| 1109 | if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) | 1118 | if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) |
| 1110 | return 0; | 1119 | return 0; |
| 1111 | 1120 | ||
| @@ -1127,12 +1136,11 @@ EXPORT_SYMBOL(pci_set_power_state); | |||
| 1127 | * pci_choose_state - Choose the power state of a PCI device | 1136 | * pci_choose_state - Choose the power state of a PCI device |
| 1128 | * @dev: PCI device to be suspended | 1137 | * @dev: PCI device to be suspended |
| 1129 | * @state: target sleep state for the whole system. This is the value | 1138 | * @state: target sleep state for the whole system. This is the value |
| 1130 | * that is passed to suspend() function. | 1139 | * that is passed to suspend() function. |
| 1131 | * | 1140 | * |
| 1132 | * Returns PCI power state suitable for given device and given system | 1141 | * Returns PCI power state suitable for given device and given system |
| 1133 | * message. | 1142 | * message. |
| 1134 | */ | 1143 | */ |
| 1135 | |||
| 1136 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) | 1144 | pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) |
| 1137 | { | 1145 | { |
| 1138 | pci_power_t ret; | 1146 | pci_power_t ret; |
| @@ -1310,8 +1318,9 @@ static void pci_restore_ltr_state(struct pci_dev *dev) | |||
| 1310 | } | 1318 | } |
| 1311 | 1319 | ||
| 1312 | /** | 1320 | /** |
| 1313 | * pci_save_state - save the PCI configuration space of a device before suspending | 1321 | * pci_save_state - save the PCI configuration space of a device before |
| 1314 | * @dev: - PCI device that we're dealing with | 1322 | * suspending |
| 1323 | * @dev: PCI device that we're dealing with | ||
| 1315 | */ | 1324 | */ |
| 1316 | int pci_save_state(struct pci_dev *dev) | 1325 | int pci_save_state(struct pci_dev *dev) |
| 1317 | { | 1326 | { |
| @@ -1422,7 +1431,7 @@ static void pci_restore_rebar_state(struct pci_dev *pdev) | |||
| 1422 | 1431 | ||
| 1423 | /** | 1432 | /** |
| 1424 | * pci_restore_state - Restore the saved state of a PCI device | 1433 | * pci_restore_state - Restore the saved state of a PCI device |
| 1425 | * @dev: - PCI device that we're dealing with | 1434 | * @dev: PCI device that we're dealing with |
| 1426 | */ | 1435 | */ |
| 1427 | void pci_restore_state(struct pci_dev *dev) | 1436 | void pci_restore_state(struct pci_dev *dev) |
| 1428 | { | 1437 | { |
| @@ -1599,8 +1608,8 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) | |||
| 1599 | * pci_reenable_device - Resume abandoned device | 1608 | * pci_reenable_device - Resume abandoned device |
| 1600 | * @dev: PCI device to be resumed | 1609 | * @dev: PCI device to be resumed |
| 1601 | * | 1610 | * |
| 1602 | * Note this function is a backend of pci_default_resume and is not supposed | 1611 | * NOTE: This function is a backend of pci_default_resume() and is not supposed |
| 1603 | * to be called by normal code, write proper resume handler and use it instead. | 1612 | * to be called by normal code, write proper resume handler and use it instead. |
| 1604 | */ | 1613 | */ |
| 1605 | int pci_reenable_device(struct pci_dev *dev) | 1614 | int pci_reenable_device(struct pci_dev *dev) |
| 1606 | { | 1615 | { |
| @@ -1675,9 +1684,9 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags) | |||
| 1675 | * pci_enable_device_io - Initialize a device for use with IO space | 1684 | * pci_enable_device_io - Initialize a device for use with IO space |
| 1676 | * @dev: PCI device to be initialized | 1685 | * @dev: PCI device to be initialized |
| 1677 | * | 1686 | * |
| 1678 | * Initialize device before it's used by a driver. Ask low-level code | 1687 | * Initialize device before it's used by a driver. Ask low-level code |
| 1679 | * to enable I/O resources. Wake up the device if it was suspended. | 1688 | * to enable I/O resources. Wake up the device if it was suspended. |
| 1680 | * Beware, this function can fail. | 1689 | * Beware, this function can fail. |
| 1681 | */ | 1690 | */ |
| 1682 | int pci_enable_device_io(struct pci_dev *dev) | 1691 | int pci_enable_device_io(struct pci_dev *dev) |
| 1683 | { | 1692 | { |
| @@ -1689,9 +1698,9 @@ EXPORT_SYMBOL(pci_enable_device_io); | |||
| 1689 | * pci_enable_device_mem - Initialize a device for use with Memory space | 1698 | * pci_enable_device_mem - Initialize a device for use with Memory space |
| 1690 | * @dev: PCI device to be initialized | 1699 | * @dev: PCI device to be initialized |
| 1691 | * | 1700 | * |
| 1692 | * Initialize device before it's used by a driver. Ask low-level code | 1701 | * Initialize device before it's used by a driver. Ask low-level code |
| 1693 | * to enable Memory resources. Wake up the device if it was suspended. | 1702 | * to enable Memory resources. Wake up the device if it was suspended. |
| 1694 | * Beware, this function can fail. | 1703 | * Beware, this function can fail. |
| 1695 | */ | 1704 | */ |
| 1696 | int pci_enable_device_mem(struct pci_dev *dev) | 1705 | int pci_enable_device_mem(struct pci_dev *dev) |
| 1697 | { | 1706 | { |
| @@ -1703,12 +1712,12 @@ EXPORT_SYMBOL(pci_enable_device_mem); | |||
| 1703 | * pci_enable_device - Initialize device before it's used by a driver. | 1712 | * pci_enable_device - Initialize device before it's used by a driver. |
| 1704 | * @dev: PCI device to be initialized | 1713 | * @dev: PCI device to be initialized |
| 1705 | * | 1714 | * |
| 1706 | * Initialize device before it's used by a driver. Ask low-level code | 1715 | * Initialize device before it's used by a driver. Ask low-level code |
| 1707 | * to enable I/O and memory. Wake up the device if it was suspended. | 1716 | * to enable I/O and memory. Wake up the device if it was suspended. |
| 1708 | * Beware, this function can fail. | 1717 | * Beware, this function can fail. |
| 1709 | * | 1718 | * |
| 1710 | * Note we don't actually enable the device many times if we call | 1719 | * Note we don't actually enable the device many times if we call |
| 1711 | * this function repeatedly (we just increment the count). | 1720 | * this function repeatedly (we just increment the count). |
| 1712 | */ | 1721 | */ |
| 1713 | int pci_enable_device(struct pci_dev *dev) | 1722 | int pci_enable_device(struct pci_dev *dev) |
| 1714 | { | 1723 | { |
| @@ -1717,8 +1726,8 @@ int pci_enable_device(struct pci_dev *dev) | |||
| 1717 | EXPORT_SYMBOL(pci_enable_device); | 1726 | EXPORT_SYMBOL(pci_enable_device); |
| 1718 | 1727 | ||
| 1719 | /* | 1728 | /* |
| 1720 | * Managed PCI resources. This manages device on/off, intx/msi/msix | 1729 | * Managed PCI resources. This manages device on/off, INTx/MSI/MSI-X |
| 1721 | * on/off and BAR regions. pci_dev itself records msi/msix status, so | 1730 | * on/off and BAR regions. pci_dev itself records MSI/MSI-X status, so |
| 1722 | * there's no need to track it separately. pci_devres is initialized | 1731 | * there's no need to track it separately. pci_devres is initialized |
| 1723 | * when a device is enabled using managed PCI device enable interface. | 1732 | * when a device is enabled using managed PCI device enable interface. |
| 1724 | */ | 1733 | */ |
| @@ -1836,7 +1845,8 @@ int __weak pcibios_add_device(struct pci_dev *dev) | |||
| 1836 | } | 1845 | } |
| 1837 | 1846 | ||
| 1838 | /** | 1847 | /** |
| 1839 | * pcibios_release_device - provide arch specific hooks when releasing device dev | 1848 | * pcibios_release_device - provide arch specific hooks when releasing |
| 1849 | * device dev | ||
| 1840 | * @dev: the PCI device being released | 1850 | * @dev: the PCI device being released |
| 1841 | * | 1851 | * |
| 1842 | * Permits the platform to provide architecture specific functionality when | 1852 | * Permits the platform to provide architecture specific functionality when |
| @@ -1927,8 +1937,7 @@ EXPORT_SYMBOL(pci_disable_device); | |||
| 1927 | * @dev: the PCIe device reset | 1937 | * @dev: the PCIe device reset |
| 1928 | * @state: Reset state to enter into | 1938 | * @state: Reset state to enter into |
| 1929 | * | 1939 | * |
| 1930 | * | 1940 | * Set the PCIe reset state for the device. This is the default |
| 1931 | * Sets the PCIe reset state for the device. This is the default | ||
| 1932 | * implementation. Architecture implementations can override this. | 1941 | * implementation. Architecture implementations can override this. |
| 1933 | */ | 1942 | */ |
| 1934 | int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, | 1943 | int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, |
| @@ -1942,7 +1951,6 @@ int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev, | |||
| 1942 | * @dev: the PCIe device reset | 1951 | * @dev: the PCIe device reset |
| 1943 | * @state: Reset state to enter into | 1952 | * @state: Reset state to enter into |
| 1944 | * | 1953 | * |
| 1945 | * | ||
| 1946 | * Sets the PCI reset state for the device. | 1954 | * Sets the PCI reset state for the device. |
| 1947 | */ | 1955 | */ |
| 1948 | int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) | 1956 | int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) |
| @@ -2339,7 +2347,8 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup) | |||
| 2339 | } | 2347 | } |
| 2340 | 2348 | ||
| 2341 | /** | 2349 | /** |
| 2342 | * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state | 2350 | * pci_prepare_to_sleep - prepare PCI device for system-wide transition |
| 2351 | * into a sleep state | ||
| 2343 | * @dev: Device to handle. | 2352 | * @dev: Device to handle. |
| 2344 | * | 2353 | * |
| 2345 | * Choose the power state appropriate for the device depending on whether | 2354 | * Choose the power state appropriate for the device depending on whether |
| @@ -2367,7 +2376,8 @@ int pci_prepare_to_sleep(struct pci_dev *dev) | |||
| 2367 | EXPORT_SYMBOL(pci_prepare_to_sleep); | 2376 | EXPORT_SYMBOL(pci_prepare_to_sleep); |
| 2368 | 2377 | ||
| 2369 | /** | 2378 | /** |
| 2370 | * pci_back_from_sleep - turn PCI device on during system-wide transition into working state | 2379 | * pci_back_from_sleep - turn PCI device on during system-wide transition |
| 2380 | * into working state | ||
| 2371 | * @dev: Device to handle. | 2381 | * @dev: Device to handle. |
| 2372 | * | 2382 | * |
| 2373 | * Disable device's system wake-up capability and put it into D0. | 2383 | * Disable device's system wake-up capability and put it into D0. |
| @@ -2777,14 +2787,14 @@ void pci_pm_init(struct pci_dev *dev) | |||
| 2777 | dev->d2_support = true; | 2787 | dev->d2_support = true; |
| 2778 | 2788 | ||
| 2779 | if (dev->d1_support || dev->d2_support) | 2789 | if (dev->d1_support || dev->d2_support) |
| 2780 | pci_printk(KERN_DEBUG, dev, "supports%s%s\n", | 2790 | pci_info(dev, "supports%s%s\n", |
| 2781 | dev->d1_support ? " D1" : "", | 2791 | dev->d1_support ? " D1" : "", |
| 2782 | dev->d2_support ? " D2" : ""); | 2792 | dev->d2_support ? " D2" : ""); |
| 2783 | } | 2793 | } |
| 2784 | 2794 | ||
| 2785 | pmc &= PCI_PM_CAP_PME_MASK; | 2795 | pmc &= PCI_PM_CAP_PME_MASK; |
| 2786 | if (pmc) { | 2796 | if (pmc) { |
| 2787 | pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n", | 2797 | pci_info(dev, "PME# supported from%s%s%s%s%s\n", |
| 2788 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", | 2798 | (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", |
| 2789 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", | 2799 | (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", |
| 2790 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", | 2800 | (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", |
| @@ -2952,16 +2962,16 @@ static int pci_ea_read(struct pci_dev *dev, int offset) | |||
| 2952 | res->flags = flags; | 2962 | res->flags = flags; |
| 2953 | 2963 | ||
| 2954 | if (bei <= PCI_EA_BEI_BAR5) | 2964 | if (bei <= PCI_EA_BEI_BAR5) |
| 2955 | pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", | 2965 | pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
| 2956 | bei, res, prop); | 2966 | bei, res, prop); |
| 2957 | else if (bei == PCI_EA_BEI_ROM) | 2967 | else if (bei == PCI_EA_BEI_ROM) |
| 2958 | pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", | 2968 | pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n", |
| 2959 | res, prop); | 2969 | res, prop); |
| 2960 | else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) | 2970 | else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5) |
| 2961 | pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", | 2971 | pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n", |
| 2962 | bei - PCI_EA_BEI_VF_BAR0, res, prop); | 2972 | bei - PCI_EA_BEI_VF_BAR0, res, prop); |
| 2963 | else | 2973 | else |
| 2964 | pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", | 2974 | pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n", |
| 2965 | bei, res, prop); | 2975 | bei, res, prop); |
| 2966 | 2976 | ||
| 2967 | out: | 2977 | out: |
| @@ -3005,7 +3015,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev, | |||
| 3005 | 3015 | ||
| 3006 | /** | 3016 | /** |
| 3007 | * _pci_add_cap_save_buffer - allocate buffer for saving given | 3017 | * _pci_add_cap_save_buffer - allocate buffer for saving given |
| 3008 | * capability registers | 3018 | * capability registers |
| 3009 | * @dev: the PCI device | 3019 | * @dev: the PCI device |
| 3010 | * @cap: the capability to allocate the buffer for | 3020 | * @cap: the capability to allocate the buffer for |
| 3011 | * @extended: Standard or Extended capability ID | 3021 | * @extended: Standard or Extended capability ID |
| @@ -3186,7 +3196,7 @@ static void pci_disable_acs_redir(struct pci_dev *dev) | |||
| 3186 | } | 3196 | } |
| 3187 | 3197 | ||
| 3188 | /** | 3198 | /** |
| 3189 | * pci_std_enable_acs - enable ACS on devices using standard ACS capabilites | 3199 | * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities |
| 3190 | * @dev: the PCI device | 3200 | * @dev: the PCI device |
| 3191 | */ | 3201 | */ |
| 3192 | static void pci_std_enable_acs(struct pci_dev *dev) | 3202 | static void pci_std_enable_acs(struct pci_dev *dev) |
| @@ -3609,13 +3619,14 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) | |||
| 3609 | EXPORT_SYMBOL_GPL(pci_common_swizzle); | 3619 | EXPORT_SYMBOL_GPL(pci_common_swizzle); |
| 3610 | 3620 | ||
| 3611 | /** | 3621 | /** |
| 3612 | * pci_release_region - Release a PCI bar | 3622 | * pci_release_region - Release a PCI bar |
| 3613 | * @pdev: PCI device whose resources were previously reserved by pci_request_region | 3623 | * @pdev: PCI device whose resources were previously reserved by |
| 3614 | * @bar: BAR to release | 3624 | * pci_request_region() |
| 3625 | * @bar: BAR to release | ||
| 3615 | * | 3626 | * |
| 3616 | * Releases the PCI I/O and memory resources previously reserved by a | 3627 | * Releases the PCI I/O and memory resources previously reserved by a |
| 3617 | * successful call to pci_request_region. Call this function only | 3628 | * successful call to pci_request_region(). Call this function only |
| 3618 | * after all use of the PCI regions has ceased. | 3629 | * after all use of the PCI regions has ceased. |
| 3619 | */ | 3630 | */ |
| 3620 | void pci_release_region(struct pci_dev *pdev, int bar) | 3631 | void pci_release_region(struct pci_dev *pdev, int bar) |
| 3621 | { | 3632 | { |
| @@ -3637,23 +3648,23 @@ void pci_release_region(struct pci_dev *pdev, int bar) | |||
| 3637 | EXPORT_SYMBOL(pci_release_region); | 3648 | EXPORT_SYMBOL(pci_release_region); |
| 3638 | 3649 | ||
| 3639 | /** | 3650 | /** |
| 3640 | * __pci_request_region - Reserved PCI I/O and memory resource | 3651 | * __pci_request_region - Reserved PCI I/O and memory resource |
| 3641 | * @pdev: PCI device whose resources are to be reserved | 3652 | * @pdev: PCI device whose resources are to be reserved |
| 3642 | * @bar: BAR to be reserved | 3653 | * @bar: BAR to be reserved |
| 3643 | * @res_name: Name to be associated with resource. | 3654 | * @res_name: Name to be associated with resource. |
| 3644 | * @exclusive: whether the region access is exclusive or not | 3655 | * @exclusive: whether the region access is exclusive or not |
| 3645 | * | 3656 | * |
| 3646 | * Mark the PCI region associated with PCI device @pdev BR @bar as | 3657 | * Mark the PCI region associated with PCI device @pdev BAR @bar as |
| 3647 | * being reserved by owner @res_name. Do not access any | 3658 | * being reserved by owner @res_name. Do not access any |
| 3648 | * address inside the PCI regions unless this call returns | 3659 | * address inside the PCI regions unless this call returns |
| 3649 | * successfully. | 3660 | * successfully. |
| 3650 | * | 3661 | * |
| 3651 | * If @exclusive is set, then the region is marked so that userspace | 3662 | * If @exclusive is set, then the region is marked so that userspace |
| 3652 | * is explicitly not allowed to map the resource via /dev/mem or | 3663 | * is explicitly not allowed to map the resource via /dev/mem or |
| 3653 | * sysfs MMIO access. | 3664 | * sysfs MMIO access. |
| 3654 | * | 3665 | * |
| 3655 | * Returns 0 on success, or %EBUSY on error. A warning | 3666 | * Returns 0 on success, or %EBUSY on error. A warning |
| 3656 | * message is also printed on failure. | 3667 | * message is also printed on failure. |
| 3657 | */ | 3668 | */ |
| 3658 | static int __pci_request_region(struct pci_dev *pdev, int bar, | 3669 | static int __pci_request_region(struct pci_dev *pdev, int bar, |
| 3659 | const char *res_name, int exclusive) | 3670 | const char *res_name, int exclusive) |
| @@ -3687,18 +3698,18 @@ err_out: | |||
| 3687 | } | 3698 | } |
| 3688 | 3699 | ||
| 3689 | /** | 3700 | /** |
| 3690 | * pci_request_region - Reserve PCI I/O and memory resource | 3701 | * pci_request_region - Reserve PCI I/O and memory resource |
| 3691 | * @pdev: PCI device whose resources are to be reserved | 3702 | * @pdev: PCI device whose resources are to be reserved |
| 3692 | * @bar: BAR to be reserved | 3703 | * @bar: BAR to be reserved |
| 3693 | * @res_name: Name to be associated with resource | 3704 | * @res_name: Name to be associated with resource |
| 3694 | * | 3705 | * |
| 3695 | * Mark the PCI region associated with PCI device @pdev BAR @bar as | 3706 | * Mark the PCI region associated with PCI device @pdev BAR @bar as |
| 3696 | * being reserved by owner @res_name. Do not access any | 3707 | * being reserved by owner @res_name. Do not access any |
| 3697 | * address inside the PCI regions unless this call returns | 3708 | * address inside the PCI regions unless this call returns |
| 3698 | * successfully. | 3709 | * successfully. |
| 3699 | * | 3710 | * |
| 3700 | * Returns 0 on success, or %EBUSY on error. A warning | 3711 | * Returns 0 on success, or %EBUSY on error. A warning |
| 3701 | * message is also printed on failure. | 3712 | * message is also printed on failure. |
| 3702 | */ | 3713 | */ |
| 3703 | int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) | 3714 | int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) |
| 3704 | { | 3715 | { |
| @@ -3707,31 +3718,6 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) | |||
| 3707 | EXPORT_SYMBOL(pci_request_region); | 3718 | EXPORT_SYMBOL(pci_request_region); |
| 3708 | 3719 | ||
| 3709 | /** | 3720 | /** |
| 3710 | * pci_request_region_exclusive - Reserved PCI I/O and memory resource | ||
| 3711 | * @pdev: PCI device whose resources are to be reserved | ||
| 3712 | * @bar: BAR to be reserved | ||
| 3713 | * @res_name: Name to be associated with resource. | ||
| 3714 | * | ||
| 3715 | * Mark the PCI region associated with PCI device @pdev BR @bar as | ||
| 3716 | * being reserved by owner @res_name. Do not access any | ||
| 3717 | * address inside the PCI regions unless this call returns | ||
| 3718 | * successfully. | ||
| 3719 | * | ||
| 3720 | * Returns 0 on success, or %EBUSY on error. A warning | ||
| 3721 | * message is also printed on failure. | ||
| 3722 | * | ||
| 3723 | * The key difference that _exclusive makes it that userspace is | ||
| 3724 | * explicitly not allowed to map the resource via /dev/mem or | ||
| 3725 | * sysfs. | ||
| 3726 | */ | ||
| 3727 | int pci_request_region_exclusive(struct pci_dev *pdev, int bar, | ||
| 3728 | const char *res_name) | ||
| 3729 | { | ||
| 3730 | return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); | ||
| 3731 | } | ||
| 3732 | EXPORT_SYMBOL(pci_request_region_exclusive); | ||
| 3733 | |||
| 3734 | /** | ||
| 3735 | * pci_release_selected_regions - Release selected PCI I/O and memory resources | 3721 | * pci_release_selected_regions - Release selected PCI I/O and memory resources |
| 3736 | * @pdev: PCI device whose resources were previously reserved | 3722 | * @pdev: PCI device whose resources were previously reserved |
| 3737 | * @bars: Bitmask of BARs to be released | 3723 | * @bars: Bitmask of BARs to be released |
| @@ -3791,12 +3777,13 @@ int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars, | |||
| 3791 | EXPORT_SYMBOL(pci_request_selected_regions_exclusive); | 3777 | EXPORT_SYMBOL(pci_request_selected_regions_exclusive); |
| 3792 | 3778 | ||
| 3793 | /** | 3779 | /** |
| 3794 | * pci_release_regions - Release reserved PCI I/O and memory resources | 3780 | * pci_release_regions - Release reserved PCI I/O and memory resources |
| 3795 | * @pdev: PCI device whose resources were previously reserved by pci_request_regions | 3781 | * @pdev: PCI device whose resources were previously reserved by |
| 3782 | * pci_request_regions() | ||
| 3796 | * | 3783 | * |
| 3797 | * Releases all PCI I/O and memory resources previously reserved by a | 3784 | * Releases all PCI I/O and memory resources previously reserved by a |
| 3798 | * successful call to pci_request_regions. Call this function only | 3785 | * successful call to pci_request_regions(). Call this function only |
| 3799 | * after all use of the PCI regions has ceased. | 3786 | * after all use of the PCI regions has ceased. |
| 3800 | */ | 3787 | */ |
| 3801 | 3788 | ||
| 3802 | void pci_release_regions(struct pci_dev *pdev) | 3789 | void pci_release_regions(struct pci_dev *pdev) |
| @@ -3806,17 +3793,17 @@ void pci_release_regions(struct pci_dev *pdev) | |||
| 3806 | EXPORT_SYMBOL(pci_release_regions); | 3793 | EXPORT_SYMBOL(pci_release_regions); |
| 3807 | 3794 | ||
| 3808 | /** | 3795 | /** |
| 3809 | * pci_request_regions - Reserved PCI I/O and memory resources | 3796 | * pci_request_regions - Reserve PCI I/O and memory resources |
| 3810 | * @pdev: PCI device whose resources are to be reserved | 3797 | * @pdev: PCI device whose resources are to be reserved |
| 3811 | * @res_name: Name to be associated with resource. | 3798 | * @res_name: Name to be associated with resource. |
| 3812 | * | 3799 | * |
| 3813 | * Mark all PCI regions associated with PCI device @pdev as | 3800 | * Mark all PCI regions associated with PCI device @pdev as |
| 3814 | * being reserved by owner @res_name. Do not access any | 3801 | * being reserved by owner @res_name. Do not access any |
| 3815 | * address inside the PCI regions unless this call returns | 3802 | * address inside the PCI regions unless this call returns |
| 3816 | * successfully. | 3803 | * successfully. |
| 3817 | * | 3804 | * |
| 3818 | * Returns 0 on success, or %EBUSY on error. A warning | 3805 | * Returns 0 on success, or %EBUSY on error. A warning |
| 3819 | * message is also printed on failure. | 3806 | * message is also printed on failure. |
| 3820 | */ | 3807 | */ |
| 3821 | int pci_request_regions(struct pci_dev *pdev, const char *res_name) | 3808 | int pci_request_regions(struct pci_dev *pdev, const char *res_name) |
| 3822 | { | 3809 | { |
| @@ -3825,20 +3812,19 @@ int pci_request_regions(struct pci_dev *pdev, const char *res_name) | |||
| 3825 | EXPORT_SYMBOL(pci_request_regions); | 3812 | EXPORT_SYMBOL(pci_request_regions); |
| 3826 | 3813 | ||
| 3827 | /** | 3814 | /** |
| 3828 | * pci_request_regions_exclusive - Reserved PCI I/O and memory resources | 3815 | * pci_request_regions_exclusive - Reserve PCI I/O and memory resources |
| 3829 | * @pdev: PCI device whose resources are to be reserved | 3816 | * @pdev: PCI device whose resources are to be reserved |
| 3830 | * @res_name: Name to be associated with resource. | 3817 | * @res_name: Name to be associated with resource. |
| 3831 | * | 3818 | * |
| 3832 | * Mark all PCI regions associated with PCI device @pdev as | 3819 | * Mark all PCI regions associated with PCI device @pdev as being reserved |
| 3833 | * being reserved by owner @res_name. Do not access any | 3820 | * by owner @res_name. Do not access any address inside the PCI regions |
| 3834 | * address inside the PCI regions unless this call returns | 3821 | * unless this call returns successfully. |
| 3835 | * successfully. | ||
| 3836 | * | 3822 | * |
| 3837 | * pci_request_regions_exclusive() will mark the region so that | 3823 | * pci_request_regions_exclusive() will mark the region so that /dev/mem |
| 3838 | * /dev/mem and the sysfs MMIO access will not be allowed. | 3824 | * and the sysfs MMIO access will not be allowed. |
| 3839 | * | 3825 | * |
| 3840 | * Returns 0 on success, or %EBUSY on error. A warning | 3826 | * Returns 0 on success, or %EBUSY on error. A warning message is also |
| 3841 | * message is also printed on failure. | 3827 | * printed on failure. |
| 3842 | */ | 3828 | */ |
| 3843 | int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) | 3829 | int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name) |
| 3844 | { | 3830 | { |
| @@ -3849,7 +3835,7 @@ EXPORT_SYMBOL(pci_request_regions_exclusive); | |||
| 3849 | 3835 | ||
| 3850 | /* | 3836 | /* |
| 3851 | * Record the PCI IO range (expressed as CPU physical address + size). | 3837 | * Record the PCI IO range (expressed as CPU physical address + size). |
| 3852 | * Return a negative value if an error has occured, zero otherwise | 3838 | * Return a negative value if an error has occurred, zero otherwise |
| 3853 | */ | 3839 | */ |
| 3854 | int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, | 3840 | int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, |
| 3855 | resource_size_t size) | 3841 | resource_size_t size) |
| @@ -3905,14 +3891,14 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address) | |||
| 3905 | } | 3891 | } |
| 3906 | 3892 | ||
| 3907 | /** | 3893 | /** |
| 3908 | * pci_remap_iospace - Remap the memory mapped I/O space | 3894 | * pci_remap_iospace - Remap the memory mapped I/O space |
| 3909 | * @res: Resource describing the I/O space | 3895 | * @res: Resource describing the I/O space |
| 3910 | * @phys_addr: physical address of range to be mapped | 3896 | * @phys_addr: physical address of range to be mapped |
| 3911 | * | 3897 | * |
| 3912 | * Remap the memory mapped I/O space described by the @res | 3898 | * Remap the memory mapped I/O space described by the @res and the CPU |
| 3913 | * and the CPU physical address @phys_addr into virtual address space. | 3899 | * physical address @phys_addr into virtual address space. Only |
| 3914 | * Only architectures that have memory mapped IO functions defined | 3900 | * architectures that have memory mapped IO functions defined (and the |
| 3915 | * (and the PCI_IOBASE value defined) should call this function. | 3901 | * PCI_IOBASE value defined) should call this function. |
| 3916 | */ | 3902 | */ |
| 3917 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) | 3903 | int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) |
| 3918 | { | 3904 | { |
| @@ -3928,8 +3914,10 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) | |||
| 3928 | return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, | 3914 | return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr, |
| 3929 | pgprot_device(PAGE_KERNEL)); | 3915 | pgprot_device(PAGE_KERNEL)); |
| 3930 | #else | 3916 | #else |
| 3931 | /* this architecture does not have memory mapped I/O space, | 3917 | /* |
| 3932 | so this function should never be called */ | 3918 | * This architecture does not have memory mapped I/O space, |
| 3919 | * so this function should never be called | ||
| 3920 | */ | ||
| 3933 | WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); | 3921 | WARN_ONCE(1, "This architecture does not support memory mapped I/O\n"); |
| 3934 | return -ENODEV; | 3922 | return -ENODEV; |
| 3935 | #endif | 3923 | #endif |
| @@ -3937,12 +3925,12 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr) | |||
| 3937 | EXPORT_SYMBOL(pci_remap_iospace); | 3925 | EXPORT_SYMBOL(pci_remap_iospace); |
| 3938 | 3926 | ||
| 3939 | /** | 3927 | /** |
| 3940 | * pci_unmap_iospace - Unmap the memory mapped I/O space | 3928 | * pci_unmap_iospace - Unmap the memory mapped I/O space |
| 3941 | * @res: resource to be unmapped | 3929 | * @res: resource to be unmapped |
| 3942 | * | 3930 | * |
| 3943 | * Unmap the CPU virtual address @res from virtual address space. | 3931 | * Unmap the CPU virtual address @res from virtual address space. Only |
| 3944 | * Only architectures that have memory mapped IO functions defined | 3932 | * architectures that have memory mapped IO functions defined (and the |
| 3945 | * (and the PCI_IOBASE value defined) should call this function. | 3933 | * PCI_IOBASE value defined) should call this function. |
| 3946 | */ | 3934 | */ |
| 3947 | void pci_unmap_iospace(struct resource *res) | 3935 | void pci_unmap_iospace(struct resource *res) |
| 3948 | { | 3936 | { |
| @@ -4185,7 +4173,7 @@ int pci_set_cacheline_size(struct pci_dev *dev) | |||
| 4185 | if (cacheline_size == pci_cache_line_size) | 4173 | if (cacheline_size == pci_cache_line_size) |
| 4186 | return 0; | 4174 | return 0; |
| 4187 | 4175 | ||
| 4188 | pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n", | 4176 | pci_info(dev, "cache line size of %d is not supported\n", |
| 4189 | pci_cache_line_size << 2); | 4177 | pci_cache_line_size << 2); |
| 4190 | 4178 | ||
| 4191 | return -EINVAL; | 4179 | return -EINVAL; |
| @@ -4288,7 +4276,7 @@ EXPORT_SYMBOL(pci_clear_mwi); | |||
| 4288 | * @pdev: the PCI device to operate on | 4276 | * @pdev: the PCI device to operate on |
| 4289 | * @enable: boolean: whether to enable or disable PCI INTx | 4277 | * @enable: boolean: whether to enable or disable PCI INTx |
| 4290 | * | 4278 | * |
| 4291 | * Enables/disables PCI INTx for device dev | 4279 | * Enables/disables PCI INTx for device @pdev |
| 4292 | */ | 4280 | */ |
| 4293 | void pci_intx(struct pci_dev *pdev, int enable) | 4281 | void pci_intx(struct pci_dev *pdev, int enable) |
| 4294 | { | 4282 | { |
| @@ -4364,9 +4352,8 @@ done: | |||
| 4364 | * pci_check_and_mask_intx - mask INTx on pending interrupt | 4352 | * pci_check_and_mask_intx - mask INTx on pending interrupt |
| 4365 | * @dev: the PCI device to operate on | 4353 | * @dev: the PCI device to operate on |
| 4366 | * | 4354 | * |
| 4367 | * Check if the device dev has its INTx line asserted, mask it and | 4355 | * Check if the device dev has its INTx line asserted, mask it and return |
| 4368 | * return true in that case. False is returned if no interrupt was | 4356 | * true in that case. False is returned if no interrupt was pending. |
| 4369 | * pending. | ||
| 4370 | */ | 4357 | */ |
| 4371 | bool pci_check_and_mask_intx(struct pci_dev *dev) | 4358 | bool pci_check_and_mask_intx(struct pci_dev *dev) |
| 4372 | { | 4359 | { |
| @@ -4378,9 +4365,9 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx); | |||
| 4378 | * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending | 4365 | * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending |
| 4379 | * @dev: the PCI device to operate on | 4366 | * @dev: the PCI device to operate on |
| 4380 | * | 4367 | * |
| 4381 | * Check if the device dev has its INTx line asserted, unmask it if not | 4368 | * Check if the device dev has its INTx line asserted, unmask it if not and |
| 4382 | * and return true. False is returned and the mask remains active if | 4369 | * return true. False is returned and the mask remains active if there was |
| 4383 | * there was still an interrupt pending. | 4370 | * still an interrupt pending. |
| 4384 | */ | 4371 | */ |
| 4385 | bool pci_check_and_unmask_intx(struct pci_dev *dev) | 4372 | bool pci_check_and_unmask_intx(struct pci_dev *dev) |
| 4386 | { | 4373 | { |
| @@ -4389,7 +4376,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev) | |||
| 4389 | EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); | 4376 | EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx); |
| 4390 | 4377 | ||
| 4391 | /** | 4378 | /** |
| 4392 | * pci_wait_for_pending_transaction - waits for pending transaction | 4379 | * pci_wait_for_pending_transaction - wait for pending transaction |
| 4393 | * @dev: the PCI device to operate on | 4380 | * @dev: the PCI device to operate on |
| 4394 | * | 4381 | * |
| 4395 | * Return 0 if transaction is pending 1 otherwise. | 4382 | * Return 0 if transaction is pending 1 otherwise. |
| @@ -4447,7 +4434,7 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout) | |||
| 4447 | 4434 | ||
| 4448 | /** | 4435 | /** |
| 4449 | * pcie_has_flr - check if a device supports function level resets | 4436 | * pcie_has_flr - check if a device supports function level resets |
| 4450 | * @dev: device to check | 4437 | * @dev: device to check |
| 4451 | * | 4438 | * |
| 4452 | * Returns true if the device advertises support for PCIe function level | 4439 | * Returns true if the device advertises support for PCIe function level |
| 4453 | * resets. | 4440 | * resets. |
| @@ -4466,7 +4453,7 @@ EXPORT_SYMBOL_GPL(pcie_has_flr); | |||
| 4466 | 4453 | ||
| 4467 | /** | 4454 | /** |
| 4468 | * pcie_flr - initiate a PCIe function level reset | 4455 | * pcie_flr - initiate a PCIe function level reset |
| 4469 | * @dev: device to reset | 4456 | * @dev: device to reset |
| 4470 | * | 4457 | * |
| 4471 | * Initiate a function level reset on @dev. The caller should ensure the | 4458 | * Initiate a function level reset on @dev. The caller should ensure the |
| 4472 | * device supports FLR before calling this function, e.g. by using the | 4459 | * device supports FLR before calling this function, e.g. by using the |
| @@ -4810,6 +4797,7 @@ static void pci_dev_restore(struct pci_dev *dev) | |||
| 4810 | * | 4797 | * |
| 4811 | * The device function is presumed to be unused and the caller is holding | 4798 | * The device function is presumed to be unused and the caller is holding |
| 4812 | * the device mutex lock when this function is called. | 4799 | * the device mutex lock when this function is called. |
| 4800 | * | ||
| 4813 | * Resetting the device will make the contents of PCI configuration space | 4801 | * Resetting the device will make the contents of PCI configuration space |
| 4814 | * random, so any caller of this must be prepared to reinitialise the | 4802 | * random, so any caller of this must be prepared to reinitialise the |
| 4815 | * device including MSI, bus mastering, BARs, decoding IO and memory spaces, | 4803 | * device including MSI, bus mastering, BARs, decoding IO and memory spaces, |
| @@ -5373,8 +5361,8 @@ EXPORT_SYMBOL_GPL(pci_reset_bus); | |||
| 5373 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count | 5361 | * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count |
| 5374 | * @dev: PCI device to query | 5362 | * @dev: PCI device to query |
| 5375 | * | 5363 | * |
| 5376 | * Returns mmrbc: maximum designed memory read count in bytes | 5364 | * Returns mmrbc: maximum designed memory read count in bytes or |
| 5377 | * or appropriate error value. | 5365 | * appropriate error value. |
| 5378 | */ | 5366 | */ |
| 5379 | int pcix_get_max_mmrbc(struct pci_dev *dev) | 5367 | int pcix_get_max_mmrbc(struct pci_dev *dev) |
| 5380 | { | 5368 | { |
| @@ -5396,8 +5384,8 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc); | |||
| 5396 | * pcix_get_mmrbc - get PCI-X maximum memory read byte count | 5384 | * pcix_get_mmrbc - get PCI-X maximum memory read byte count |
| 5397 | * @dev: PCI device to query | 5385 | * @dev: PCI device to query |
| 5398 | * | 5386 | * |
| 5399 | * Returns mmrbc: maximum memory read count in bytes | 5387 | * Returns mmrbc: maximum memory read count in bytes or appropriate error |
| 5400 | * or appropriate error value. | 5388 | * value. |
| 5401 | */ | 5389 | */ |
| 5402 | int pcix_get_mmrbc(struct pci_dev *dev) | 5390 | int pcix_get_mmrbc(struct pci_dev *dev) |
| 5403 | { | 5391 | { |
| @@ -5421,7 +5409,7 @@ EXPORT_SYMBOL(pcix_get_mmrbc); | |||
| 5421 | * @mmrbc: maximum memory read count in bytes | 5409 | * @mmrbc: maximum memory read count in bytes |
| 5422 | * valid values are 512, 1024, 2048, 4096 | 5410 | * valid values are 512, 1024, 2048, 4096 |
| 5423 | * | 5411 | * |
| 5424 | * If possible sets maximum memory read byte count, some bridges have erratas | 5412 | * If possible sets maximum memory read byte count, some bridges have errata |
| 5425 | * that prevent this. | 5413 | * that prevent this. |
| 5426 | */ | 5414 | */ |
| 5427 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) | 5415 | int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc) |
| @@ -5466,8 +5454,7 @@ EXPORT_SYMBOL(pcix_set_mmrbc); | |||
| 5466 | * pcie_get_readrq - get PCI Express read request size | 5454 | * pcie_get_readrq - get PCI Express read request size |
| 5467 | * @dev: PCI device to query | 5455 | * @dev: PCI device to query |
| 5468 | * | 5456 | * |
| 5469 | * Returns maximum memory read request in bytes | 5457 | * Returns maximum memory read request in bytes or appropriate error value. |
| 5470 | * or appropriate error value. | ||
| 5471 | */ | 5458 | */ |
| 5472 | int pcie_get_readrq(struct pci_dev *dev) | 5459 | int pcie_get_readrq(struct pci_dev *dev) |
| 5473 | { | 5460 | { |
| @@ -5495,10 +5482,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) | |||
| 5495 | return -EINVAL; | 5482 | return -EINVAL; |
| 5496 | 5483 | ||
| 5497 | /* | 5484 | /* |
| 5498 | * If using the "performance" PCIe config, we clamp the | 5485 | * If using the "performance" PCIe config, we clamp the read rq |
| 5499 | * read rq size to the max packet size to prevent the | 5486 | * size to the max packet size to keep the host bridge from |
| 5500 | * host bridge generating requests larger than we can | 5487 | * generating requests larger than we can cope with. |
| 5501 | * cope with | ||
| 5502 | */ | 5488 | */ |
| 5503 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { | 5489 | if (pcie_bus_config == PCIE_BUS_PERFORMANCE) { |
| 5504 | int mps = pcie_get_mps(dev); | 5490 | int mps = pcie_get_mps(dev); |
| @@ -6144,6 +6130,7 @@ static int of_pci_bus_find_domain_nr(struct device *parent) | |||
| 6144 | 6130 | ||
| 6145 | if (parent) | 6131 | if (parent) |
| 6146 | domain = of_get_pci_domain_nr(parent->of_node); | 6132 | domain = of_get_pci_domain_nr(parent->of_node); |
| 6133 | |||
| 6147 | /* | 6134 | /* |
| 6148 | * Check DT domain and use_dt_domains values. | 6135 | * Check DT domain and use_dt_domains values. |
| 6149 | * | 6136 | * |
| @@ -6264,8 +6251,7 @@ static int __init pci_setup(char *str) | |||
| 6264 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { | 6251 | } else if (!strncmp(str, "disable_acs_redir=", 18)) { |
| 6265 | disable_acs_redir_param = str + 18; | 6252 | disable_acs_redir_param = str + 18; |
| 6266 | } else { | 6253 | } else { |
| 6267 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 6254 | pr_err("PCI: Unknown option `%s'\n", str); |
| 6268 | str); | ||
| 6269 | } | 6255 | } |
| 6270 | } | 6256 | } |
| 6271 | str = k; | 6257 | str = k; |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d994839a3e24..9cb99380c61e 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
| @@ -597,7 +597,7 @@ void pci_aer_clear_fatal_status(struct pci_dev *dev); | |||
| 597 | void pci_aer_clear_device_status(struct pci_dev *dev); | 597 | void pci_aer_clear_device_status(struct pci_dev *dev); |
| 598 | #else | 598 | #else |
| 599 | static inline void pci_no_aer(void) { } | 599 | static inline void pci_no_aer(void) { } |
| 600 | static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; } | 600 | static inline void pci_aer_init(struct pci_dev *d) { } |
| 601 | static inline void pci_aer_exit(struct pci_dev *d) { } | 601 | static inline void pci_aer_exit(struct pci_dev *d) { } |
| 602 | static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } | 602 | static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } |
| 603 | static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } | 603 | static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } |
diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index f8fc2114ad39..b45bc47d04fe 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c | |||
| @@ -12,6 +12,9 @@ | |||
| 12 | * Andrew Patterson <andrew.patterson@hp.com> | 12 | * Andrew Patterson <andrew.patterson@hp.com> |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #define pr_fmt(fmt) "AER: " fmt | ||
| 16 | #define dev_fmt pr_fmt | ||
| 17 | |||
| 15 | #include <linux/cper.h> | 18 | #include <linux/cper.h> |
| 16 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
| 17 | #include <linux/pci-acpi.h> | 20 | #include <linux/pci-acpi.h> |
| @@ -779,10 +782,11 @@ static void aer_print_port_info(struct pci_dev *dev, struct aer_err_info *info) | |||
| 779 | u8 bus = info->id >> 8; | 782 | u8 bus = info->id >> 8; |
| 780 | u8 devfn = info->id & 0xff; | 783 | u8 devfn = info->id & 0xff; |
| 781 | 784 | ||
| 782 | pci_info(dev, "AER: %s%s error received: %04x:%02x:%02x.%d\n", | 785 | pci_info(dev, "%s%s error received: %04x:%02x:%02x.%d\n", |
| 783 | info->multi_error_valid ? "Multiple " : "", | 786 | info->multi_error_valid ? "Multiple " : "", |
| 784 | aer_error_severity_string[info->severity], | 787 | aer_error_severity_string[info->severity], |
| 785 | pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 788 | pci_domain_nr(dev->bus), bus, PCI_SLOT(devfn), |
| 789 | PCI_FUNC(devfn)); | ||
| 786 | } | 790 | } |
| 787 | 791 | ||
| 788 | #ifdef CONFIG_ACPI_APEI_PCIEAER | 792 | #ifdef CONFIG_ACPI_APEI_PCIEAER |
| @@ -964,8 +968,7 @@ static bool find_source_device(struct pci_dev *parent, | |||
| 964 | pci_walk_bus(parent->subordinate, find_device_iter, e_info); | 968 | pci_walk_bus(parent->subordinate, find_device_iter, e_info); |
| 965 | 969 | ||
| 966 | if (!e_info->error_dev_num) { | 970 | if (!e_info->error_dev_num) { |
| 967 | pci_printk(KERN_DEBUG, parent, "can't find device of ID%04x\n", | 971 | pci_info(parent, "can't find device of ID%04x\n", e_info->id); |
| 968 | e_info->id); | ||
| 969 | return false; | 972 | return false; |
| 970 | } | 973 | } |
| 971 | return true; | 974 | return true; |
| @@ -1377,25 +1380,24 @@ static int aer_probe(struct pcie_device *dev) | |||
| 1377 | int status; | 1380 | int status; |
| 1378 | struct aer_rpc *rpc; | 1381 | struct aer_rpc *rpc; |
| 1379 | struct device *device = &dev->device; | 1382 | struct device *device = &dev->device; |
| 1383 | struct pci_dev *port = dev->port; | ||
| 1380 | 1384 | ||
| 1381 | rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); | 1385 | rpc = devm_kzalloc(device, sizeof(struct aer_rpc), GFP_KERNEL); |
| 1382 | if (!rpc) { | 1386 | if (!rpc) |
| 1383 | dev_printk(KERN_DEBUG, device, "alloc AER rpc failed\n"); | ||
| 1384 | return -ENOMEM; | 1387 | return -ENOMEM; |
| 1385 | } | 1388 | |
| 1386 | rpc->rpd = dev->port; | 1389 | rpc->rpd = port; |
| 1387 | set_service_data(dev, rpc); | 1390 | set_service_data(dev, rpc); |
| 1388 | 1391 | ||
| 1389 | status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, | 1392 | status = devm_request_threaded_irq(device, dev->irq, aer_irq, aer_isr, |
| 1390 | IRQF_SHARED, "aerdrv", dev); | 1393 | IRQF_SHARED, "aerdrv", dev); |
| 1391 | if (status) { | 1394 | if (status) { |
| 1392 | dev_printk(KERN_DEBUG, device, "request AER IRQ %d failed\n", | 1395 | pci_err(port, "request AER IRQ %d failed\n", dev->irq); |
| 1393 | dev->irq); | ||
| 1394 | return status; | 1396 | return status; |
| 1395 | } | 1397 | } |
| 1396 | 1398 | ||
| 1397 | aer_enable_rootport(rpc); | 1399 | aer_enable_rootport(rpc); |
| 1398 | dev_info(device, "AER enabled with IRQ %d\n", dev->irq); | 1400 | pci_info(port, "enabled with IRQ %d\n", dev->irq); |
| 1399 | return 0; | 1401 | return 0; |
| 1400 | } | 1402 | } |
| 1401 | 1403 | ||
| @@ -1419,7 +1421,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) | |||
| 1419 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); | 1421 | pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32); |
| 1420 | 1422 | ||
| 1421 | rc = pci_bus_error_reset(dev); | 1423 | rc = pci_bus_error_reset(dev); |
| 1422 | pci_printk(KERN_DEBUG, dev, "Root Port link has been reset\n"); | 1424 | pci_info(dev, "Root Port link has been reset\n"); |
| 1423 | 1425 | ||
| 1424 | /* Clear Root Error Status */ | 1426 | /* Clear Root Error Status */ |
| 1425 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); | 1427 | pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, ®32); |
diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 95d4759664b3..043b8b0cfcc5 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | * Huang Ying <ying.huang@intel.com> | 12 | * Huang Ying <ying.huang@intel.com> |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #define dev_fmt(fmt) "aer_inject: " fmt | ||
| 16 | |||
| 15 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 16 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 17 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
| @@ -332,14 +334,14 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 332 | return -ENODEV; | 334 | return -ENODEV; |
| 333 | rpdev = pcie_find_root_port(dev); | 335 | rpdev = pcie_find_root_port(dev); |
| 334 | if (!rpdev) { | 336 | if (!rpdev) { |
| 335 | pci_err(dev, "aer_inject: Root port not found\n"); | 337 | pci_err(dev, "Root port not found\n"); |
| 336 | ret = -ENODEV; | 338 | ret = -ENODEV; |
| 337 | goto out_put; | 339 | goto out_put; |
| 338 | } | 340 | } |
| 339 | 341 | ||
| 340 | pos_cap_err = dev->aer_cap; | 342 | pos_cap_err = dev->aer_cap; |
| 341 | if (!pos_cap_err) { | 343 | if (!pos_cap_err) { |
| 342 | pci_err(dev, "aer_inject: Device doesn't support AER\n"); | 344 | pci_err(dev, "Device doesn't support AER\n"); |
| 343 | ret = -EPROTONOSUPPORT; | 345 | ret = -EPROTONOSUPPORT; |
| 344 | goto out_put; | 346 | goto out_put; |
| 345 | } | 347 | } |
| @@ -350,7 +352,7 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 350 | 352 | ||
| 351 | rp_pos_cap_err = rpdev->aer_cap; | 353 | rp_pos_cap_err = rpdev->aer_cap; |
| 352 | if (!rp_pos_cap_err) { | 354 | if (!rp_pos_cap_err) { |
| 353 | pci_err(rpdev, "aer_inject: Root port doesn't support AER\n"); | 355 | pci_err(rpdev, "Root port doesn't support AER\n"); |
| 354 | ret = -EPROTONOSUPPORT; | 356 | ret = -EPROTONOSUPPORT; |
| 355 | goto out_put; | 357 | goto out_put; |
| 356 | } | 358 | } |
| @@ -398,14 +400,14 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 398 | if (!aer_mask_override && einj->cor_status && | 400 | if (!aer_mask_override && einj->cor_status && |
| 399 | !(einj->cor_status & ~cor_mask)) { | 401 | !(einj->cor_status & ~cor_mask)) { |
| 400 | ret = -EINVAL; | 402 | ret = -EINVAL; |
| 401 | pci_warn(dev, "aer_inject: The correctable error(s) is masked by device\n"); | 403 | pci_warn(dev, "The correctable error(s) is masked by device\n"); |
| 402 | spin_unlock_irqrestore(&inject_lock, flags); | 404 | spin_unlock_irqrestore(&inject_lock, flags); |
| 403 | goto out_put; | 405 | goto out_put; |
| 404 | } | 406 | } |
| 405 | if (!aer_mask_override && einj->uncor_status && | 407 | if (!aer_mask_override && einj->uncor_status && |
| 406 | !(einj->uncor_status & ~uncor_mask)) { | 408 | !(einj->uncor_status & ~uncor_mask)) { |
| 407 | ret = -EINVAL; | 409 | ret = -EINVAL; |
| 408 | pci_warn(dev, "aer_inject: The uncorrectable error(s) is masked by device\n"); | 410 | pci_warn(dev, "The uncorrectable error(s) is masked by device\n"); |
| 409 | spin_unlock_irqrestore(&inject_lock, flags); | 411 | spin_unlock_irqrestore(&inject_lock, flags); |
| 410 | goto out_put; | 412 | goto out_put; |
| 411 | } | 413 | } |
| @@ -460,19 +462,17 @@ static int aer_inject(struct aer_error_inj *einj) | |||
| 460 | if (device) { | 462 | if (device) { |
| 461 | edev = to_pcie_device(device); | 463 | edev = to_pcie_device(device); |
| 462 | if (!get_service_data(edev)) { | 464 | if (!get_service_data(edev)) { |
| 463 | dev_warn(&edev->device, | 465 | pci_warn(edev->port, "AER service is not initialized\n"); |
| 464 | "aer_inject: AER service is not initialized\n"); | ||
| 465 | ret = -EPROTONOSUPPORT; | 466 | ret = -EPROTONOSUPPORT; |
| 466 | goto out_put; | 467 | goto out_put; |
| 467 | } | 468 | } |
| 468 | dev_info(&edev->device, | 469 | pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n", |
| 469 | "aer_inject: Injecting errors %08x/%08x into device %s\n", | ||
| 470 | einj->cor_status, einj->uncor_status, pci_name(dev)); | 470 | einj->cor_status, einj->uncor_status, pci_name(dev)); |
| 471 | local_irq_disable(); | 471 | local_irq_disable(); |
| 472 | generic_handle_irq(edev->irq); | 472 | generic_handle_irq(edev->irq); |
| 473 | local_irq_enable(); | 473 | local_irq_enable(); |
| 474 | } else { | 474 | } else { |
| 475 | pci_err(rpdev, "aer_inject: AER device not found\n"); | 475 | pci_err(rpdev, "AER device not found\n"); |
| 476 | ret = -ENODEV; | 476 | ret = -ENODEV; |
| 477 | } | 477 | } |
| 478 | out_put: | 478 | out_put: |
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 727e3c1ef9a4..fd4cb75088f9 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c | |||
| @@ -196,6 +196,36 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) | |||
| 196 | link->clkpm_capable = (blacklist) ? 0 : capable; | 196 | link->clkpm_capable = (blacklist) ? 0 : capable; |
| 197 | } | 197 | } |
| 198 | 198 | ||
| 199 | static bool pcie_retrain_link(struct pcie_link_state *link) | ||
| 200 | { | ||
| 201 | struct pci_dev *parent = link->pdev; | ||
| 202 | unsigned long end_jiffies; | ||
| 203 | u16 reg16; | ||
| 204 | |||
| 205 | pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); | ||
| 206 | reg16 |= PCI_EXP_LNKCTL_RL; | ||
| 207 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); | ||
| 208 | if (parent->clear_retrain_link) { | ||
| 209 | /* | ||
| 210 | * Due to an erratum in some devices the Retrain Link bit | ||
| 211 | * needs to be cleared again manually to allow the link | ||
| 212 | * training to succeed. | ||
| 213 | */ | ||
| 214 | reg16 &= ~PCI_EXP_LNKCTL_RL; | ||
| 215 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); | ||
| 216 | } | ||
| 217 | |||
| 218 | /* Wait for link training end. Break out after waiting for timeout */ | ||
| 219 | end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; | ||
| 220 | do { | ||
| 221 | pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); | ||
| 222 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | ||
| 223 | break; | ||
| 224 | msleep(1); | ||
| 225 | } while (time_before(jiffies, end_jiffies)); | ||
| 226 | return !(reg16 & PCI_EXP_LNKSTA_LT); | ||
| 227 | } | ||
| 228 | |||
| 199 | /* | 229 | /* |
| 200 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link | 230 | * pcie_aspm_configure_common_clock: check if the 2 ends of a link |
| 201 | * could use common clock. If they are, configure them to use the | 231 | * could use common clock. If they are, configure them to use the |
| @@ -205,7 +235,6 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) | |||
| 205 | { | 235 | { |
| 206 | int same_clock = 1; | 236 | int same_clock = 1; |
| 207 | u16 reg16, parent_reg, child_reg[8]; | 237 | u16 reg16, parent_reg, child_reg[8]; |
| 208 | unsigned long start_jiffies; | ||
| 209 | struct pci_dev *child, *parent = link->pdev; | 238 | struct pci_dev *child, *parent = link->pdev; |
| 210 | struct pci_bus *linkbus = parent->subordinate; | 239 | struct pci_bus *linkbus = parent->subordinate; |
| 211 | /* | 240 | /* |
| @@ -263,21 +292,7 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) | |||
| 263 | reg16 &= ~PCI_EXP_LNKCTL_CCC; | 292 | reg16 &= ~PCI_EXP_LNKCTL_CCC; |
| 264 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); | 293 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); |
| 265 | 294 | ||
| 266 | /* Retrain link */ | 295 | if (pcie_retrain_link(link)) |
| 267 | reg16 |= PCI_EXP_LNKCTL_RL; | ||
| 268 | pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); | ||
| 269 | |||
| 270 | /* Wait for link training end. Break out after waiting for timeout */ | ||
| 271 | start_jiffies = jiffies; | ||
| 272 | for (;;) { | ||
| 273 | pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); | ||
| 274 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | ||
| 275 | break; | ||
| 276 | if (time_after(jiffies, start_jiffies + LINK_RETRAIN_TIMEOUT)) | ||
| 277 | break; | ||
| 278 | msleep(1); | ||
| 279 | } | ||
| 280 | if (!(reg16 & PCI_EXP_LNKSTA_LT)) | ||
| 281 | return; | 296 | return; |
| 282 | 297 | ||
| 283 | /* Training failed. Restore common clock configurations */ | 298 | /* Training failed. Restore common clock configurations */ |
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c index 4fa9e3523ee1..77e685771487 100644 --- a/drivers/pci/pcie/bw_notification.c +++ b/drivers/pci/pcie/bw_notification.c | |||
| @@ -107,11 +107,25 @@ static void pcie_bandwidth_notification_remove(struct pcie_device *srv) | |||
| 107 | free_irq(srv->irq, srv); | 107 | free_irq(srv->irq, srv); |
| 108 | } | 108 | } |
| 109 | 109 | ||
| 110 | static int pcie_bandwidth_notification_suspend(struct pcie_device *srv) | ||
| 111 | { | ||
| 112 | pcie_disable_link_bandwidth_notification(srv->port); | ||
| 113 | return 0; | ||
| 114 | } | ||
| 115 | |||
| 116 | static int pcie_bandwidth_notification_resume(struct pcie_device *srv) | ||
| 117 | { | ||
| 118 | pcie_enable_link_bandwidth_notification(srv->port); | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 110 | static struct pcie_port_service_driver pcie_bandwidth_notification_driver = { | 122 | static struct pcie_port_service_driver pcie_bandwidth_notification_driver = { |
| 111 | .name = "pcie_bw_notification", | 123 | .name = "pcie_bw_notification", |
| 112 | .port_type = PCIE_ANY_PORT, | 124 | .port_type = PCIE_ANY_PORT, |
| 113 | .service = PCIE_PORT_SERVICE_BWNOTIF, | 125 | .service = PCIE_PORT_SERVICE_BWNOTIF, |
| 114 | .probe = pcie_bandwidth_notification_probe, | 126 | .probe = pcie_bandwidth_notification_probe, |
| 127 | .suspend = pcie_bandwidth_notification_suspend, | ||
| 128 | .resume = pcie_bandwidth_notification_resume, | ||
| 115 | .remove = pcie_bandwidth_notification_remove, | 129 | .remove = pcie_bandwidth_notification_remove, |
| 116 | }; | 130 | }; |
| 117 | 131 | ||
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c index 7b77754a82de..a32ec3487a8d 100644 --- a/drivers/pci/pcie/dpc.c +++ b/drivers/pci/pcie/dpc.c | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | * Copyright (C) 2016 Intel Corp. | 6 | * Copyright (C) 2016 Intel Corp. |
| 7 | */ | 7 | */ |
| 8 | 8 | ||
| 9 | #define dev_fmt(fmt) "DPC: " fmt | ||
| 10 | |||
| 9 | #include <linux/aer.h> | 11 | #include <linux/aer.h> |
| 10 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
| 11 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
| @@ -100,7 +102,6 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) | |||
| 100 | { | 102 | { |
| 101 | unsigned long timeout = jiffies + HZ; | 103 | unsigned long timeout = jiffies + HZ; |
| 102 | struct pci_dev *pdev = dpc->dev->port; | 104 | struct pci_dev *pdev = dpc->dev->port; |
| 103 | struct device *dev = &dpc->dev->device; | ||
| 104 | u16 cap = dpc->cap_pos, status; | 105 | u16 cap = dpc->cap_pos, status; |
| 105 | 106 | ||
| 106 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); | 107 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); |
| @@ -110,7 +111,7 @@ static int dpc_wait_rp_inactive(struct dpc_dev *dpc) | |||
| 110 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); | 111 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); |
| 111 | } | 112 | } |
| 112 | if (status & PCI_EXP_DPC_RP_BUSY) { | 113 | if (status & PCI_EXP_DPC_RP_BUSY) { |
| 113 | dev_warn(dev, "DPC root port still busy\n"); | 114 | pci_warn(pdev, "root port still busy\n"); |
| 114 | return -EBUSY; | 115 | return -EBUSY; |
| 115 | } | 116 | } |
| 116 | return 0; | 117 | return 0; |
| @@ -148,7 +149,6 @@ static pci_ers_result_t dpc_reset_link(struct pci_dev *pdev) | |||
| 148 | 149 | ||
| 149 | static void dpc_process_rp_pio_error(struct dpc_dev *dpc) | 150 | static void dpc_process_rp_pio_error(struct dpc_dev *dpc) |
| 150 | { | 151 | { |
| 151 | struct device *dev = &dpc->dev->device; | ||
| 152 | struct pci_dev *pdev = dpc->dev->port; | 152 | struct pci_dev *pdev = dpc->dev->port; |
| 153 | u16 cap = dpc->cap_pos, dpc_status, first_error; | 153 | u16 cap = dpc->cap_pos, dpc_status, first_error; |
| 154 | u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; | 154 | u32 status, mask, sev, syserr, exc, dw0, dw1, dw2, dw3, log, prefix; |
| @@ -156,13 +156,13 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) | |||
| 156 | 156 | ||
| 157 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status); | 157 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, &status); |
| 158 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask); | 158 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_MASK, &mask); |
| 159 | dev_err(dev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", | 159 | pci_err(pdev, "rp_pio_status: %#010x, rp_pio_mask: %#010x\n", |
| 160 | status, mask); | 160 | status, mask); |
| 161 | 161 | ||
| 162 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev); | 162 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SEVERITY, &sev); |
| 163 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr); | 163 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_SYSERROR, &syserr); |
| 164 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc); | 164 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_EXCEPTION, &exc); |
| 165 | dev_err(dev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", | 165 | pci_err(pdev, "RP PIO severity=%#010x, syserror=%#010x, exception=%#010x\n", |
| 166 | sev, syserr, exc); | 166 | sev, syserr, exc); |
| 167 | 167 | ||
| 168 | /* Get First Error Pointer */ | 168 | /* Get First Error Pointer */ |
| @@ -171,7 +171,7 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) | |||
| 171 | 171 | ||
| 172 | for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { | 172 | for (i = 0; i < ARRAY_SIZE(rp_pio_error_string); i++) { |
| 173 | if ((status & ~mask) & (1 << i)) | 173 | if ((status & ~mask) & (1 << i)) |
| 174 | dev_err(dev, "[%2d] %s%s\n", i, rp_pio_error_string[i], | 174 | pci_err(pdev, "[%2d] %s%s\n", i, rp_pio_error_string[i], |
| 175 | first_error == i ? " (First)" : ""); | 175 | first_error == i ? " (First)" : ""); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| @@ -185,18 +185,18 @@ static void dpc_process_rp_pio_error(struct dpc_dev *dpc) | |||
| 185 | &dw2); | 185 | &dw2); |
| 186 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, | 186 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_HEADER_LOG + 12, |
| 187 | &dw3); | 187 | &dw3); |
| 188 | dev_err(dev, "TLP Header: %#010x %#010x %#010x %#010x\n", | 188 | pci_err(pdev, "TLP Header: %#010x %#010x %#010x %#010x\n", |
| 189 | dw0, dw1, dw2, dw3); | 189 | dw0, dw1, dw2, dw3); |
| 190 | 190 | ||
| 191 | if (dpc->rp_log_size < 5) | 191 | if (dpc->rp_log_size < 5) |
| 192 | goto clear_status; | 192 | goto clear_status; |
| 193 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log); | 193 | pci_read_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_IMPSPEC_LOG, &log); |
| 194 | dev_err(dev, "RP PIO ImpSpec Log %#010x\n", log); | 194 | pci_err(pdev, "RP PIO ImpSpec Log %#010x\n", log); |
| 195 | 195 | ||
| 196 | for (i = 0; i < dpc->rp_log_size - 5; i++) { | 196 | for (i = 0; i < dpc->rp_log_size - 5; i++) { |
| 197 | pci_read_config_dword(pdev, | 197 | pci_read_config_dword(pdev, |
| 198 | cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); | 198 | cap + PCI_EXP_DPC_RP_PIO_TLPPREFIX_LOG, &prefix); |
| 199 | dev_err(dev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); | 199 | pci_err(pdev, "TLP Prefix Header: dw%d, %#010x\n", i, prefix); |
| 200 | } | 200 | } |
| 201 | clear_status: | 201 | clear_status: |
| 202 | pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); | 202 | pci_write_config_dword(pdev, cap + PCI_EXP_DPC_RP_PIO_STATUS, status); |
| @@ -229,18 +229,17 @@ static irqreturn_t dpc_handler(int irq, void *context) | |||
| 229 | struct aer_err_info info; | 229 | struct aer_err_info info; |
| 230 | struct dpc_dev *dpc = context; | 230 | struct dpc_dev *dpc = context; |
| 231 | struct pci_dev *pdev = dpc->dev->port; | 231 | struct pci_dev *pdev = dpc->dev->port; |
| 232 | struct device *dev = &dpc->dev->device; | ||
| 233 | u16 cap = dpc->cap_pos, status, source, reason, ext_reason; | 232 | u16 cap = dpc->cap_pos, status, source, reason, ext_reason; |
| 234 | 233 | ||
| 235 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); | 234 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status); |
| 236 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); | 235 | pci_read_config_word(pdev, cap + PCI_EXP_DPC_SOURCE_ID, &source); |
| 237 | 236 | ||
| 238 | dev_info(dev, "DPC containment event, status:%#06x source:%#06x\n", | 237 | pci_info(pdev, "containment event, status:%#06x source:%#06x\n", |
| 239 | status, source); | 238 | status, source); |
| 240 | 239 | ||
| 241 | reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1; | 240 | reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN) >> 1; |
| 242 | ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5; | 241 | ext_reason = (status & PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT) >> 5; |
| 243 | dev_warn(dev, "DPC %s detected\n", | 242 | pci_warn(pdev, "%s detected\n", |
| 244 | (reason == 0) ? "unmasked uncorrectable error" : | 243 | (reason == 0) ? "unmasked uncorrectable error" : |
| 245 | (reason == 1) ? "ERR_NONFATAL" : | 244 | (reason == 1) ? "ERR_NONFATAL" : |
| 246 | (reason == 2) ? "ERR_FATAL" : | 245 | (reason == 2) ? "ERR_FATAL" : |
| @@ -307,7 +306,7 @@ static int dpc_probe(struct pcie_device *dev) | |||
| 307 | dpc_handler, IRQF_SHARED, | 306 | dpc_handler, IRQF_SHARED, |
| 308 | "pcie-dpc", dpc); | 307 | "pcie-dpc", dpc); |
| 309 | if (status) { | 308 | if (status) { |
| 310 | dev_warn(device, "request IRQ%d failed: %d\n", dev->irq, | 309 | pci_warn(pdev, "request IRQ%d failed: %d\n", dev->irq, |
| 311 | status); | 310 | status); |
| 312 | return status; | 311 | return status; |
| 313 | } | 312 | } |
| @@ -319,7 +318,7 @@ static int dpc_probe(struct pcie_device *dev) | |||
| 319 | if (dpc->rp_extensions) { | 318 | if (dpc->rp_extensions) { |
| 320 | dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; | 319 | dpc->rp_log_size = (cap & PCI_EXP_DPC_RP_PIO_LOG_SIZE) >> 8; |
| 321 | if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) { | 320 | if (dpc->rp_log_size < 4 || dpc->rp_log_size > 9) { |
| 322 | dev_err(device, "RP PIO log size %u is invalid\n", | 321 | pci_err(pdev, "RP PIO log size %u is invalid\n", |
| 323 | dpc->rp_log_size); | 322 | dpc->rp_log_size); |
| 324 | dpc->rp_log_size = 0; | 323 | dpc->rp_log_size = 0; |
| 325 | } | 324 | } |
| @@ -328,11 +327,11 @@ static int dpc_probe(struct pcie_device *dev) | |||
| 328 | ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; | 327 | ctl = (ctl & 0xfff4) | PCI_EXP_DPC_CTL_EN_FATAL | PCI_EXP_DPC_CTL_INT_EN; |
| 329 | pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); | 328 | pci_write_config_word(pdev, dpc->cap_pos + PCI_EXP_DPC_CTL, ctl); |
| 330 | 329 | ||
| 331 | dev_info(device, "DPC error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", | 330 | pci_info(pdev, "error containment capabilities: Int Msg #%d, RPExt%c PoisonedTLP%c SwTrigger%c RP PIO Log %d, DL_ActiveErr%c\n", |
| 332 | cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), | 331 | cap & PCI_EXP_DPC_IRQ, FLAG(cap, PCI_EXP_DPC_CAP_RP_EXT), |
| 333 | FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), | 332 | FLAG(cap, PCI_EXP_DPC_CAP_POISONED_TLP), |
| 334 | FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, | 333 | FLAG(cap, PCI_EXP_DPC_CAP_SW_TRIGGER), dpc->rp_log_size, |
| 335 | FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); | 334 | FLAG(cap, PCI_EXP_DPC_CAP_DL_ACTIVE)); |
| 336 | 335 | ||
| 337 | pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16)); | 336 | pci_add_ext_cap_save_buffer(pdev, PCI_EXT_CAP_ID_DPC, sizeof(u16)); |
| 338 | return status; | 337 | return status; |
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 54d593d10396..f38e6c19dd50 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c | |||
| @@ -7,6 +7,8 @@ | |||
| 7 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | 7 | * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. |
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #define dev_fmt(fmt) "PME: " fmt | ||
| 11 | |||
| 10 | #include <linux/pci.h> | 12 | #include <linux/pci.h> |
| 11 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 12 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
| @@ -194,14 +196,14 @@ static void pcie_pme_handle_request(struct pci_dev *port, u16 req_id) | |||
| 194 | * assuming that the PME was reported by a PCIe-PCI bridge that | 196 | * assuming that the PME was reported by a PCIe-PCI bridge that |
| 195 | * used devfn different from zero. | 197 | * used devfn different from zero. |
| 196 | */ | 198 | */ |
| 197 | pci_dbg(port, "PME interrupt generated for non-existent device %02x:%02x.%d\n", | 199 | pci_info(port, "interrupt generated for non-existent device %02x:%02x.%d\n", |
| 198 | busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 200 | busnr, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
| 199 | found = pcie_pme_from_pci_bridge(bus, 0); | 201 | found = pcie_pme_from_pci_bridge(bus, 0); |
| 200 | } | 202 | } |
| 201 | 203 | ||
| 202 | out: | 204 | out: |
| 203 | if (!found) | 205 | if (!found) |
| 204 | pci_dbg(port, "Spurious native PME interrupt!\n"); | 206 | pci_info(port, "Spurious native interrupt!\n"); |
| 205 | } | 207 | } |
| 206 | 208 | ||
| 207 | /** | 209 | /** |
| @@ -341,7 +343,7 @@ static int pcie_pme_probe(struct pcie_device *srv) | |||
| 341 | return ret; | 343 | return ret; |
| 342 | } | 344 | } |
| 343 | 345 | ||
| 344 | pci_info(port, "Signaling PME with IRQ %d\n", srv->irq); | 346 | pci_info(port, "Signaling with IRQ %d\n", srv->irq); |
| 345 | 347 | ||
| 346 | pcie_pme_mark_devices(port); | 348 | pcie_pme_mark_devices(port); |
| 347 | pcie_pme_interrupt_enable(port, true); | 349 | pcie_pme_interrupt_enable(port, true); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 7e12d0163863..0e8e2c186f50 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
| @@ -317,7 +317,7 @@ fail: | |||
| 317 | res->flags = 0; | 317 | res->flags = 0; |
| 318 | out: | 318 | out: |
| 319 | if (res->flags) | 319 | if (res->flags) |
| 320 | pci_printk(KERN_DEBUG, dev, "reg 0x%x: %pR\n", pos, res); | 320 | pci_info(dev, "reg 0x%x: %pR\n", pos, res); |
| 321 | 321 | ||
| 322 | return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; | 322 | return (res->flags & IORESOURCE_MEM_64) ? 1 : 0; |
| 323 | } | 323 | } |
| @@ -435,7 +435,7 @@ static void pci_read_bridge_io(struct pci_bus *child) | |||
| 435 | region.start = base; | 435 | region.start = base; |
| 436 | region.end = limit + io_granularity - 1; | 436 | region.end = limit + io_granularity - 1; |
| 437 | pcibios_bus_to_resource(dev->bus, res, ®ion); | 437 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
| 438 | pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); | 438 | pci_info(dev, " bridge window %pR\n", res); |
| 439 | } | 439 | } |
| 440 | } | 440 | } |
| 441 | 441 | ||
| @@ -457,7 +457,7 @@ static void pci_read_bridge_mmio(struct pci_bus *child) | |||
| 457 | region.start = base; | 457 | region.start = base; |
| 458 | region.end = limit + 0xfffff; | 458 | region.end = limit + 0xfffff; |
| 459 | pcibios_bus_to_resource(dev->bus, res, ®ion); | 459 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
| 460 | pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); | 460 | pci_info(dev, " bridge window %pR\n", res); |
| 461 | } | 461 | } |
| 462 | } | 462 | } |
| 463 | 463 | ||
| @@ -510,7 +510,7 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child) | |||
| 510 | region.start = base; | 510 | region.start = base; |
| 511 | region.end = limit + 0xfffff; | 511 | region.end = limit + 0xfffff; |
| 512 | pcibios_bus_to_resource(dev->bus, res, ®ion); | 512 | pcibios_bus_to_resource(dev->bus, res, ®ion); |
| 513 | pci_printk(KERN_DEBUG, dev, " bridge window %pR\n", res); | 513 | pci_info(dev, " bridge window %pR\n", res); |
| 514 | } | 514 | } |
| 515 | } | 515 | } |
| 516 | 516 | ||
| @@ -540,8 +540,7 @@ void pci_read_bridge_bases(struct pci_bus *child) | |||
| 540 | if (res && res->flags) { | 540 | if (res && res->flags) { |
| 541 | pci_bus_add_resource(child, res, | 541 | pci_bus_add_resource(child, res, |
| 542 | PCI_SUBTRACTIVE_DECODE); | 542 | PCI_SUBTRACTIVE_DECODE); |
| 543 | pci_printk(KERN_DEBUG, dev, | 543 | pci_info(dev, " bridge window %pR (subtractive decode)\n", |
| 544 | " bridge window %pR (subtractive decode)\n", | ||
| 545 | res); | 544 | res); |
| 546 | } | 545 | } |
| 547 | } | 546 | } |
| @@ -586,16 +585,10 @@ static void pci_release_host_bridge_dev(struct device *dev) | |||
| 586 | kfree(to_pci_host_bridge(dev)); | 585 | kfree(to_pci_host_bridge(dev)); |
| 587 | } | 586 | } |
| 588 | 587 | ||
| 589 | struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) | 588 | static void pci_init_host_bridge(struct pci_host_bridge *bridge) |
| 590 | { | 589 | { |
| 591 | struct pci_host_bridge *bridge; | ||
| 592 | |||
| 593 | bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); | ||
| 594 | if (!bridge) | ||
| 595 | return NULL; | ||
| 596 | |||
| 597 | INIT_LIST_HEAD(&bridge->windows); | 590 | INIT_LIST_HEAD(&bridge->windows); |
| 598 | bridge->dev.release = pci_release_host_bridge_dev; | 591 | INIT_LIST_HEAD(&bridge->dma_ranges); |
| 599 | 592 | ||
| 600 | /* | 593 | /* |
| 601 | * We assume we can manage these PCIe features. Some systems may | 594 | * We assume we can manage these PCIe features. Some systems may |
| @@ -608,6 +601,18 @@ struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) | |||
| 608 | bridge->native_shpc_hotplug = 1; | 601 | bridge->native_shpc_hotplug = 1; |
| 609 | bridge->native_pme = 1; | 602 | bridge->native_pme = 1; |
| 610 | bridge->native_ltr = 1; | 603 | bridge->native_ltr = 1; |
| 604 | } | ||
| 605 | |||
| 606 | struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) | ||
| 607 | { | ||
| 608 | struct pci_host_bridge *bridge; | ||
| 609 | |||
| 610 | bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL); | ||
| 611 | if (!bridge) | ||
| 612 | return NULL; | ||
| 613 | |||
| 614 | pci_init_host_bridge(bridge); | ||
| 615 | bridge->dev.release = pci_release_host_bridge_dev; | ||
| 611 | 616 | ||
| 612 | return bridge; | 617 | return bridge; |
| 613 | } | 618 | } |
| @@ -622,7 +627,7 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, | |||
| 622 | if (!bridge) | 627 | if (!bridge) |
| 623 | return NULL; | 628 | return NULL; |
| 624 | 629 | ||
| 625 | INIT_LIST_HEAD(&bridge->windows); | 630 | pci_init_host_bridge(bridge); |
| 626 | bridge->dev.release = devm_pci_release_host_bridge_dev; | 631 | bridge->dev.release = devm_pci_release_host_bridge_dev; |
| 627 | 632 | ||
| 628 | return bridge; | 633 | return bridge; |
| @@ -632,6 +637,7 @@ EXPORT_SYMBOL(devm_pci_alloc_host_bridge); | |||
| 632 | void pci_free_host_bridge(struct pci_host_bridge *bridge) | 637 | void pci_free_host_bridge(struct pci_host_bridge *bridge) |
| 633 | { | 638 | { |
| 634 | pci_free_resource_list(&bridge->windows); | 639 | pci_free_resource_list(&bridge->windows); |
| 640 | pci_free_resource_list(&bridge->dma_ranges); | ||
| 635 | 641 | ||
| 636 | kfree(bridge); | 642 | kfree(bridge); |
| 637 | } | 643 | } |
| @@ -1081,6 +1087,36 @@ static void pci_enable_crs(struct pci_dev *pdev) | |||
| 1081 | 1087 | ||
| 1082 | static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, | 1088 | static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus, |
| 1083 | unsigned int available_buses); | 1089 | unsigned int available_buses); |
| 1090 | /** | ||
| 1091 | * pci_ea_fixed_busnrs() - Read fixed Secondary and Subordinate bus | ||
| 1092 | * numbers from EA capability. | ||
| 1093 | * @dev: Bridge | ||
| 1094 | * @sec: updated with secondary bus number from EA | ||
| 1095 | * @sub: updated with subordinate bus number from EA | ||
| 1096 | * | ||
| 1097 | * If @dev is a bridge with EA capability, update @sec and @sub with | ||
| 1098 | * fixed bus numbers from the capability and return true. Otherwise, | ||
| 1099 | * return false. | ||
| 1100 | */ | ||
| 1101 | static bool pci_ea_fixed_busnrs(struct pci_dev *dev, u8 *sec, u8 *sub) | ||
| 1102 | { | ||
| 1103 | int ea, offset; | ||
| 1104 | u32 dw; | ||
| 1105 | |||
| 1106 | if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) | ||
| 1107 | return false; | ||
| 1108 | |||
| 1109 | /* find PCI EA capability in list */ | ||
| 1110 | ea = pci_find_capability(dev, PCI_CAP_ID_EA); | ||
| 1111 | if (!ea) | ||
| 1112 | return false; | ||
| 1113 | |||
| 1114 | offset = ea + PCI_EA_FIRST_ENT; | ||
| 1115 | pci_read_config_dword(dev, offset, &dw); | ||
| 1116 | *sec = dw & PCI_EA_SEC_BUS_MASK; | ||
| 1117 | *sub = (dw & PCI_EA_SUB_BUS_MASK) >> PCI_EA_SUB_BUS_SHIFT; | ||
| 1118 | return true; | ||
| 1119 | } | ||
| 1084 | 1120 | ||
| 1085 | /* | 1121 | /* |
| 1086 | * pci_scan_bridge_extend() - Scan buses behind a bridge | 1122 | * pci_scan_bridge_extend() - Scan buses behind a bridge |
| @@ -1115,6 +1151,9 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, | |||
| 1115 | u16 bctl; | 1151 | u16 bctl; |
| 1116 | u8 primary, secondary, subordinate; | 1152 | u8 primary, secondary, subordinate; |
| 1117 | int broken = 0; | 1153 | int broken = 0; |
| 1154 | bool fixed_buses; | ||
| 1155 | u8 fixed_sec, fixed_sub; | ||
| 1156 | int next_busnr; | ||
| 1118 | 1157 | ||
| 1119 | /* | 1158 | /* |
| 1120 | * Make sure the bridge is powered on to be able to access config | 1159 | * Make sure the bridge is powered on to be able to access config |
| @@ -1214,17 +1253,24 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, | |||
| 1214 | /* Clear errors */ | 1253 | /* Clear errors */ |
| 1215 | pci_write_config_word(dev, PCI_STATUS, 0xffff); | 1254 | pci_write_config_word(dev, PCI_STATUS, 0xffff); |
| 1216 | 1255 | ||
| 1256 | /* Read bus numbers from EA Capability (if present) */ | ||
| 1257 | fixed_buses = pci_ea_fixed_busnrs(dev, &fixed_sec, &fixed_sub); | ||
| 1258 | if (fixed_buses) | ||
| 1259 | next_busnr = fixed_sec; | ||
| 1260 | else | ||
| 1261 | next_busnr = max + 1; | ||
| 1262 | |||
| 1217 | /* | 1263 | /* |
| 1218 | * Prevent assigning a bus number that already exists. | 1264 | * Prevent assigning a bus number that already exists. |
| 1219 | * This can happen when a bridge is hot-plugged, so in this | 1265 | * This can happen when a bridge is hot-plugged, so in this |
| 1220 | * case we only re-scan this bus. | 1266 | * case we only re-scan this bus. |
| 1221 | */ | 1267 | */ |
| 1222 | child = pci_find_bus(pci_domain_nr(bus), max+1); | 1268 | child = pci_find_bus(pci_domain_nr(bus), next_busnr); |
| 1223 | if (!child) { | 1269 | if (!child) { |
| 1224 | child = pci_add_new_bus(bus, dev, max+1); | 1270 | child = pci_add_new_bus(bus, dev, next_busnr); |
| 1225 | if (!child) | 1271 | if (!child) |
| 1226 | goto out; | 1272 | goto out; |
| 1227 | pci_bus_insert_busn_res(child, max+1, | 1273 | pci_bus_insert_busn_res(child, next_busnr, |
| 1228 | bus->busn_res.end); | 1274 | bus->busn_res.end); |
| 1229 | } | 1275 | } |
| 1230 | max++; | 1276 | max++; |
| @@ -1285,7 +1331,13 @@ static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev, | |||
| 1285 | max += i; | 1331 | max += i; |
| 1286 | } | 1332 | } |
| 1287 | 1333 | ||
| 1288 | /* Set subordinate bus number to its real value */ | 1334 | /* |
| 1335 | * Set subordinate bus number to its real value. | ||
| 1336 | * If fixed subordinate bus number exists from EA | ||
| 1337 | * capability then use it. | ||
| 1338 | */ | ||
| 1339 | if (fixed_buses) | ||
| 1340 | max = fixed_sub; | ||
| 1289 | pci_bus_update_busn_res_end(child, max); | 1341 | pci_bus_update_busn_res_end(child, max); |
| 1290 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); | 1342 | pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max); |
| 1291 | } | 1343 | } |
| @@ -1690,7 +1742,7 @@ int pci_setup_device(struct pci_dev *dev) | |||
| 1690 | dev->revision = class & 0xff; | 1742 | dev->revision = class & 0xff; |
| 1691 | dev->class = class >> 8; /* upper 3 bytes */ | 1743 | dev->class = class >> 8; /* upper 3 bytes */ |
| 1692 | 1744 | ||
| 1693 | pci_printk(KERN_DEBUG, dev, "[%04x:%04x] type %02x class %#08x\n", | 1745 | pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", |
| 1694 | dev->vendor, dev->device, dev->hdr_type, dev->class); | 1746 | dev->vendor, dev->device, dev->hdr_type, dev->class); |
| 1695 | 1747 | ||
| 1696 | if (pci_early_dump) | 1748 | if (pci_early_dump) |
| @@ -2026,6 +2078,119 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp) | |||
| 2026 | */ | 2078 | */ |
| 2027 | } | 2079 | } |
| 2028 | 2080 | ||
| 2081 | static u16 hpx3_device_type(struct pci_dev *dev) | ||
| 2082 | { | ||
| 2083 | u16 pcie_type = pci_pcie_type(dev); | ||
| 2084 | const int pcie_to_hpx3_type[] = { | ||
| 2085 | [PCI_EXP_TYPE_ENDPOINT] = HPX_TYPE_ENDPOINT, | ||
| 2086 | [PCI_EXP_TYPE_LEG_END] = HPX_TYPE_LEG_END, | ||
| 2087 | [PCI_EXP_TYPE_RC_END] = HPX_TYPE_RC_END, | ||
| 2088 | [PCI_EXP_TYPE_RC_EC] = HPX_TYPE_RC_EC, | ||
| 2089 | [PCI_EXP_TYPE_ROOT_PORT] = HPX_TYPE_ROOT_PORT, | ||
| 2090 | [PCI_EXP_TYPE_UPSTREAM] = HPX_TYPE_UPSTREAM, | ||
| 2091 | [PCI_EXP_TYPE_DOWNSTREAM] = HPX_TYPE_DOWNSTREAM, | ||
| 2092 | [PCI_EXP_TYPE_PCI_BRIDGE] = HPX_TYPE_PCI_BRIDGE, | ||
| 2093 | [PCI_EXP_TYPE_PCIE_BRIDGE] = HPX_TYPE_PCIE_BRIDGE, | ||
| 2094 | }; | ||
| 2095 | |||
| 2096 | if (pcie_type >= ARRAY_SIZE(pcie_to_hpx3_type)) | ||
| 2097 | return 0; | ||
| 2098 | |||
| 2099 | return pcie_to_hpx3_type[pcie_type]; | ||
| 2100 | } | ||
| 2101 | |||
| 2102 | static u8 hpx3_function_type(struct pci_dev *dev) | ||
| 2103 | { | ||
| 2104 | if (dev->is_virtfn) | ||
| 2105 | return HPX_FN_SRIOV_VIRT; | ||
| 2106 | else if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV) > 0) | ||
| 2107 | return HPX_FN_SRIOV_PHYS; | ||
| 2108 | else | ||
| 2109 | return HPX_FN_NORMAL; | ||
| 2110 | } | ||
| 2111 | |||
| 2112 | static bool hpx3_cap_ver_matches(u8 pcie_cap_id, u8 hpx3_cap_id) | ||
| 2113 | { | ||
| 2114 | u8 cap_ver = hpx3_cap_id & 0xf; | ||
| 2115 | |||
| 2116 | if ((hpx3_cap_id & BIT(4)) && cap_ver >= pcie_cap_id) | ||
| 2117 | return true; | ||
| 2118 | else if (cap_ver == pcie_cap_id) | ||
| 2119 | return true; | ||
| 2120 | |||
| 2121 | return false; | ||
| 2122 | } | ||
| 2123 | |||
| 2124 | static void program_hpx_type3_register(struct pci_dev *dev, | ||
| 2125 | const struct hpx_type3 *reg) | ||
| 2126 | { | ||
| 2127 | u32 match_reg, write_reg, header, orig_value; | ||
| 2128 | u16 pos; | ||
| 2129 | |||
| 2130 | if (!(hpx3_device_type(dev) & reg->device_type)) | ||
| 2131 | return; | ||
| 2132 | |||
| 2133 | if (!(hpx3_function_type(dev) & reg->function_type)) | ||
| 2134 | return; | ||
| 2135 | |||
| 2136 | switch (reg->config_space_location) { | ||
| 2137 | case HPX_CFG_PCICFG: | ||
| 2138 | pos = 0; | ||
| 2139 | break; | ||
| 2140 | case HPX_CFG_PCIE_CAP: | ||
| 2141 | pos = pci_find_capability(dev, reg->pci_exp_cap_id); | ||
| 2142 | if (pos == 0) | ||
| 2143 | return; | ||
| 2144 | |||
| 2145 | break; | ||
| 2146 | case HPX_CFG_PCIE_CAP_EXT: | ||
| 2147 | pos = pci_find_ext_capability(dev, reg->pci_exp_cap_id); | ||
| 2148 | if (pos == 0) | ||
| 2149 | return; | ||
| 2150 | |||
| 2151 | pci_read_config_dword(dev, pos, &header); | ||
| 2152 | if (!hpx3_cap_ver_matches(PCI_EXT_CAP_VER(header), | ||
| 2153 | reg->pci_exp_cap_ver)) | ||
| 2154 | return; | ||
| 2155 | |||
| 2156 | break; | ||
| 2157 | case HPX_CFG_VEND_CAP: /* Fall through */ | ||
| 2158 | case HPX_CFG_DVSEC: /* Fall through */ | ||
| 2159 | default: | ||
| 2160 | pci_warn(dev, "Encountered _HPX type 3 with unsupported config space location"); | ||
| 2161 | return; | ||
| 2162 | } | ||
| 2163 | |||
| 2164 | pci_read_config_dword(dev, pos + reg->match_offset, &match_reg); | ||
| 2165 | |||
| 2166 | if ((match_reg & reg->match_mask_and) != reg->match_value) | ||
| 2167 | return; | ||
| 2168 | |||
| 2169 | pci_read_config_dword(dev, pos + reg->reg_offset, &write_reg); | ||
| 2170 | orig_value = write_reg; | ||
| 2171 | write_reg &= reg->reg_mask_and; | ||
| 2172 | write_reg |= reg->reg_mask_or; | ||
| 2173 | |||
| 2174 | if (orig_value == write_reg) | ||
| 2175 | return; | ||
| 2176 | |||
| 2177 | pci_write_config_dword(dev, pos + reg->reg_offset, write_reg); | ||
| 2178 | |||
| 2179 | pci_dbg(dev, "Applied _HPX3 at [0x%x]: 0x%08x -> 0x%08x", | ||
| 2180 | pos, orig_value, write_reg); | ||
| 2181 | } | ||
| 2182 | |||
| 2183 | static void program_hpx_type3(struct pci_dev *dev, struct hpx_type3 *hpx3) | ||
| 2184 | { | ||
| 2185 | if (!hpx3) | ||
| 2186 | return; | ||
| 2187 | |||
| 2188 | if (!pci_is_pcie(dev)) | ||
| 2189 | return; | ||
| 2190 | |||
| 2191 | program_hpx_type3_register(dev, hpx3); | ||
| 2192 | } | ||
| 2193 | |||
| 2029 | int pci_configure_extended_tags(struct pci_dev *dev, void *ign) | 2194 | int pci_configure_extended_tags(struct pci_dev *dev, void *ign) |
| 2030 | { | 2195 | { |
| 2031 | struct pci_host_bridge *host; | 2196 | struct pci_host_bridge *host; |
| @@ -2206,8 +2371,12 @@ static void pci_configure_serr(struct pci_dev *dev) | |||
| 2206 | 2371 | ||
| 2207 | static void pci_configure_device(struct pci_dev *dev) | 2372 | static void pci_configure_device(struct pci_dev *dev) |
| 2208 | { | 2373 | { |
| 2209 | struct hotplug_params hpp; | 2374 | static const struct hotplug_program_ops hp_ops = { |
| 2210 | int ret; | 2375 | .program_type0 = program_hpp_type0, |
| 2376 | .program_type1 = program_hpp_type1, | ||
| 2377 | .program_type2 = program_hpp_type2, | ||
| 2378 | .program_type3 = program_hpx_type3, | ||
| 2379 | }; | ||
| 2211 | 2380 | ||
| 2212 | pci_configure_mps(dev); | 2381 | pci_configure_mps(dev); |
| 2213 | pci_configure_extended_tags(dev, NULL); | 2382 | pci_configure_extended_tags(dev, NULL); |
| @@ -2216,14 +2385,7 @@ static void pci_configure_device(struct pci_dev *dev) | |||
| 2216 | pci_configure_eetlp_prefix(dev); | 2385 | pci_configure_eetlp_prefix(dev); |
| 2217 | pci_configure_serr(dev); | 2386 | pci_configure_serr(dev); |
| 2218 | 2387 | ||
| 2219 | memset(&hpp, 0, sizeof(hpp)); | 2388 | pci_acpi_program_hp_params(dev, &hp_ops); |
| 2220 | ret = pci_get_hp_params(dev, &hpp); | ||
| 2221 | if (ret) | ||
| 2222 | return; | ||
| 2223 | |||
| 2224 | program_hpp_type2(dev, hpp.t2); | ||
| 2225 | program_hpp_type1(dev, hpp.t1); | ||
| 2226 | program_hpp_type0(dev, hpp.t0); | ||
| 2227 | } | 2389 | } |
| 2228 | 2390 | ||
| 2229 | static void pci_release_capabilities(struct pci_dev *dev) | 2391 | static void pci_release_capabilities(struct pci_dev *dev) |
| @@ -3086,7 +3248,7 @@ int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) | |||
| 3086 | conflict = request_resource_conflict(parent_res, res); | 3248 | conflict = request_resource_conflict(parent_res, res); |
| 3087 | 3249 | ||
| 3088 | if (conflict) | 3250 | if (conflict) |
| 3089 | dev_printk(KERN_DEBUG, &b->dev, | 3251 | dev_info(&b->dev, |
| 3090 | "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", | 3252 | "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n", |
| 3091 | res, pci_is_root_bus(b) ? "domain " : "", | 3253 | res, pci_is_root_bus(b) ? "domain " : "", |
| 3092 | parent_res, conflict->name, conflict); | 3254 | parent_res, conflict->name, conflict); |
| @@ -3106,8 +3268,7 @@ int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max) | |||
| 3106 | 3268 | ||
| 3107 | size = bus_max - res->start + 1; | 3269 | size = bus_max - res->start + 1; |
| 3108 | ret = adjust_resource(res, res->start, size); | 3270 | ret = adjust_resource(res, res->start, size); |
| 3109 | dev_printk(KERN_DEBUG, &b->dev, | 3271 | dev_info(&b->dev, "busn_res: %pR end %s updated to %02x\n", |
| 3110 | "busn_res: %pR end %s updated to %02x\n", | ||
| 3111 | &old_res, ret ? "can not be" : "is", bus_max); | 3272 | &old_res, ret ? "can not be" : "is", bus_max); |
| 3112 | 3273 | ||
| 3113 | if (!ret && !res->parent) | 3274 | if (!ret && !res->parent) |
| @@ -3125,8 +3286,7 @@ void pci_bus_release_busn_res(struct pci_bus *b) | |||
| 3125 | return; | 3286 | return; |
| 3126 | 3287 | ||
| 3127 | ret = release_resource(res); | 3288 | ret = release_resource(res); |
| 3128 | dev_printk(KERN_DEBUG, &b->dev, | 3289 | dev_info(&b->dev, "busn_res: %pR %s released\n", |
| 3129 | "busn_res: %pR %s released\n", | ||
| 3130 | res, ret ? "can not be" : "is"); | 3290 | res, ret ? "can not be" : "is"); |
| 3131 | } | 3291 | } |
| 3132 | 3292 | ||
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 6fa1627ce08d..445b51db75b0 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
| @@ -222,6 +222,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, | |||
| 222 | } | 222 | } |
| 223 | /* If arch decided it can't, fall through... */ | 223 | /* If arch decided it can't, fall through... */ |
| 224 | #endif /* HAVE_PCI_MMAP */ | 224 | #endif /* HAVE_PCI_MMAP */ |
| 225 | /* fall through */ | ||
| 225 | default: | 226 | default: |
| 226 | ret = -EINVAL; | 227 | ret = -EINVAL; |
| 227 | break; | 228 | break; |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index eb0afc275901..0f16acc323c6 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
| @@ -159,8 +159,7 @@ static int __init pci_apply_final_quirks(void) | |||
| 159 | u8 tmp; | 159 | u8 tmp; |
| 160 | 160 | ||
| 161 | if (pci_cache_line_size) | 161 | if (pci_cache_line_size) |
| 162 | printk(KERN_DEBUG "PCI: CLS %u bytes\n", | 162 | pr_info("PCI: CLS %u bytes\n", pci_cache_line_size << 2); |
| 163 | pci_cache_line_size << 2); | ||
| 164 | 163 | ||
| 165 | pci_apply_fixup_final_quirks = true; | 164 | pci_apply_fixup_final_quirks = true; |
| 166 | for_each_pci_dev(dev) { | 165 | for_each_pci_dev(dev) { |
| @@ -177,16 +176,16 @@ static int __init pci_apply_final_quirks(void) | |||
| 177 | if (!tmp || cls == tmp) | 176 | if (!tmp || cls == tmp) |
| 178 | continue; | 177 | continue; |
| 179 | 178 | ||
| 180 | printk(KERN_DEBUG "PCI: CLS mismatch (%u != %u), using %u bytes\n", | 179 | pci_info(dev, "CLS mismatch (%u != %u), using %u bytes\n", |
| 181 | cls << 2, tmp << 2, | 180 | cls << 2, tmp << 2, |
| 182 | pci_dfl_cache_line_size << 2); | 181 | pci_dfl_cache_line_size << 2); |
| 183 | pci_cache_line_size = pci_dfl_cache_line_size; | 182 | pci_cache_line_size = pci_dfl_cache_line_size; |
| 184 | } | 183 | } |
| 185 | } | 184 | } |
| 186 | 185 | ||
| 187 | if (!pci_cache_line_size) { | 186 | if (!pci_cache_line_size) { |
| 188 | printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n", | 187 | pr_info("PCI: CLS %u bytes, default %u\n", cls << 2, |
| 189 | cls << 2, pci_dfl_cache_line_size << 2); | 188 | pci_dfl_cache_line_size << 2); |
| 190 | pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; | 189 | pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size; |
| 191 | } | 190 | } |
| 192 | 191 | ||
| @@ -2245,6 +2244,23 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f1, quirk_disable_aspm_l0s); | |||
| 2245 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); | 2244 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x10f4, quirk_disable_aspm_l0s); |
| 2246 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); | 2245 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1508, quirk_disable_aspm_l0s); |
| 2247 | 2246 | ||
| 2247 | /* | ||
| 2248 | * Some Pericom PCIe-to-PCI bridges in reverse mode need the PCIe Retrain | ||
| 2249 | * Link bit cleared after starting the link retrain process to allow this | ||
| 2250 | * process to finish. | ||
| 2251 | * | ||
| 2252 | * Affected devices: PI7C9X110, PI7C9X111SL, PI7C9X130. See also the | ||
| 2253 | * Pericom Errata Sheet PI7C9X111SLB_errata_rev1.2_102711.pdf. | ||
| 2254 | */ | ||
| 2255 | static void quirk_enable_clear_retrain_link(struct pci_dev *dev) | ||
| 2256 | { | ||
| 2257 | dev->clear_retrain_link = 1; | ||
| 2258 | pci_info(dev, "Enable PCIe Retrain Link quirk\n"); | ||
| 2259 | } | ||
| 2260 | DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe110, quirk_enable_clear_retrain_link); | ||
| 2261 | DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe111, quirk_enable_clear_retrain_link); | ||
| 2262 | DECLARE_PCI_FIXUP_HEADER(0x12d8, 0xe130, quirk_enable_clear_retrain_link); | ||
| 2263 | |||
| 2248 | static void fixup_rev1_53c810(struct pci_dev *dev) | 2264 | static void fixup_rev1_53c810(struct pci_dev *dev) |
| 2249 | { | 2265 | { |
| 2250 | u32 class = dev->class; | 2266 | u32 class = dev->class; |
| @@ -2596,7 +2612,7 @@ static void nvbridge_check_legacy_irq_routing(struct pci_dev *dev) | |||
| 2596 | pci_read_config_dword(dev, 0x74, &cfg); | 2612 | pci_read_config_dword(dev, 0x74, &cfg); |
| 2597 | 2613 | ||
| 2598 | if (cfg & ((1 << 2) | (1 << 15))) { | 2614 | if (cfg & ((1 << 2) | (1 << 15))) { |
| 2599 | printk(KERN_INFO "Rewriting IRQ routing register on MCP55\n"); | 2615 | pr_info("Rewriting IRQ routing register on MCP55\n"); |
| 2600 | cfg &= ~((1 << 2) | (1 << 15)); | 2616 | cfg &= ~((1 << 2) | (1 << 15)); |
| 2601 | pci_write_config_dword(dev, 0x74, cfg); | 2617 | pci_write_config_dword(dev, 0x74, cfg); |
| 2602 | } | 2618 | } |
| @@ -3408,6 +3424,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); | |||
| 3408 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); | 3424 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); |
| 3409 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); | 3425 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); |
| 3410 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); | 3426 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset); |
| 3427 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset); | ||
| 3411 | 3428 | ||
| 3412 | /* | 3429 | /* |
| 3413 | * Root port on some Cavium CN8xxx chips do not successfully complete a bus | 3430 | * Root port on some Cavium CN8xxx chips do not successfully complete a bus |
| @@ -4905,6 +4922,7 @@ static void quirk_no_ats(struct pci_dev *pdev) | |||
| 4905 | 4922 | ||
| 4906 | /* AMD Stoney platform GPU */ | 4923 | /* AMD Stoney platform GPU */ |
| 4907 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); | 4924 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_no_ats); |
| 4925 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_no_ats); | ||
| 4908 | #endif /* CONFIG_PCI_ATS */ | 4926 | #endif /* CONFIG_PCI_ATS */ |
| 4909 | 4927 | ||
| 4910 | /* Freescale PCIe doesn't support MSI in RC mode */ | 4928 | /* Freescale PCIe doesn't support MSI in RC mode */ |
| @@ -5122,3 +5140,61 @@ SWITCHTEC_QUIRK(0x8573); /* PFXI 48XG3 */ | |||
| 5122 | SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ | 5140 | SWITCHTEC_QUIRK(0x8574); /* PFXI 64XG3 */ |
| 5123 | SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ | 5141 | SWITCHTEC_QUIRK(0x8575); /* PFXI 80XG3 */ |
| 5124 | SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ | 5142 | SWITCHTEC_QUIRK(0x8576); /* PFXI 96XG3 */ |
| 5143 | |||
| 5144 | /* | ||
| 5145 | * On Lenovo Thinkpad P50 SKUs with a Nvidia Quadro M1000M, the BIOS does | ||
| 5146 | * not always reset the secondary Nvidia GPU between reboots if the system | ||
| 5147 | * is configured to use Hybrid Graphics mode. This results in the GPU | ||
| 5148 | * being left in whatever state it was in during the *previous* boot, which | ||
| 5149 | * causes spurious interrupts from the GPU, which in turn causes us to | ||
| 5150 | * disable the wrong IRQ and end up breaking the touchpad. Unsurprisingly, | ||
| 5151 | * this also completely breaks nouveau. | ||
| 5152 | * | ||
| 5153 | * Luckily, it seems a simple reset of the Nvidia GPU brings it back to a | ||
| 5154 | * clean state and fixes all these issues. | ||
| 5155 | * | ||
| 5156 | * When the machine is configured in Dedicated display mode, the issue | ||
| 5157 | * doesn't occur. Fortunately the GPU advertises NoReset+ when in this | ||
| 5158 | * mode, so we can detect that and avoid resetting it. | ||
| 5159 | */ | ||
| 5160 | static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev) | ||
| 5161 | { | ||
| 5162 | void __iomem *map; | ||
| 5163 | int ret; | ||
| 5164 | |||
| 5165 | if (pdev->subsystem_vendor != PCI_VENDOR_ID_LENOVO || | ||
| 5166 | pdev->subsystem_device != 0x222e || | ||
| 5167 | !pdev->reset_fn) | ||
| 5168 | return; | ||
| 5169 | |||
| 5170 | if (pci_enable_device_mem(pdev)) | ||
| 5171 | return; | ||
| 5172 | |||
| 5173 | /* | ||
| 5174 | * Based on nvkm_device_ctor() in | ||
| 5175 | * drivers/gpu/drm/nouveau/nvkm/engine/device/base.c | ||
| 5176 | */ | ||
| 5177 | map = pci_iomap(pdev, 0, 0x23000); | ||
| 5178 | if (!map) { | ||
| 5179 | pci_err(pdev, "Can't map MMIO space\n"); | ||
| 5180 | goto out_disable; | ||
| 5181 | } | ||
| 5182 | |||
| 5183 | /* | ||
| 5184 | * Make sure the GPU looks like it's been POSTed before resetting | ||
| 5185 | * it. | ||
| 5186 | */ | ||
| 5187 | if (ioread32(map + 0x2240c) & 0x2) { | ||
| 5188 | pci_info(pdev, FW_BUG "GPU left initialized by EFI, resetting\n"); | ||
| 5189 | ret = pci_reset_function(pdev); | ||
| 5190 | if (ret < 0) | ||
| 5191 | pci_err(pdev, "Failed to reset GPU: %d\n", ret); | ||
| 5192 | } | ||
| 5193 | |||
| 5194 | iounmap(map); | ||
| 5195 | out_disable: | ||
| 5196 | pci_disable_device(pdev); | ||
| 5197 | } | ||
| 5198 | DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1, | ||
| 5199 | PCI_CLASS_DISPLAY_VGA, 8, | ||
| 5200 | quirk_reset_lenovo_thinkpad_p50_nvgpu); | ||
diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 2b5f720862d3..5c7922612733 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c | |||
| @@ -33,7 +33,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
| 33 | struct pci_bus *bus; | 33 | struct pci_bus *bus; |
| 34 | int ret; | 34 | int ret; |
| 35 | 35 | ||
| 36 | ret = fn(pdev, PCI_DEVID(pdev->bus->number, pdev->devfn), data); | 36 | ret = fn(pdev, pci_dev_id(pdev), data); |
| 37 | if (ret) | 37 | if (ret) |
| 38 | return ret; | 38 | return ret; |
| 39 | 39 | ||
| @@ -88,9 +88,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
| 88 | return ret; | 88 | return ret; |
| 89 | continue; | 89 | continue; |
| 90 | case PCI_EXP_TYPE_PCIE_BRIDGE: | 90 | case PCI_EXP_TYPE_PCIE_BRIDGE: |
| 91 | ret = fn(tmp, | 91 | ret = fn(tmp, pci_dev_id(tmp), data); |
| 92 | PCI_DEVID(tmp->bus->number, | ||
| 93 | tmp->devfn), data); | ||
| 94 | if (ret) | 92 | if (ret) |
| 95 | return ret; | 93 | return ret; |
| 96 | continue; | 94 | continue; |
| @@ -101,9 +99,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
| 101 | PCI_DEVID(tmp->subordinate->number, | 99 | PCI_DEVID(tmp->subordinate->number, |
| 102 | PCI_DEVFN(0, 0)), data); | 100 | PCI_DEVFN(0, 0)), data); |
| 103 | else | 101 | else |
| 104 | ret = fn(tmp, | 102 | ret = fn(tmp, pci_dev_id(tmp), data); |
| 105 | PCI_DEVID(tmp->bus->number, | ||
| 106 | tmp->devfn), data); | ||
| 107 | if (ret) | 103 | if (ret) |
| 108 | return ret; | 104 | return ret; |
| 109 | } | 105 | } |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index ec44a0f3a7ac..0cdd5ff389de 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
| @@ -49,17 +49,15 @@ static void free_list(struct list_head *head) | |||
| 49 | } | 49 | } |
| 50 | 50 | ||
| 51 | /** | 51 | /** |
| 52 | * add_to_list() - add a new resource tracker to the list | 52 | * add_to_list() - Add a new resource tracker to the list |
| 53 | * @head: Head of the list | 53 | * @head: Head of the list |
| 54 | * @dev: device corresponding to which the resource | 54 | * @dev: Device to which the resource belongs |
| 55 | * belongs | 55 | * @res: Resource to be tracked |
| 56 | * @res: The resource to be tracked | 56 | * @add_size: Additional size to be optionally added to the resource |
| 57 | * @add_size: additional size to be optionally added | ||
| 58 | * to the resource | ||
| 59 | */ | 57 | */ |
| 60 | static int add_to_list(struct list_head *head, | 58 | static int add_to_list(struct list_head *head, struct pci_dev *dev, |
| 61 | struct pci_dev *dev, struct resource *res, | 59 | struct resource *res, resource_size_t add_size, |
| 62 | resource_size_t add_size, resource_size_t min_align) | 60 | resource_size_t min_align) |
| 63 | { | 61 | { |
| 64 | struct pci_dev_resource *tmp; | 62 | struct pci_dev_resource *tmp; |
| 65 | 63 | ||
| @@ -80,8 +78,7 @@ static int add_to_list(struct list_head *head, | |||
| 80 | return 0; | 78 | return 0; |
| 81 | } | 79 | } |
| 82 | 80 | ||
| 83 | static void remove_from_list(struct list_head *head, | 81 | static void remove_from_list(struct list_head *head, struct resource *res) |
| 84 | struct resource *res) | ||
| 85 | { | 82 | { |
| 86 | struct pci_dev_resource *dev_res, *tmp; | 83 | struct pci_dev_resource *dev_res, *tmp; |
| 87 | 84 | ||
| @@ -158,7 +155,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) | |||
| 158 | tmp->res = r; | 155 | tmp->res = r; |
| 159 | tmp->dev = dev; | 156 | tmp->dev = dev; |
| 160 | 157 | ||
| 161 | /* fallback is smallest one or list is empty*/ | 158 | /* Fallback is smallest one or list is empty */ |
| 162 | n = head; | 159 | n = head; |
| 163 | list_for_each_entry(dev_res, head, list) { | 160 | list_for_each_entry(dev_res, head, list) { |
| 164 | resource_size_t align; | 161 | resource_size_t align; |
| @@ -171,21 +168,20 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) | |||
| 171 | break; | 168 | break; |
| 172 | } | 169 | } |
| 173 | } | 170 | } |
| 174 | /* Insert it just before n*/ | 171 | /* Insert it just before n */ |
| 175 | list_add_tail(&tmp->list, n); | 172 | list_add_tail(&tmp->list, n); |
| 176 | } | 173 | } |
| 177 | } | 174 | } |
| 178 | 175 | ||
| 179 | static void __dev_sort_resources(struct pci_dev *dev, | 176 | static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head) |
| 180 | struct list_head *head) | ||
| 181 | { | 177 | { |
| 182 | u16 class = dev->class >> 8; | 178 | u16 class = dev->class >> 8; |
| 183 | 179 | ||
| 184 | /* Don't touch classless devices or host bridges or ioapics. */ | 180 | /* Don't touch classless devices or host bridges or IOAPICs */ |
| 185 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) | 181 | if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST) |
| 186 | return; | 182 | return; |
| 187 | 183 | ||
| 188 | /* Don't touch ioapic devices already enabled by firmware */ | 184 | /* Don't touch IOAPIC devices already enabled by firmware */ |
| 189 | if (class == PCI_CLASS_SYSTEM_PIC) { | 185 | if (class == PCI_CLASS_SYSTEM_PIC) { |
| 190 | u16 command; | 186 | u16 command; |
| 191 | pci_read_config_word(dev, PCI_COMMAND, &command); | 187 | pci_read_config_word(dev, PCI_COMMAND, &command); |
| @@ -204,19 +200,18 @@ static inline void reset_resource(struct resource *res) | |||
| 204 | } | 200 | } |
| 205 | 201 | ||
| 206 | /** | 202 | /** |
| 207 | * reassign_resources_sorted() - satisfy any additional resource requests | 203 | * reassign_resources_sorted() - Satisfy any additional resource requests |
| 208 | * | 204 | * |
| 209 | * @realloc_head : head of the list tracking requests requiring additional | 205 | * @realloc_head: Head of the list tracking requests requiring |
| 210 | * resources | 206 | * additional resources |
| 211 | * @head : head of the list tracking requests with allocated | 207 | * @head: Head of the list tracking requests with allocated |
| 212 | * resources | 208 | * resources |
| 213 | * | 209 | * |
| 214 | * Walk through each element of the realloc_head and try to procure | 210 | * Walk through each element of the realloc_head and try to procure additional |
| 215 | * additional resources for the element, provided the element | 211 | * resources for the element, provided the element is in the head list. |
| 216 | * is in the head list. | ||
| 217 | */ | 212 | */ |
| 218 | static void reassign_resources_sorted(struct list_head *realloc_head, | 213 | static void reassign_resources_sorted(struct list_head *realloc_head, |
| 219 | struct list_head *head) | 214 | struct list_head *head) |
| 220 | { | 215 | { |
| 221 | struct resource *res; | 216 | struct resource *res; |
| 222 | struct pci_dev_resource *add_res, *tmp; | 217 | struct pci_dev_resource *add_res, *tmp; |
| @@ -228,18 +223,18 @@ static void reassign_resources_sorted(struct list_head *realloc_head, | |||
| 228 | bool found_match = false; | 223 | bool found_match = false; |
| 229 | 224 | ||
| 230 | res = add_res->res; | 225 | res = add_res->res; |
| 231 | /* skip resource that has been reset */ | 226 | /* Skip resource that has been reset */ |
| 232 | if (!res->flags) | 227 | if (!res->flags) |
| 233 | goto out; | 228 | goto out; |
| 234 | 229 | ||
| 235 | /* skip this resource if not found in head list */ | 230 | /* Skip this resource if not found in head list */ |
| 236 | list_for_each_entry(dev_res, head, list) { | 231 | list_for_each_entry(dev_res, head, list) { |
| 237 | if (dev_res->res == res) { | 232 | if (dev_res->res == res) { |
| 238 | found_match = true; | 233 | found_match = true; |
| 239 | break; | 234 | break; |
| 240 | } | 235 | } |
| 241 | } | 236 | } |
| 242 | if (!found_match)/* just skip */ | 237 | if (!found_match) /* Just skip */ |
| 243 | continue; | 238 | continue; |
| 244 | 239 | ||
| 245 | idx = res - &add_res->dev->resource[0]; | 240 | idx = res - &add_res->dev->resource[0]; |
| @@ -255,10 +250,9 @@ static void reassign_resources_sorted(struct list_head *realloc_head, | |||
| 255 | (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); | 250 | (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN); |
| 256 | if (pci_reassign_resource(add_res->dev, idx, | 251 | if (pci_reassign_resource(add_res->dev, idx, |
| 257 | add_size, align)) | 252 | add_size, align)) |
| 258 | pci_printk(KERN_DEBUG, add_res->dev, | 253 | pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n", |
| 259 | "failed to add %llx res[%d]=%pR\n", | 254 | (unsigned long long) add_size, idx, |
| 260 | (unsigned long long)add_size, | 255 | res); |
| 261 | idx, res); | ||
| 262 | } | 256 | } |
| 263 | out: | 257 | out: |
| 264 | list_del(&add_res->list); | 258 | list_del(&add_res->list); |
| @@ -267,14 +261,14 @@ out: | |||
| 267 | } | 261 | } |
| 268 | 262 | ||
| 269 | /** | 263 | /** |
| 270 | * assign_requested_resources_sorted() - satisfy resource requests | 264 | * assign_requested_resources_sorted() - Satisfy resource requests |
| 271 | * | 265 | * |
| 272 | * @head : head of the list tracking requests for resources | 266 | * @head: Head of the list tracking requests for resources |
| 273 | * @fail_head : head of the list tracking requests that could | 267 | * @fail_head: Head of the list tracking requests that could not be |
| 274 | * not be allocated | 268 | * allocated |
| 275 | * | 269 | * |
| 276 | * Satisfy resource requests of each element in the list. Add | 270 | * Satisfy resource requests of each element in the list. Add requests that |
| 277 | * requests that could not satisfied to the failed_list. | 271 | * could not be satisfied to the failed_list. |
| 278 | */ | 272 | */ |
| 279 | static void assign_requested_resources_sorted(struct list_head *head, | 273 | static void assign_requested_resources_sorted(struct list_head *head, |
| 280 | struct list_head *fail_head) | 274 | struct list_head *fail_head) |
| @@ -290,8 +284,9 @@ static void assign_requested_resources_sorted(struct list_head *head, | |||
| 290 | pci_assign_resource(dev_res->dev, idx)) { | 284 | pci_assign_resource(dev_res->dev, idx)) { |
| 291 | if (fail_head) { | 285 | if (fail_head) { |
| 292 | /* | 286 | /* |
| 293 | * if the failed res is for ROM BAR, and it will | 287 | * If the failed resource is a ROM BAR and |
| 294 | * be enabled later, don't add it to the list | 288 | * it will be enabled later, don't add it |
| 289 | * to the list. | ||
| 295 | */ | 290 | */ |
| 296 | if (!((idx == PCI_ROM_RESOURCE) && | 291 | if (!((idx == PCI_ROM_RESOURCE) && |
| 297 | (!(res->flags & IORESOURCE_ROM_ENABLE)))) | 292 | (!(res->flags & IORESOURCE_ROM_ENABLE)))) |
| @@ -310,15 +305,14 @@ static unsigned long pci_fail_res_type_mask(struct list_head *fail_head) | |||
| 310 | struct pci_dev_resource *fail_res; | 305 | struct pci_dev_resource *fail_res; |
| 311 | unsigned long mask = 0; | 306 | unsigned long mask = 0; |
| 312 | 307 | ||
| 313 | /* check failed type */ | 308 | /* Check failed type */ |
| 314 | list_for_each_entry(fail_res, fail_head, list) | 309 | list_for_each_entry(fail_res, fail_head, list) |
| 315 | mask |= fail_res->flags; | 310 | mask |= fail_res->flags; |
| 316 | 311 | ||
| 317 | /* | 312 | /* |
| 318 | * one pref failed resource will set IORESOURCE_MEM, | 313 | * One pref failed resource will set IORESOURCE_MEM, as we can |
| 319 | * as we can allocate pref in non-pref range. | 314 | * allocate pref in non-pref range. Will release all assigned |
| 320 | * Will release all assigned non-pref sibling resources | 315 | * non-pref sibling resources according to that bit. |
| 321 | * according to that bit. | ||
| 322 | */ | 316 | */ |
| 323 | return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); | 317 | return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH); |
| 324 | } | 318 | } |
| @@ -328,11 +322,11 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res) | |||
| 328 | if (res->flags & IORESOURCE_IO) | 322 | if (res->flags & IORESOURCE_IO) |
| 329 | return !!(mask & IORESOURCE_IO); | 323 | return !!(mask & IORESOURCE_IO); |
| 330 | 324 | ||
| 331 | /* check pref at first */ | 325 | /* Check pref at first */ |
| 332 | if (res->flags & IORESOURCE_PREFETCH) { | 326 | if (res->flags & IORESOURCE_PREFETCH) { |
| 333 | if (mask & IORESOURCE_PREFETCH) | 327 | if (mask & IORESOURCE_PREFETCH) |
| 334 | return true; | 328 | return true; |
| 335 | /* count pref if its parent is non-pref */ | 329 | /* Count pref if its parent is non-pref */ |
| 336 | else if ((mask & IORESOURCE_MEM) && | 330 | else if ((mask & IORESOURCE_MEM) && |
| 337 | !(res->parent->flags & IORESOURCE_PREFETCH)) | 331 | !(res->parent->flags & IORESOURCE_PREFETCH)) |
| 338 | return true; | 332 | return true; |
| @@ -343,33 +337,33 @@ static bool pci_need_to_release(unsigned long mask, struct resource *res) | |||
| 343 | if (res->flags & IORESOURCE_MEM) | 337 | if (res->flags & IORESOURCE_MEM) |
| 344 | return !!(mask & IORESOURCE_MEM); | 338 | return !!(mask & IORESOURCE_MEM); |
| 345 | 339 | ||
| 346 | return false; /* should not get here */ | 340 | return false; /* Should not get here */ |
| 347 | } | 341 | } |
| 348 | 342 | ||
| 349 | static void __assign_resources_sorted(struct list_head *head, | 343 | static void __assign_resources_sorted(struct list_head *head, |
| 350 | struct list_head *realloc_head, | 344 | struct list_head *realloc_head, |
| 351 | struct list_head *fail_head) | 345 | struct list_head *fail_head) |
| 352 | { | 346 | { |
| 353 | /* | 347 | /* |
| 354 | * Should not assign requested resources at first. | 348 | * Should not assign requested resources at first. They could be |
| 355 | * they could be adjacent, so later reassign can not reallocate | 349 | * adjacent, so later reassign can not reallocate them one by one in |
| 356 | * them one by one in parent resource window. | 350 | * parent resource window. |
| 357 | * Try to assign requested + add_size at beginning | 351 | * |
| 358 | * if could do that, could get out early. | 352 | * Try to assign requested + add_size at beginning. If could do that, |
| 359 | * if could not do that, we still try to assign requested at first, | 353 | * could get out early. If could not do that, we still try to assign |
| 360 | * then try to reassign add_size for some resources. | 354 | * requested at first, then try to reassign add_size for some resources. |
| 361 | * | 355 | * |
| 362 | * Separate three resource type checking if we need to release | 356 | * Separate three resource type checking if we need to release |
| 363 | * assigned resource after requested + add_size try. | 357 | * assigned resource after requested + add_size try. |
| 364 | * 1. if there is io port assign fail, will release assigned | 358 | * |
| 365 | * io port. | 359 | * 1. If IO port assignment fails, will release assigned IO |
| 366 | * 2. if there is pref mmio assign fail, release assigned | 360 | * port. |
| 367 | * pref mmio. | 361 | * 2. If pref MMIO assignment fails, release assigned pref |
| 368 | * if assigned pref mmio's parent is non-pref mmio and there | 362 | * MMIO. If assigned pref MMIO's parent is non-pref MMIO |
| 369 | * is non-pref mmio assign fail, will release that assigned | 363 | * and non-pref MMIO assignment fails, will release that |
| 370 | * pref mmio. | 364 | * assigned pref MMIO. |
| 371 | * 3. if there is non-pref mmio assign fail or pref mmio | 365 | * 3. If non-pref MMIO assignment fails or pref MMIO |
| 372 | * assigned fail, will release assigned non-pref mmio. | 366 | * assignment fails, will release assigned non-pref MMIO. |
| 373 | */ | 367 | */ |
| 374 | LIST_HEAD(save_head); | 368 | LIST_HEAD(save_head); |
| 375 | LIST_HEAD(local_fail_head); | 369 | LIST_HEAD(local_fail_head); |
| @@ -398,7 +392,7 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 398 | /* | 392 | /* |
| 399 | * There are two kinds of additional resources in the list: | 393 | * There are two kinds of additional resources in the list: |
| 400 | * 1. bridge resource -- IORESOURCE_STARTALIGN | 394 | * 1. bridge resource -- IORESOURCE_STARTALIGN |
| 401 | * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN | 395 | * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN |
| 402 | * Here just fix the additional alignment for bridge | 396 | * Here just fix the additional alignment for bridge |
| 403 | */ | 397 | */ |
| 404 | if (!(dev_res->res->flags & IORESOURCE_STARTALIGN)) | 398 | if (!(dev_res->res->flags & IORESOURCE_STARTALIGN)) |
| @@ -407,10 +401,10 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 407 | add_align = get_res_add_align(realloc_head, dev_res->res); | 401 | add_align = get_res_add_align(realloc_head, dev_res->res); |
| 408 | 402 | ||
| 409 | /* | 403 | /* |
| 410 | * The "head" list is sorted by the alignment to make sure | 404 | * The "head" list is sorted by alignment so resources with |
| 411 | * resources with bigger alignment will be assigned first. | 405 | * bigger alignment will be assigned first. After we |
| 412 | * After we change the alignment of a dev_res in "head" list, | 406 | * change the alignment of a dev_res in "head" list, we |
| 413 | * we need to reorder the list by alignment to make it | 407 | * need to reorder the list by alignment to make it |
| 414 | * consistent. | 408 | * consistent. |
| 415 | */ | 409 | */ |
| 416 | if (add_align > dev_res->res->start) { | 410 | if (add_align > dev_res->res->start) { |
| @@ -435,7 +429,7 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 435 | /* Try updated head list with add_size added */ | 429 | /* Try updated head list with add_size added */ |
| 436 | assign_requested_resources_sorted(head, &local_fail_head); | 430 | assign_requested_resources_sorted(head, &local_fail_head); |
| 437 | 431 | ||
| 438 | /* all assigned with add_size ? */ | 432 | /* All assigned with add_size? */ |
| 439 | if (list_empty(&local_fail_head)) { | 433 | if (list_empty(&local_fail_head)) { |
| 440 | /* Remove head list from realloc_head list */ | 434 | /* Remove head list from realloc_head list */ |
| 441 | list_for_each_entry(dev_res, head, list) | 435 | list_for_each_entry(dev_res, head, list) |
| @@ -445,13 +439,13 @@ static void __assign_resources_sorted(struct list_head *head, | |||
| 445 | return; | 439 | return; |
| 446 | } | 440 | } |
| 447 | 441 | ||
| 448 | /* check failed type */ | 442 | /* Check failed type */ |
| 449 | fail_type = pci_fail_res_type_mask(&local_fail_head); | 443 | fail_type = pci_fail_res_type_mask(&local_fail_head); |
| 450 | /* remove not need to be released assigned res from head list etc */ | 444 | /* Remove not need to be released assigned res from head list etc */ |
| 451 | list_for_each_entry_safe(dev_res, tmp_res, head, list) | 445 | list_for_each_entry_safe(dev_res, tmp_res, head, list) |
| 452 | if (dev_res->res->parent && | 446 | if (dev_res->res->parent && |
| 453 | !pci_need_to_release(fail_type, dev_res->res)) { | 447 | !pci_need_to_release(fail_type, dev_res->res)) { |
| 454 | /* remove it from realloc_head list */ | 448 | /* Remove it from realloc_head list */ |
| 455 | remove_from_list(realloc_head, dev_res->res); | 449 | remove_from_list(realloc_head, dev_res->res); |
| 456 | remove_from_list(&save_head, dev_res->res); | 450 | remove_from_list(&save_head, dev_res->res); |
| 457 | list_del(&dev_res->list); | 451 | list_del(&dev_res->list); |
| @@ -477,16 +471,15 @@ requested_and_reassign: | |||
| 477 | /* Satisfy the must-have resource requests */ | 471 | /* Satisfy the must-have resource requests */ |
| 478 | assign_requested_resources_sorted(head, fail_head); | 472 | assign_requested_resources_sorted(head, fail_head); |
| 479 | 473 | ||
| 480 | /* Try to satisfy any additional optional resource | 474 | /* Try to satisfy any additional optional resource requests */ |
| 481 | requests */ | ||
| 482 | if (realloc_head) | 475 | if (realloc_head) |
| 483 | reassign_resources_sorted(realloc_head, head); | 476 | reassign_resources_sorted(realloc_head, head); |
| 484 | free_list(head); | 477 | free_list(head); |
| 485 | } | 478 | } |
| 486 | 479 | ||
| 487 | static void pdev_assign_resources_sorted(struct pci_dev *dev, | 480 | static void pdev_assign_resources_sorted(struct pci_dev *dev, |
| 488 | struct list_head *add_head, | 481 | struct list_head *add_head, |
| 489 | struct list_head *fail_head) | 482 | struct list_head *fail_head) |
| 490 | { | 483 | { |
| 491 | LIST_HEAD(head); | 484 | LIST_HEAD(head); |
| 492 | 485 | ||
| @@ -563,17 +556,19 @@ void pci_setup_cardbus(struct pci_bus *bus) | |||
| 563 | } | 556 | } |
| 564 | EXPORT_SYMBOL(pci_setup_cardbus); | 557 | EXPORT_SYMBOL(pci_setup_cardbus); |
| 565 | 558 | ||
| 566 | /* Initialize bridges with base/limit values we have collected. | 559 | /* |
| 567 | PCI-to-PCI Bridge Architecture Specification rev. 1.1 (1998) | 560 | * Initialize bridges with base/limit values we have collected. PCI-to-PCI |
| 568 | requires that if there is no I/O ports or memory behind the | 561 | * Bridge Architecture Specification rev. 1.1 (1998) requires that if there |
| 569 | bridge, corresponding range must be turned off by writing base | 562 | * are no I/O ports or memory behind the bridge, the corresponding range |
| 570 | value greater than limit to the bridge's base/limit registers. | 563 | * must be turned off by writing base value greater than limit to the |
| 571 | 564 | * bridge's base/limit registers. | |
| 572 | Note: care must be taken when updating I/O base/limit registers | 565 | * |
| 573 | of bridges which support 32-bit I/O. This update requires two | 566 | * Note: care must be taken when updating I/O base/limit registers of |
| 574 | config space writes, so it's quite possible that an I/O window of | 567 | * bridges which support 32-bit I/O. This update requires two config space |
| 575 | the bridge will have some undesirable address (e.g. 0) after the | 568 | * writes, so it's quite possible that an I/O window of the bridge will |
| 576 | first write. Ditto 64-bit prefetchable MMIO. */ | 569 | * have some undesirable address (e.g. 0) after the first write. Ditto |
| 570 | * 64-bit prefetchable MMIO. | ||
| 571 | */ | ||
| 577 | static void pci_setup_bridge_io(struct pci_dev *bridge) | 572 | static void pci_setup_bridge_io(struct pci_dev *bridge) |
| 578 | { | 573 | { |
| 579 | struct resource *res; | 574 | struct resource *res; |
| @@ -587,7 +582,7 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) | |||
| 587 | if (bridge->io_window_1k) | 582 | if (bridge->io_window_1k) |
| 588 | io_mask = PCI_IO_1K_RANGE_MASK; | 583 | io_mask = PCI_IO_1K_RANGE_MASK; |
| 589 | 584 | ||
| 590 | /* Set up the top and bottom of the PCI I/O segment for this bus. */ | 585 | /* Set up the top and bottom of the PCI I/O segment for this bus */ |
| 591 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; | 586 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0]; |
| 592 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 587 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| 593 | if (res->flags & IORESOURCE_IO) { | 588 | if (res->flags & IORESOURCE_IO) { |
| @@ -595,19 +590,19 @@ static void pci_setup_bridge_io(struct pci_dev *bridge) | |||
| 595 | io_base_lo = (region.start >> 8) & io_mask; | 590 | io_base_lo = (region.start >> 8) & io_mask; |
| 596 | io_limit_lo = (region.end >> 8) & io_mask; | 591 | io_limit_lo = (region.end >> 8) & io_mask; |
| 597 | l = ((u16) io_limit_lo << 8) | io_base_lo; | 592 | l = ((u16) io_limit_lo << 8) | io_base_lo; |
| 598 | /* Set up upper 16 bits of I/O base/limit. */ | 593 | /* Set up upper 16 bits of I/O base/limit */ |
| 599 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); | 594 | io_upper16 = (region.end & 0xffff0000) | (region.start >> 16); |
| 600 | pci_info(bridge, " bridge window %pR\n", res); | 595 | pci_info(bridge, " bridge window %pR\n", res); |
| 601 | } else { | 596 | } else { |
| 602 | /* Clear upper 16 bits of I/O base/limit. */ | 597 | /* Clear upper 16 bits of I/O base/limit */ |
| 603 | io_upper16 = 0; | 598 | io_upper16 = 0; |
| 604 | l = 0x00f0; | 599 | l = 0x00f0; |
| 605 | } | 600 | } |
| 606 | /* Temporarily disable the I/O range before updating PCI_IO_BASE. */ | 601 | /* Temporarily disable the I/O range before updating PCI_IO_BASE */ |
| 607 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); | 602 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff); |
| 608 | /* Update lower 16 bits of I/O base/limit. */ | 603 | /* Update lower 16 bits of I/O base/limit */ |
| 609 | pci_write_config_word(bridge, PCI_IO_BASE, l); | 604 | pci_write_config_word(bridge, PCI_IO_BASE, l); |
| 610 | /* Update upper 16 bits of I/O base/limit. */ | 605 | /* Update upper 16 bits of I/O base/limit */ |
| 611 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); | 606 | pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16); |
| 612 | } | 607 | } |
| 613 | 608 | ||
| @@ -617,7 +612,7 @@ static void pci_setup_bridge_mmio(struct pci_dev *bridge) | |||
| 617 | struct pci_bus_region region; | 612 | struct pci_bus_region region; |
| 618 | u32 l; | 613 | u32 l; |
| 619 | 614 | ||
| 620 | /* Set up the top and bottom of the PCI Memory segment for this bus. */ | 615 | /* Set up the top and bottom of the PCI Memory segment for this bus */ |
| 621 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; | 616 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1]; |
| 622 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 617 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| 623 | if (res->flags & IORESOURCE_MEM) { | 618 | if (res->flags & IORESOURCE_MEM) { |
| @@ -636,12 +631,14 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) | |||
| 636 | struct pci_bus_region region; | 631 | struct pci_bus_region region; |
| 637 | u32 l, bu, lu; | 632 | u32 l, bu, lu; |
| 638 | 633 | ||
| 639 | /* Clear out the upper 32 bits of PREF limit. | 634 | /* |
| 640 | If PCI_PREF_BASE_UPPER32 was non-zero, this temporarily | 635 | * Clear out the upper 32 bits of PREF limit. If |
| 641 | disables PREF range, which is ok. */ | 636 | * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables |
| 637 | * PREF range, which is ok. | ||
| 638 | */ | ||
| 642 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); | 639 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0); |
| 643 | 640 | ||
| 644 | /* Set up PREF base/limit. */ | 641 | /* Set up PREF base/limit */ |
| 645 | bu = lu = 0; | 642 | bu = lu = 0; |
| 646 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; | 643 | res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2]; |
| 647 | pcibios_resource_to_bus(bridge->bus, ®ion, res); | 644 | pcibios_resource_to_bus(bridge->bus, ®ion, res); |
| @@ -658,7 +655,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge) | |||
| 658 | } | 655 | } |
| 659 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); | 656 | pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l); |
| 660 | 657 | ||
| 661 | /* Set the upper 32 bits of PREF base & limit. */ | 658 | /* Set the upper 32 bits of PREF base & limit */ |
| 662 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); | 659 | pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu); |
| 663 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); | 660 | pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu); |
| 664 | } | 661 | } |
| @@ -702,13 +699,13 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) | |||
| 702 | return 0; | 699 | return 0; |
| 703 | 700 | ||
| 704 | if (pci_claim_resource(bridge, i) == 0) | 701 | if (pci_claim_resource(bridge, i) == 0) |
| 705 | return 0; /* claimed the window */ | 702 | return 0; /* Claimed the window */ |
| 706 | 703 | ||
| 707 | if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) | 704 | if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI) |
| 708 | return 0; | 705 | return 0; |
| 709 | 706 | ||
| 710 | if (!pci_bus_clip_resource(bridge, i)) | 707 | if (!pci_bus_clip_resource(bridge, i)) |
| 711 | return -EINVAL; /* clipping didn't change anything */ | 708 | return -EINVAL; /* Clipping didn't change anything */ |
| 712 | 709 | ||
| 713 | switch (i - PCI_BRIDGE_RESOURCES) { | 710 | switch (i - PCI_BRIDGE_RESOURCES) { |
| 714 | case 0: | 711 | case 0: |
| @@ -725,14 +722,16 @@ int pci_claim_bridge_resource(struct pci_dev *bridge, int i) | |||
| 725 | } | 722 | } |
| 726 | 723 | ||
| 727 | if (pci_claim_resource(bridge, i) == 0) | 724 | if (pci_claim_resource(bridge, i) == 0) |
| 728 | return 0; /* claimed a smaller window */ | 725 | return 0; /* Claimed a smaller window */ |
| 729 | 726 | ||
| 730 | return -EINVAL; | 727 | return -EINVAL; |
| 731 | } | 728 | } |
| 732 | 729 | ||
| 733 | /* Check whether the bridge supports optional I/O and | 730 | /* |
| 734 | prefetchable memory ranges. If not, the respective | 731 | * Check whether the bridge supports optional I/O and prefetchable memory |
| 735 | base/limit registers must be read-only and read as 0. */ | 732 | * ranges. If not, the respective base/limit registers must be read-only |
| 733 | * and read as 0. | ||
| 734 | */ | ||
| 736 | static void pci_bridge_check_ranges(struct pci_bus *bus) | 735 | static void pci_bridge_check_ranges(struct pci_bus *bus) |
| 737 | { | 736 | { |
| 738 | struct pci_dev *bridge = bus->self; | 737 | struct pci_dev *bridge = bus->self; |
| @@ -752,12 +751,14 @@ static void pci_bridge_check_ranges(struct pci_bus *bus) | |||
| 752 | } | 751 | } |
| 753 | } | 752 | } |
| 754 | 753 | ||
| 755 | /* Helper function for sizing routines: find first available | 754 | /* |
| 756 | bus resource of a given type. Note: we intentionally skip | 755 | * Helper function for sizing routines: find first available bus resource |
| 757 | the bus resources which have already been assigned (that is, | 756 | * of a given type. Note: we intentionally skip the bus resources which |
| 758 | have non-NULL parent resource). */ | 757 | * have already been assigned (that is, have non-NULL parent resource). |
| 758 | */ | ||
| 759 | static struct resource *find_free_bus_resource(struct pci_bus *bus, | 759 | static struct resource *find_free_bus_resource(struct pci_bus *bus, |
| 760 | unsigned long type_mask, unsigned long type) | 760 | unsigned long type_mask, |
| 761 | unsigned long type) | ||
| 761 | { | 762 | { |
| 762 | int i; | 763 | int i; |
| 763 | struct resource *r; | 764 | struct resource *r; |
| @@ -772,19 +773,21 @@ static struct resource *find_free_bus_resource(struct pci_bus *bus, | |||
| 772 | } | 773 | } |
| 773 | 774 | ||
| 774 | static resource_size_t calculate_iosize(resource_size_t size, | 775 | static resource_size_t calculate_iosize(resource_size_t size, |
| 775 | resource_size_t min_size, | 776 | resource_size_t min_size, |
| 776 | resource_size_t size1, | 777 | resource_size_t size1, |
| 777 | resource_size_t add_size, | 778 | resource_size_t add_size, |
| 778 | resource_size_t children_add_size, | 779 | resource_size_t children_add_size, |
| 779 | resource_size_t old_size, | 780 | resource_size_t old_size, |
| 780 | resource_size_t align) | 781 | resource_size_t align) |
| 781 | { | 782 | { |
| 782 | if (size < min_size) | 783 | if (size < min_size) |
| 783 | size = min_size; | 784 | size = min_size; |
| 784 | if (old_size == 1) | 785 | if (old_size == 1) |
| 785 | old_size = 0; | 786 | old_size = 0; |
| 786 | /* To be fixed in 2.5: we should have sort of HAVE_ISA | 787 | /* |
| 787 | flag in the struct pci_bus. */ | 788 | * To be fixed in 2.5: we should have sort of HAVE_ISA flag in the |
| 789 | * struct pci_bus. | ||
| 790 | */ | ||
| 788 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) | 791 | #if defined(CONFIG_ISA) || defined(CONFIG_EISA) |
| 789 | size = (size & 0xff) + ((size & ~0xffUL) << 2); | 792 | size = (size & 0xff) + ((size & ~0xffUL) << 2); |
| 790 | #endif | 793 | #endif |
| @@ -797,11 +800,11 @@ static resource_size_t calculate_iosize(resource_size_t size, | |||
| 797 | } | 800 | } |
| 798 | 801 | ||
| 799 | static resource_size_t calculate_memsize(resource_size_t size, | 802 | static resource_size_t calculate_memsize(resource_size_t size, |
| 800 | resource_size_t min_size, | 803 | resource_size_t min_size, |
| 801 | resource_size_t add_size, | 804 | resource_size_t add_size, |
| 802 | resource_size_t children_add_size, | 805 | resource_size_t children_add_size, |
| 803 | resource_size_t old_size, | 806 | resource_size_t old_size, |
| 804 | resource_size_t align) | 807 | resource_size_t align) |
| 805 | { | 808 | { |
| 806 | if (size < min_size) | 809 | if (size < min_size) |
| 807 | size = min_size; | 810 | size = min_size; |
| @@ -824,8 +827,7 @@ resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus, | |||
| 824 | #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ | 827 | #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */ |
| 825 | #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ | 828 | #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */ |
| 826 | 829 | ||
| 827 | static resource_size_t window_alignment(struct pci_bus *bus, | 830 | static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type) |
| 828 | unsigned long type) | ||
| 829 | { | 831 | { |
| 830 | resource_size_t align = 1, arch_align; | 832 | resource_size_t align = 1, arch_align; |
| 831 | 833 | ||
| @@ -833,8 +835,8 @@ static resource_size_t window_alignment(struct pci_bus *bus, | |||
| 833 | align = PCI_P2P_DEFAULT_MEM_ALIGN; | 835 | align = PCI_P2P_DEFAULT_MEM_ALIGN; |
| 834 | else if (type & IORESOURCE_IO) { | 836 | else if (type & IORESOURCE_IO) { |
| 835 | /* | 837 | /* |
| 836 | * Per spec, I/O windows are 4K-aligned, but some | 838 | * Per spec, I/O windows are 4K-aligned, but some bridges have |
| 837 | * bridges have an extension to support 1K alignment. | 839 | * an extension to support 1K alignment. |
| 838 | */ | 840 | */ |
| 839 | if (bus->self->io_window_1k) | 841 | if (bus->self->io_window_1k) |
| 840 | align = PCI_P2P_DEFAULT_IO_ALIGN_1K; | 842 | align = PCI_P2P_DEFAULT_IO_ALIGN_1K; |
| @@ -847,20 +849,21 @@ static resource_size_t window_alignment(struct pci_bus *bus, | |||
| 847 | } | 849 | } |
| 848 | 850 | ||
| 849 | /** | 851 | /** |
| 850 | * pbus_size_io() - size the io window of a given bus | 852 | * pbus_size_io() - Size the I/O window of a given bus |
| 851 | * | 853 | * |
| 852 | * @bus : the bus | 854 | * @bus: The bus |
| 853 | * @min_size : the minimum io window that must to be allocated | 855 | * @min_size: The minimum I/O window that must be allocated |
| 854 | * @add_size : additional optional io window | 856 | * @add_size: Additional optional I/O window |
| 855 | * @realloc_head : track the additional io window on this list | 857 | * @realloc_head: Track the additional I/O window on this list |
| 856 | * | 858 | * |
| 857 | * Sizing the IO windows of the PCI-PCI bridge is trivial, | 859 | * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these |
| 858 | * since these windows have 1K or 4K granularity and the IO ranges | 860 | * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI |
| 859 | * of non-bridge PCI devices are limited to 256 bytes. | 861 | * devices are limited to 256 bytes. We must be careful with the ISA |
| 860 | * We must be careful with the ISA aliasing though. | 862 | * aliasing though. |
| 861 | */ | 863 | */ |
| 862 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | 864 | static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, |
| 863 | resource_size_t add_size, struct list_head *realloc_head) | 865 | resource_size_t add_size, |
| 866 | struct list_head *realloc_head) | ||
| 864 | { | 867 | { |
| 865 | struct pci_dev *dev; | 868 | struct pci_dev *dev; |
| 866 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, | 869 | struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO, |
| @@ -918,9 +921,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, | |||
| 918 | if (size1 > size0 && realloc_head) { | 921 | if (size1 > size0 && realloc_head) { |
| 919 | add_to_list(realloc_head, bus->self, b_res, size1-size0, | 922 | add_to_list(realloc_head, bus->self, b_res, size1-size0, |
| 920 | min_align); | 923 | min_align); |
| 921 | pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx\n", | 924 | pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n", |
| 922 | b_res, &bus->busn_res, | 925 | b_res, &bus->busn_res, |
| 923 | (unsigned long long)size1-size0); | 926 | (unsigned long long) size1 - size0); |
| 924 | } | 927 | } |
| 925 | } | 928 | } |
| 926 | 929 | ||
| @@ -947,33 +950,33 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns, | |||
| 947 | } | 950 | } |
| 948 | 951 | ||
| 949 | /** | 952 | /** |
| 950 | * pbus_size_mem() - size the memory window of a given bus | 953 | * pbus_size_mem() - Size the memory window of a given bus |
| 951 | * | 954 | * |
| 952 | * @bus : the bus | 955 | * @bus: The bus |
| 953 | * @mask: mask the resource flag, then compare it with type | 956 | * @mask: Mask the resource flag, then compare it with type |
| 954 | * @type: the type of free resource from bridge | 957 | * @type: The type of free resource from bridge |
| 955 | * @type2: second match type | 958 | * @type2: Second match type |
| 956 | * @type3: third match type | 959 | * @type3: Third match type |
| 957 | * @min_size : the minimum memory window that must to be allocated | 960 | * @min_size: The minimum memory window that must be allocated |
| 958 | * @add_size : additional optional memory window | 961 | * @add_size: Additional optional memory window |
| 959 | * @realloc_head : track the additional memory window on this list | 962 | * @realloc_head: Track the additional memory window on this list |
| 960 | * | 963 | * |
| 961 | * Calculate the size of the bus and minimal alignment which | 964 | * Calculate the size of the bus and minimal alignment which guarantees |
| 962 | * guarantees that all child resources fit in this size. | 965 | * that all child resources fit in this size. |
| 963 | * | 966 | * |
| 964 | * Returns -ENOSPC if there's no available bus resource of the desired type. | 967 | * Return -ENOSPC if there's no available bus resource of the desired |
| 965 | * Otherwise, sets the bus resource start/end to indicate the required | 968 | * type. Otherwise, set the bus resource start/end to indicate the |
| 966 | * size, adds things to realloc_head (if supplied), and returns 0. | 969 | * required size, add things to realloc_head (if supplied), and return 0. |
| 967 | */ | 970 | */ |
| 968 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | 971 | static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, |
| 969 | unsigned long type, unsigned long type2, | 972 | unsigned long type, unsigned long type2, |
| 970 | unsigned long type3, | 973 | unsigned long type3, resource_size_t min_size, |
| 971 | resource_size_t min_size, resource_size_t add_size, | 974 | resource_size_t add_size, |
| 972 | struct list_head *realloc_head) | 975 | struct list_head *realloc_head) |
| 973 | { | 976 | { |
| 974 | struct pci_dev *dev; | 977 | struct pci_dev *dev; |
| 975 | resource_size_t min_align, align, size, size0, size1; | 978 | resource_size_t min_align, align, size, size0, size1; |
| 976 | resource_size_t aligns[18]; /* Alignments from 1Mb to 128Gb */ | 979 | resource_size_t aligns[18]; /* Alignments from 1MB to 128GB */ |
| 977 | int order, max_order; | 980 | int order, max_order; |
| 978 | struct resource *b_res = find_free_bus_resource(bus, | 981 | struct resource *b_res = find_free_bus_resource(bus, |
| 979 | mask | IORESOURCE_PREFETCH, type); | 982 | mask | IORESOURCE_PREFETCH, type); |
| @@ -1002,12 +1005,12 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 1002 | continue; | 1005 | continue; |
| 1003 | r_size = resource_size(r); | 1006 | r_size = resource_size(r); |
| 1004 | #ifdef CONFIG_PCI_IOV | 1007 | #ifdef CONFIG_PCI_IOV |
| 1005 | /* put SRIOV requested res to the optional list */ | 1008 | /* Put SRIOV requested res to the optional list */ |
| 1006 | if (realloc_head && i >= PCI_IOV_RESOURCES && | 1009 | if (realloc_head && i >= PCI_IOV_RESOURCES && |
| 1007 | i <= PCI_IOV_RESOURCE_END) { | 1010 | i <= PCI_IOV_RESOURCE_END) { |
| 1008 | add_align = max(pci_resource_alignment(dev, r), add_align); | 1011 | add_align = max(pci_resource_alignment(dev, r), add_align); |
| 1009 | r->end = r->start - 1; | 1012 | r->end = r->start - 1; |
| 1010 | add_to_list(realloc_head, dev, r, r_size, 0/* don't care */); | 1013 | add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */); |
| 1011 | children_add_size += r_size; | 1014 | children_add_size += r_size; |
| 1012 | continue; | 1015 | continue; |
| 1013 | } | 1016 | } |
| @@ -1029,8 +1032,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 1029 | continue; | 1032 | continue; |
| 1030 | } | 1033 | } |
| 1031 | size += max(r_size, align); | 1034 | size += max(r_size, align); |
| 1032 | /* Exclude ranges with size > align from | 1035 | /* |
| 1033 | calculation of the alignment. */ | 1036 | * Exclude ranges with size > align from calculation of |
| 1037 | * the alignment. | ||
| 1038 | */ | ||
| 1034 | if (r_size <= align) | 1039 | if (r_size <= align) |
| 1035 | aligns[order] += align; | 1040 | aligns[order] += align; |
| 1036 | if (order > max_order) | 1041 | if (order > max_order) |
| @@ -1063,7 +1068,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, | |||
| 1063 | b_res->flags |= IORESOURCE_STARTALIGN; | 1068 | b_res->flags |= IORESOURCE_STARTALIGN; |
| 1064 | if (size1 > size0 && realloc_head) { | 1069 | if (size1 > size0 && realloc_head) { |
| 1065 | add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); | 1070 | add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align); |
| 1066 | pci_printk(KERN_DEBUG, bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", | 1071 | pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n", |
| 1067 | b_res, &bus->busn_res, | 1072 | b_res, &bus->busn_res, |
| 1068 | (unsigned long long) (size1 - size0), | 1073 | (unsigned long long) (size1 - size0), |
| 1069 | (unsigned long long) add_align); | 1074 | (unsigned long long) add_align); |
| @@ -1081,7 +1086,7 @@ unsigned long pci_cardbus_resource_alignment(struct resource *res) | |||
| 1081 | } | 1086 | } |
| 1082 | 1087 | ||
| 1083 | static void pci_bus_size_cardbus(struct pci_bus *bus, | 1088 | static void pci_bus_size_cardbus(struct pci_bus *bus, |
| 1084 | struct list_head *realloc_head) | 1089 | struct list_head *realloc_head) |
| 1085 | { | 1090 | { |
| 1086 | struct pci_dev *bridge = bus->self; | 1091 | struct pci_dev *bridge = bus->self; |
| 1087 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; | 1092 | struct resource *b_res = &bridge->resource[PCI_BRIDGE_RESOURCES]; |
| @@ -1091,8 +1096,8 @@ static void pci_bus_size_cardbus(struct pci_bus *bus, | |||
| 1091 | if (b_res[0].parent) | 1096 | if (b_res[0].parent) |
| 1092 | goto handle_b_res_1; | 1097 | goto handle_b_res_1; |
| 1093 | /* | 1098 | /* |
| 1094 | * Reserve some resources for CardBus. We reserve | 1099 | * Reserve some resources for CardBus. We reserve a fixed amount |
| 1095 | * a fixed amount of bus space for CardBus bridges. | 1100 | * of bus space for CardBus bridges. |
| 1096 | */ | 1101 | */ |
| 1097 | b_res[0].start = pci_cardbus_io_size; | 1102 | b_res[0].start = pci_cardbus_io_size; |
| 1098 | b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; | 1103 | b_res[0].end = b_res[0].start + pci_cardbus_io_size - 1; |
| @@ -1116,7 +1121,7 @@ handle_b_res_1: | |||
| 1116 | } | 1121 | } |
| 1117 | 1122 | ||
| 1118 | handle_b_res_2: | 1123 | handle_b_res_2: |
| 1119 | /* MEM1 must not be pref mmio */ | 1124 | /* MEM1 must not be pref MMIO */ |
| 1120 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); | 1125 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); |
| 1121 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) { | 1126 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) { |
| 1122 | ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; | 1127 | ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1; |
| @@ -1124,10 +1129,7 @@ handle_b_res_2: | |||
| 1124 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); | 1129 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); |
| 1125 | } | 1130 | } |
| 1126 | 1131 | ||
| 1127 | /* | 1132 | /* Check whether prefetchable memory is supported by this bridge. */ |
| 1128 | * Check whether prefetchable memory is supported | ||
| 1129 | * by this bridge. | ||
| 1130 | */ | ||
| 1131 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); | 1133 | pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl); |
| 1132 | if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) { | 1134 | if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) { |
| 1133 | ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; | 1135 | ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0; |
| @@ -1138,9 +1140,8 @@ handle_b_res_2: | |||
| 1138 | if (b_res[2].parent) | 1140 | if (b_res[2].parent) |
| 1139 | goto handle_b_res_3; | 1141 | goto handle_b_res_3; |
| 1140 | /* | 1142 | /* |
| 1141 | * If we have prefetchable memory support, allocate | 1143 | * If we have prefetchable memory support, allocate two regions. |
| 1142 | * two regions. Otherwise, allocate one region of | 1144 | * Otherwise, allocate one region of twice the size. |
| 1143 | * twice the size. | ||
| 1144 | */ | 1145 | */ |
| 1145 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { | 1146 | if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) { |
| 1146 | b_res[2].start = pci_cardbus_mem_size; | 1147 | b_res[2].start = pci_cardbus_mem_size; |
| @@ -1153,7 +1154,7 @@ handle_b_res_2: | |||
| 1153 | pci_cardbus_mem_size, pci_cardbus_mem_size); | 1154 | pci_cardbus_mem_size, pci_cardbus_mem_size); |
| 1154 | } | 1155 | } |
| 1155 | 1156 | ||
| 1156 | /* reduce that to half */ | 1157 | /* Reduce that to half */ |
| 1157 | b_res_3_size = pci_cardbus_mem_size; | 1158 | b_res_3_size = pci_cardbus_mem_size; |
| 1158 | } | 1159 | } |
| 1159 | 1160 | ||
| @@ -1204,7 +1205,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) | |||
| 1204 | 1205 | ||
| 1205 | switch (bus->self->hdr_type) { | 1206 | switch (bus->self->hdr_type) { |
| 1206 | case PCI_HEADER_TYPE_CARDBUS: | 1207 | case PCI_HEADER_TYPE_CARDBUS: |
| 1207 | /* don't size cardbuses yet. */ | 1208 | /* Don't size CardBuses yet */ |
| 1208 | break; | 1209 | break; |
| 1209 | 1210 | ||
| 1210 | case PCI_HEADER_TYPE_BRIDGE: | 1211 | case PCI_HEADER_TYPE_BRIDGE: |
| @@ -1271,18 +1272,17 @@ void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head) | |||
| 1271 | 1272 | ||
| 1272 | /* | 1273 | /* |
| 1273 | * Compute the size required to put everything else in the | 1274 | * Compute the size required to put everything else in the |
| 1274 | * non-prefetchable window. This includes: | 1275 | * non-prefetchable window. This includes: |
| 1275 | * | 1276 | * |
| 1276 | * - all non-prefetchable resources | 1277 | * - all non-prefetchable resources |
| 1277 | * - 32-bit prefetchable resources if there's a 64-bit | 1278 | * - 32-bit prefetchable resources if there's a 64-bit |
| 1278 | * prefetchable window or no prefetchable window at all | 1279 | * prefetchable window or no prefetchable window at all |
| 1279 | * - 64-bit prefetchable resources if there's no | 1280 | * - 64-bit prefetchable resources if there's no prefetchable |
| 1280 | * prefetchable window at all | 1281 | * window at all |
| 1281 | * | 1282 | * |
| 1282 | * Note that the strategy in __pci_assign_resource() must | 1283 | * Note that the strategy in __pci_assign_resource() must match |
| 1283 | * match that used here. Specifically, we cannot put a | 1284 | * that used here. Specifically, we cannot put a 32-bit |
| 1284 | * 32-bit prefetchable resource in a 64-bit prefetchable | 1285 | * prefetchable resource in a 64-bit prefetchable window. |
| 1285 | * window. | ||
| 1286 | */ | 1286 | */ |
| 1287 | pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3, | 1287 | pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3, |
| 1288 | realloc_head ? 0 : additional_mem_size, | 1288 | realloc_head ? 0 : additional_mem_size, |
| @@ -1315,8 +1315,8 @@ static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r) | |||
| 1315 | } | 1315 | } |
| 1316 | 1316 | ||
| 1317 | /* | 1317 | /* |
| 1318 | * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they | 1318 | * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are |
| 1319 | * are skipped by pbus_assign_resources_sorted(). | 1319 | * skipped by pbus_assign_resources_sorted(). |
| 1320 | */ | 1320 | */ |
| 1321 | static void pdev_assign_fixed_resources(struct pci_dev *dev) | 1321 | static void pdev_assign_fixed_resources(struct pci_dev *dev) |
| 1322 | { | 1322 | { |
| @@ -1427,10 +1427,9 @@ static void pci_bus_allocate_resources(struct pci_bus *b) | |||
| 1427 | struct pci_bus *child; | 1427 | struct pci_bus *child; |
| 1428 | 1428 | ||
| 1429 | /* | 1429 | /* |
| 1430 | * Carry out a depth-first search on the PCI bus | 1430 | * Carry out a depth-first search on the PCI bus tree to allocate |
| 1431 | * tree to allocate bridge apertures. Read the | 1431 | * bridge apertures. Read the programmed bridge bases and |
| 1432 | * programmed bridge bases and recursively claim | 1432 | * recursively claim the respective bridge resources. |
| 1433 | * the respective bridge resources. | ||
| 1434 | */ | 1433 | */ |
| 1435 | if (b->self) { | 1434 | if (b->self) { |
| 1436 | pci_read_bridge_bases(b); | 1435 | pci_read_bridge_bases(b); |
| @@ -1484,7 +1483,7 @@ static void __pci_bridge_assign_resources(const struct pci_dev *bridge, | |||
| 1484 | IORESOURCE_MEM_64) | 1483 | IORESOURCE_MEM_64) |
| 1485 | 1484 | ||
| 1486 | static void pci_bridge_release_resources(struct pci_bus *bus, | 1485 | static void pci_bridge_release_resources(struct pci_bus *bus, |
| 1487 | unsigned long type) | 1486 | unsigned long type) |
| 1488 | { | 1487 | { |
| 1489 | struct pci_dev *dev = bus->self; | 1488 | struct pci_dev *dev = bus->self; |
| 1490 | struct resource *r; | 1489 | struct resource *r; |
| @@ -1495,16 +1494,14 @@ static void pci_bridge_release_resources(struct pci_bus *bus, | |||
| 1495 | b_res = &dev->resource[PCI_BRIDGE_RESOURCES]; | 1494 | b_res = &dev->resource[PCI_BRIDGE_RESOURCES]; |
| 1496 | 1495 | ||
| 1497 | /* | 1496 | /* |
| 1498 | * 1. if there is io port assign fail, will release bridge | 1497 | * 1. If IO port assignment fails, release bridge IO port. |
| 1499 | * io port. | 1498 | * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO. |
| 1500 | * 2. if there is non pref mmio assign fail, release bridge | 1499 | * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit, |
| 1501 | * nonpref mmio. | 1500 | * release bridge pref MMIO. |
| 1502 | * 3. if there is 64bit pref mmio assign fail, and bridge pref | 1501 | * 4. If pref MMIO assignment fails, and bridge pref is 32bit, |
| 1503 | * is 64bit, release bridge pref mmio. | 1502 | * release bridge pref MMIO. |
| 1504 | * 4. if there is pref mmio assign fail, and bridge pref is | 1503 | * 5. If pref MMIO assignment fails, and bridge pref is not |
| 1505 | * 32bit mmio, release bridge pref mmio | 1504 | * assigned, release bridge nonpref MMIO. |
| 1506 | * 5. if there is pref mmio assign fail, and bridge pref is not | ||
| 1507 | * assigned, release bridge nonpref mmio. | ||
| 1508 | */ | 1505 | */ |
| 1509 | if (type & IORESOURCE_IO) | 1506 | if (type & IORESOURCE_IO) |
| 1510 | idx = 0; | 1507 | idx = 0; |
| @@ -1524,25 +1521,22 @@ static void pci_bridge_release_resources(struct pci_bus *bus, | |||
| 1524 | if (!r->parent) | 1521 | if (!r->parent) |
| 1525 | return; | 1522 | return; |
| 1526 | 1523 | ||
| 1527 | /* | 1524 | /* If there are children, release them all */ |
| 1528 | * if there are children under that, we should release them | ||
| 1529 | * all | ||
| 1530 | */ | ||
| 1531 | release_child_resources(r); | 1525 | release_child_resources(r); |
| 1532 | if (!release_resource(r)) { | 1526 | if (!release_resource(r)) { |
| 1533 | type = old_flags = r->flags & PCI_RES_TYPE_MASK; | 1527 | type = old_flags = r->flags & PCI_RES_TYPE_MASK; |
| 1534 | pci_printk(KERN_DEBUG, dev, "resource %d %pR released\n", | 1528 | pci_info(dev, "resource %d %pR released\n", |
| 1535 | PCI_BRIDGE_RESOURCES + idx, r); | 1529 | PCI_BRIDGE_RESOURCES + idx, r); |
| 1536 | /* keep the old size */ | 1530 | /* Keep the old size */ |
| 1537 | r->end = resource_size(r) - 1; | 1531 | r->end = resource_size(r) - 1; |
| 1538 | r->start = 0; | 1532 | r->start = 0; |
| 1539 | r->flags = 0; | 1533 | r->flags = 0; |
| 1540 | 1534 | ||
| 1541 | /* avoiding touch the one without PREF */ | 1535 | /* Avoiding touch the one without PREF */ |
| 1542 | if (type & IORESOURCE_PREFETCH) | 1536 | if (type & IORESOURCE_PREFETCH) |
| 1543 | type = IORESOURCE_PREFETCH; | 1537 | type = IORESOURCE_PREFETCH; |
| 1544 | __pci_setup_bridge(bus, type); | 1538 | __pci_setup_bridge(bus, type); |
| 1545 | /* for next child res under same bridge */ | 1539 | /* For next child res under same bridge */ |
| 1546 | r->flags = old_flags; | 1540 | r->flags = old_flags; |
| 1547 | } | 1541 | } |
| 1548 | } | 1542 | } |
| @@ -1551,9 +1545,10 @@ enum release_type { | |||
| 1551 | leaf_only, | 1545 | leaf_only, |
| 1552 | whole_subtree, | 1546 | whole_subtree, |
| 1553 | }; | 1547 | }; |
| 1548 | |||
| 1554 | /* | 1549 | /* |
| 1555 | * try to release pci bridge resources that is from leaf bridge, | 1550 | * Try to release PCI bridge resources from leaf bridge, so we can allocate |
| 1556 | * so we can allocate big new one later | 1551 | * a larger window later. |
| 1557 | */ | 1552 | */ |
| 1558 | static void pci_bus_release_bridge_resources(struct pci_bus *bus, | 1553 | static void pci_bus_release_bridge_resources(struct pci_bus *bus, |
| 1559 | unsigned long type, | 1554 | unsigned long type, |
| @@ -1596,7 +1591,7 @@ static void pci_bus_dump_res(struct pci_bus *bus) | |||
| 1596 | if (!res || !res->end || !res->flags) | 1591 | if (!res || !res->end || !res->flags) |
| 1597 | continue; | 1592 | continue; |
| 1598 | 1593 | ||
| 1599 | dev_printk(KERN_DEBUG, &bus->dev, "resource %d %pR\n", i, res); | 1594 | dev_info(&bus->dev, "resource %d %pR\n", i, res); |
| 1600 | } | 1595 | } |
| 1601 | } | 1596 | } |
| 1602 | 1597 | ||
| @@ -1678,7 +1673,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) | |||
| 1678 | pcibios_resource_to_bus(dev->bus, ®ion, r); | 1673 | pcibios_resource_to_bus(dev->bus, ®ion, r); |
| 1679 | if (!region.start) { | 1674 | if (!region.start) { |
| 1680 | *unassigned = true; | 1675 | *unassigned = true; |
| 1681 | return 1; /* return early from pci_walk_bus() */ | 1676 | return 1; /* Return early from pci_walk_bus() */ |
| 1682 | } | 1677 | } |
| 1683 | } | 1678 | } |
| 1684 | 1679 | ||
| @@ -1686,7 +1681,7 @@ static int iov_resources_unassigned(struct pci_dev *dev, void *data) | |||
| 1686 | } | 1681 | } |
| 1687 | 1682 | ||
| 1688 | static enum enable_type pci_realloc_detect(struct pci_bus *bus, | 1683 | static enum enable_type pci_realloc_detect(struct pci_bus *bus, |
| 1689 | enum enable_type enable_local) | 1684 | enum enable_type enable_local) |
| 1690 | { | 1685 | { |
| 1691 | bool unassigned = false; | 1686 | bool unassigned = false; |
| 1692 | 1687 | ||
| @@ -1701,21 +1696,21 @@ static enum enable_type pci_realloc_detect(struct pci_bus *bus, | |||
| 1701 | } | 1696 | } |
| 1702 | #else | 1697 | #else |
| 1703 | static enum enable_type pci_realloc_detect(struct pci_bus *bus, | 1698 | static enum enable_type pci_realloc_detect(struct pci_bus *bus, |
| 1704 | enum enable_type enable_local) | 1699 | enum enable_type enable_local) |
| 1705 | { | 1700 | { |
| 1706 | return enable_local; | 1701 | return enable_local; |
| 1707 | } | 1702 | } |
| 1708 | #endif | 1703 | #endif |
| 1709 | 1704 | ||
| 1710 | /* | 1705 | /* |
| 1711 | * first try will not touch pci bridge res | 1706 | * First try will not touch PCI bridge res. |
| 1712 | * second and later try will clear small leaf bridge res | 1707 | * Second and later try will clear small leaf bridge res. |
| 1713 | * will stop till to the max depth if can not find good one | 1708 | * Will stop till to the max depth if can not find good one. |
| 1714 | */ | 1709 | */ |
| 1715 | void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) | 1710 | void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) |
| 1716 | { | 1711 | { |
| 1717 | LIST_HEAD(realloc_head); /* list of resources that | 1712 | LIST_HEAD(realloc_head); |
| 1718 | want additional resources */ | 1713 | /* List of resources that want additional resources */ |
| 1719 | struct list_head *add_list = NULL; | 1714 | struct list_head *add_list = NULL; |
| 1720 | int tried_times = 0; | 1715 | int tried_times = 0; |
| 1721 | enum release_type rel_type = leaf_only; | 1716 | enum release_type rel_type = leaf_only; |
| @@ -1724,26 +1719,26 @@ void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus) | |||
| 1724 | int pci_try_num = 1; | 1719 | int pci_try_num = 1; |
| 1725 | enum enable_type enable_local; | 1720 | enum enable_type enable_local; |
| 1726 | 1721 | ||
| 1727 | /* don't realloc if asked to do so */ | 1722 | /* Don't realloc if asked to do so */ |
| 1728 | enable_local = pci_realloc_detect(bus, pci_realloc_enable); | 1723 | enable_local = pci_realloc_detect(bus, pci_realloc_enable); |
| 1729 | if (pci_realloc_enabled(enable_local)) { | 1724 | if (pci_realloc_enabled(enable_local)) { |
| 1730 | int max_depth = pci_bus_get_depth(bus); | 1725 | int max_depth = pci_bus_get_depth(bus); |
| 1731 | 1726 | ||
| 1732 | pci_try_num = max_depth + 1; | 1727 | pci_try_num = max_depth + 1; |
| 1733 | dev_printk(KERN_DEBUG, &bus->dev, | 1728 | dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n", |
| 1734 | "max bus depth: %d pci_try_num: %d\n", | 1729 | max_depth, pci_try_num); |
| 1735 | max_depth, pci_try_num); | ||
| 1736 | } | 1730 | } |
| 1737 | 1731 | ||
| 1738 | again: | 1732 | again: |
| 1739 | /* | 1733 | /* |
| 1740 | * last try will use add_list, otherwise will try good to have as | 1734 | * Last try will use add_list, otherwise will try good to have as must |
| 1741 | * must have, so can realloc parent bridge resource | 1735 | * have, so can realloc parent bridge resource |
| 1742 | */ | 1736 | */ |
| 1743 | if (tried_times + 1 == pci_try_num) | 1737 | if (tried_times + 1 == pci_try_num) |
| 1744 | add_list = &realloc_head; | 1738 | add_list = &realloc_head; |
| 1745 | /* Depth first, calculate sizes and alignments of all | 1739 | /* |
| 1746 | subordinate buses. */ | 1740 | * Depth first, calculate sizes and alignments of all subordinate buses. |
| 1741 | */ | ||
| 1747 | __pci_bus_size_bridges(bus, add_list); | 1742 | __pci_bus_size_bridges(bus, add_list); |
| 1748 | 1743 | ||
| 1749 | /* Depth last, allocate resources and update the hardware. */ | 1744 | /* Depth last, allocate resources and update the hardware. */ |
| @@ -1752,7 +1747,7 @@ again: | |||
| 1752 | BUG_ON(!list_empty(add_list)); | 1747 | BUG_ON(!list_empty(add_list)); |
| 1753 | tried_times++; | 1748 | tried_times++; |
| 1754 | 1749 | ||
| 1755 | /* any device complain? */ | 1750 | /* Any device complain? */ |
| 1756 | if (list_empty(&fail_head)) | 1751 | if (list_empty(&fail_head)) |
| 1757 | goto dump; | 1752 | goto dump; |
| 1758 | 1753 | ||
| @@ -1766,23 +1761,23 @@ again: | |||
| 1766 | goto dump; | 1761 | goto dump; |
| 1767 | } | 1762 | } |
| 1768 | 1763 | ||
| 1769 | dev_printk(KERN_DEBUG, &bus->dev, | 1764 | dev_info(&bus->dev, "No. %d try to assign unassigned res\n", |
| 1770 | "No. %d try to assign unassigned res\n", tried_times + 1); | 1765 | tried_times + 1); |
| 1771 | 1766 | ||
| 1772 | /* third times and later will not check if it is leaf */ | 1767 | /* Third times and later will not check if it is leaf */ |
| 1773 | if ((tried_times + 1) > 2) | 1768 | if ((tried_times + 1) > 2) |
| 1774 | rel_type = whole_subtree; | 1769 | rel_type = whole_subtree; |
| 1775 | 1770 | ||
| 1776 | /* | 1771 | /* |
| 1777 | * Try to release leaf bridge's resources that doesn't fit resource of | 1772 | * Try to release leaf bridge's resources that doesn't fit resource of |
| 1778 | * child device under that bridge | 1773 | * child device under that bridge. |
| 1779 | */ | 1774 | */ |
| 1780 | list_for_each_entry(fail_res, &fail_head, list) | 1775 | list_for_each_entry(fail_res, &fail_head, list) |
| 1781 | pci_bus_release_bridge_resources(fail_res->dev->bus, | 1776 | pci_bus_release_bridge_resources(fail_res->dev->bus, |
| 1782 | fail_res->flags & PCI_RES_TYPE_MASK, | 1777 | fail_res->flags & PCI_RES_TYPE_MASK, |
| 1783 | rel_type); | 1778 | rel_type); |
| 1784 | 1779 | ||
| 1785 | /* restore size and flags */ | 1780 | /* Restore size and flags */ |
| 1786 | list_for_each_entry(fail_res, &fail_head, list) { | 1781 | list_for_each_entry(fail_res, &fail_head, list) { |
| 1787 | struct resource *res = fail_res->res; | 1782 | struct resource *res = fail_res->res; |
| 1788 | 1783 | ||
| @@ -1797,7 +1792,7 @@ again: | |||
| 1797 | goto again; | 1792 | goto again; |
| 1798 | 1793 | ||
| 1799 | dump: | 1794 | dump: |
| 1800 | /* dump the resource on buses */ | 1795 | /* Dump the resource on buses */ |
| 1801 | pci_bus_dump_resources(bus); | 1796 | pci_bus_dump_resources(bus); |
| 1802 | } | 1797 | } |
| 1803 | 1798 | ||
| @@ -1808,14 +1803,15 @@ void __init pci_assign_unassigned_resources(void) | |||
| 1808 | list_for_each_entry(root_bus, &pci_root_buses, node) { | 1803 | list_for_each_entry(root_bus, &pci_root_buses, node) { |
| 1809 | pci_assign_unassigned_root_bus_resources(root_bus); | 1804 | pci_assign_unassigned_root_bus_resources(root_bus); |
| 1810 | 1805 | ||
| 1811 | /* Make sure the root bridge has a companion ACPI device: */ | 1806 | /* Make sure the root bridge has a companion ACPI device */ |
| 1812 | if (ACPI_HANDLE(root_bus->bridge)) | 1807 | if (ACPI_HANDLE(root_bus->bridge)) |
| 1813 | acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge)); | 1808 | acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge)); |
| 1814 | } | 1809 | } |
| 1815 | } | 1810 | } |
| 1816 | 1811 | ||
| 1817 | static void extend_bridge_window(struct pci_dev *bridge, struct resource *res, | 1812 | static void extend_bridge_window(struct pci_dev *bridge, struct resource *res, |
| 1818 | struct list_head *add_list, resource_size_t available) | 1813 | struct list_head *add_list, |
| 1814 | resource_size_t available) | ||
| 1819 | { | 1815 | { |
| 1820 | struct pci_dev_resource *dev_res; | 1816 | struct pci_dev_resource *dev_res; |
| 1821 | 1817 | ||
| @@ -1839,8 +1835,10 @@ static void extend_bridge_window(struct pci_dev *bridge, struct resource *res, | |||
| 1839 | } | 1835 | } |
| 1840 | 1836 | ||
| 1841 | static void pci_bus_distribute_available_resources(struct pci_bus *bus, | 1837 | static void pci_bus_distribute_available_resources(struct pci_bus *bus, |
| 1842 | struct list_head *add_list, resource_size_t available_io, | 1838 | struct list_head *add_list, |
| 1843 | resource_size_t available_mmio, resource_size_t available_mmio_pref) | 1839 | resource_size_t available_io, |
| 1840 | resource_size_t available_mmio, | ||
| 1841 | resource_size_t available_mmio_pref) | ||
| 1844 | { | 1842 | { |
| 1845 | resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref; | 1843 | resource_size_t remaining_io, remaining_mmio, remaining_mmio_pref; |
| 1846 | unsigned int normal_bridges = 0, hotplug_bridges = 0; | 1844 | unsigned int normal_bridges = 0, hotplug_bridges = 0; |
| @@ -1864,7 +1862,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, | |||
| 1864 | 1862 | ||
| 1865 | /* | 1863 | /* |
| 1866 | * Calculate the total amount of extra resource space we can | 1864 | * Calculate the total amount of extra resource space we can |
| 1867 | * pass to bridges below this one. This is basically the | 1865 | * pass to bridges below this one. This is basically the |
| 1868 | * extra space reduced by the minimal required space for the | 1866 | * extra space reduced by the minimal required space for the |
| 1869 | * non-hotplug bridges. | 1867 | * non-hotplug bridges. |
| 1870 | */ | 1868 | */ |
| @@ -1874,7 +1872,7 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, | |||
| 1874 | 1872 | ||
| 1875 | /* | 1873 | /* |
| 1876 | * Calculate how many hotplug bridges and normal bridges there | 1874 | * Calculate how many hotplug bridges and normal bridges there |
| 1877 | * are on this bus. We will distribute the additional available | 1875 | * are on this bus. We will distribute the additional available |
| 1878 | * resources between hotplug bridges. | 1876 | * resources between hotplug bridges. |
| 1879 | */ | 1877 | */ |
| 1880 | for_each_pci_bridge(dev, bus) { | 1878 | for_each_pci_bridge(dev, bus) { |
| @@ -1909,8 +1907,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, | |||
| 1909 | 1907 | ||
| 1910 | /* | 1908 | /* |
| 1911 | * There is only one bridge on the bus so it gets all available | 1909 | * There is only one bridge on the bus so it gets all available |
| 1912 | * resources which it can then distribute to the possible | 1910 | * resources which it can then distribute to the possible hotplug |
| 1913 | * hotplug bridges below. | 1911 | * bridges below. |
| 1914 | */ | 1912 | */ |
| 1915 | if (hotplug_bridges + normal_bridges == 1) { | 1913 | if (hotplug_bridges + normal_bridges == 1) { |
| 1916 | dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); | 1914 | dev = list_first_entry(&bus->devices, struct pci_dev, bus_list); |
| @@ -1961,9 +1959,8 @@ static void pci_bus_distribute_available_resources(struct pci_bus *bus, | |||
| 1961 | } | 1959 | } |
| 1962 | } | 1960 | } |
| 1963 | 1961 | ||
| 1964 | static void | 1962 | static void pci_bridge_distribute_available_resources(struct pci_dev *bridge, |
| 1965 | pci_bridge_distribute_available_resources(struct pci_dev *bridge, | 1963 | struct list_head *add_list) |
| 1966 | struct list_head *add_list) | ||
| 1967 | { | 1964 | { |
| 1968 | resource_size_t available_io, available_mmio, available_mmio_pref; | 1965 | resource_size_t available_io, available_mmio, available_mmio_pref; |
| 1969 | const struct resource *res; | 1966 | const struct resource *res; |
| @@ -1980,14 +1977,17 @@ pci_bridge_distribute_available_resources(struct pci_dev *bridge, | |||
| 1980 | available_mmio_pref = resource_size(res); | 1977 | available_mmio_pref = resource_size(res); |
| 1981 | 1978 | ||
| 1982 | pci_bus_distribute_available_resources(bridge->subordinate, | 1979 | pci_bus_distribute_available_resources(bridge->subordinate, |
| 1983 | add_list, available_io, available_mmio, available_mmio_pref); | 1980 | add_list, available_io, |
| 1981 | available_mmio, | ||
| 1982 | available_mmio_pref); | ||
| 1984 | } | 1983 | } |
| 1985 | 1984 | ||
| 1986 | void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) | 1985 | void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) |
| 1987 | { | 1986 | { |
| 1988 | struct pci_bus *parent = bridge->subordinate; | 1987 | struct pci_bus *parent = bridge->subordinate; |
| 1989 | LIST_HEAD(add_list); /* list of resources that | 1988 | /* List of resources that want additional resources */ |
| 1990 | want additional resources */ | 1989 | LIST_HEAD(add_list); |
| 1990 | |||
| 1991 | int tried_times = 0; | 1991 | int tried_times = 0; |
| 1992 | LIST_HEAD(fail_head); | 1992 | LIST_HEAD(fail_head); |
| 1993 | struct pci_dev_resource *fail_res; | 1993 | struct pci_dev_resource *fail_res; |
| @@ -1997,9 +1997,9 @@ again: | |||
| 1997 | __pci_bus_size_bridges(parent, &add_list); | 1997 | __pci_bus_size_bridges(parent, &add_list); |
| 1998 | 1998 | ||
| 1999 | /* | 1999 | /* |
| 2000 | * Distribute remaining resources (if any) equally between | 2000 | * Distribute remaining resources (if any) equally between hotplug |
| 2001 | * hotplug bridges below. This makes it possible to extend the | 2001 | * bridges below. This makes it possible to extend the hierarchy |
| 2002 | * hierarchy later without running out of resources. | 2002 | * later without running out of resources. |
| 2003 | */ | 2003 | */ |
| 2004 | pci_bridge_distribute_available_resources(bridge, &add_list); | 2004 | pci_bridge_distribute_available_resources(bridge, &add_list); |
| 2005 | 2005 | ||
| @@ -2011,7 +2011,7 @@ again: | |||
| 2011 | goto enable_all; | 2011 | goto enable_all; |
| 2012 | 2012 | ||
| 2013 | if (tried_times >= 2) { | 2013 | if (tried_times >= 2) { |
| 2014 | /* still fail, don't need to try more */ | 2014 | /* Still fail, don't need to try more */ |
| 2015 | free_list(&fail_head); | 2015 | free_list(&fail_head); |
| 2016 | goto enable_all; | 2016 | goto enable_all; |
| 2017 | } | 2017 | } |
| @@ -2020,15 +2020,15 @@ again: | |||
| 2020 | tried_times + 1); | 2020 | tried_times + 1); |
| 2021 | 2021 | ||
| 2022 | /* | 2022 | /* |
| 2023 | * Try to release leaf bridge's resources that doesn't fit resource of | 2023 | * Try to release leaf bridge's resources that aren't big enough |
| 2024 | * child device under that bridge | 2024 | * to contain child device resources. |
| 2025 | */ | 2025 | */ |
| 2026 | list_for_each_entry(fail_res, &fail_head, list) | 2026 | list_for_each_entry(fail_res, &fail_head, list) |
| 2027 | pci_bus_release_bridge_resources(fail_res->dev->bus, | 2027 | pci_bus_release_bridge_resources(fail_res->dev->bus, |
| 2028 | fail_res->flags & PCI_RES_TYPE_MASK, | 2028 | fail_res->flags & PCI_RES_TYPE_MASK, |
| 2029 | whole_subtree); | 2029 | whole_subtree); |
| 2030 | 2030 | ||
| 2031 | /* restore size and flags */ | 2031 | /* Restore size and flags */ |
| 2032 | list_for_each_entry(fail_res, &fail_head, list) { | 2032 | list_for_each_entry(fail_res, &fail_head, list) { |
| 2033 | struct resource *res = fail_res->res; | 2033 | struct resource *res = fail_res->res; |
| 2034 | 2034 | ||
| @@ -2107,7 +2107,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) | |||
| 2107 | } | 2107 | } |
| 2108 | 2108 | ||
| 2109 | list_for_each_entry(dev_res, &saved, list) { | 2109 | list_for_each_entry(dev_res, &saved, list) { |
| 2110 | /* Skip the bridge we just assigned resources for. */ | 2110 | /* Skip the bridge we just assigned resources for */ |
| 2111 | if (bridge == dev_res->dev) | 2111 | if (bridge == dev_res->dev) |
| 2112 | continue; | 2112 | continue; |
| 2113 | 2113 | ||
| @@ -2119,7 +2119,7 @@ int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type) | |||
| 2119 | return 0; | 2119 | return 0; |
| 2120 | 2120 | ||
| 2121 | cleanup: | 2121 | cleanup: |
| 2122 | /* restore size and flags */ | 2122 | /* Restore size and flags */ |
| 2123 | list_for_each_entry(dev_res, &failed, list) { | 2123 | list_for_each_entry(dev_res, &failed, list) { |
| 2124 | struct resource *res = dev_res->res; | 2124 | struct resource *res = dev_res->res; |
| 2125 | 2125 | ||
| @@ -2151,8 +2151,8 @@ cleanup: | |||
| 2151 | void pci_assign_unassigned_bus_resources(struct pci_bus *bus) | 2151 | void pci_assign_unassigned_bus_resources(struct pci_bus *bus) |
| 2152 | { | 2152 | { |
| 2153 | struct pci_dev *dev; | 2153 | struct pci_dev *dev; |
| 2154 | LIST_HEAD(add_list); /* list of resources that | 2154 | /* List of resources that want additional resources */ |
| 2155 | want additional resources */ | 2155 | LIST_HEAD(add_list); |
| 2156 | 2156 | ||
| 2157 | down_read(&pci_bus_sem); | 2157 | down_read(&pci_bus_sem); |
| 2158 | for_each_pci_bridge(dev, bus) | 2158 | for_each_pci_bridge(dev, bus) |
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index c46d5e1ff536..f4d92b1afe7b 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c | |||
| @@ -403,7 +403,7 @@ static int pci_slot_init(void) | |||
| 403 | pci_slots_kset = kset_create_and_add("slots", NULL, | 403 | pci_slots_kset = kset_create_and_add("slots", NULL, |
| 404 | &pci_bus_kset->kobj); | 404 | &pci_bus_kset->kobj); |
| 405 | if (!pci_slots_kset) { | 405 | if (!pci_slots_kset) { |
| 406 | printk(KERN_ERR "PCI: Slot initialization failure\n"); | 406 | pr_err("PCI: Slot initialization failure\n"); |
| 407 | return -ENOMEM; | 407 | return -ENOMEM; |
| 408 | } | 408 | } |
| 409 | return 0; | 409 | return 0; |
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 0f7b80144863..bebbde4ebec0 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c | |||
| @@ -658,19 +658,25 @@ static int ioctl_flash_part_info(struct switchtec_dev *stdev, | |||
| 658 | 658 | ||
| 659 | static int ioctl_event_summary(struct switchtec_dev *stdev, | 659 | static int ioctl_event_summary(struct switchtec_dev *stdev, |
| 660 | struct switchtec_user *stuser, | 660 | struct switchtec_user *stuser, |
| 661 | struct switchtec_ioctl_event_summary __user *usum) | 661 | struct switchtec_ioctl_event_summary __user *usum, |
| 662 | size_t size) | ||
| 662 | { | 663 | { |
| 663 | struct switchtec_ioctl_event_summary s = {0}; | 664 | struct switchtec_ioctl_event_summary *s; |
| 664 | int i; | 665 | int i; |
| 665 | u32 reg; | 666 | u32 reg; |
| 667 | int ret = 0; | ||
| 666 | 668 | ||
| 667 | s.global = ioread32(&stdev->mmio_sw_event->global_summary); | 669 | s = kzalloc(sizeof(*s), GFP_KERNEL); |
| 668 | s.part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); | 670 | if (!s) |
| 669 | s.local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); | 671 | return -ENOMEM; |
| 672 | |||
| 673 | s->global = ioread32(&stdev->mmio_sw_event->global_summary); | ||
| 674 | s->part_bitmap = ioread32(&stdev->mmio_sw_event->part_event_bitmap); | ||
| 675 | s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary); | ||
| 670 | 676 | ||
| 671 | for (i = 0; i < stdev->partition_count; i++) { | 677 | for (i = 0; i < stdev->partition_count; i++) { |
| 672 | reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary); | 678 | reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary); |
| 673 | s.part[i] = reg; | 679 | s->part[i] = reg; |
| 674 | } | 680 | } |
| 675 | 681 | ||
| 676 | for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { | 682 | for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { |
| @@ -679,15 +685,19 @@ static int ioctl_event_summary(struct switchtec_dev *stdev, | |||
| 679 | break; | 685 | break; |
| 680 | 686 | ||
| 681 | reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary); | 687 | reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary); |
| 682 | s.pff[i] = reg; | 688 | s->pff[i] = reg; |
| 683 | } | 689 | } |
| 684 | 690 | ||
| 685 | if (copy_to_user(usum, &s, sizeof(s))) | 691 | if (copy_to_user(usum, s, size)) { |
| 686 | return -EFAULT; | 692 | ret = -EFAULT; |
| 693 | goto error_case; | ||
| 694 | } | ||
| 687 | 695 | ||
| 688 | stuser->event_cnt = atomic_read(&stdev->event_cnt); | 696 | stuser->event_cnt = atomic_read(&stdev->event_cnt); |
| 689 | 697 | ||
| 690 | return 0; | 698 | error_case: |
| 699 | kfree(s); | ||
| 700 | return ret; | ||
| 691 | } | 701 | } |
| 692 | 702 | ||
| 693 | static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev, | 703 | static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev, |
| @@ -977,8 +987,9 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd, | |||
| 977 | case SWITCHTEC_IOCTL_FLASH_PART_INFO: | 987 | case SWITCHTEC_IOCTL_FLASH_PART_INFO: |
| 978 | rc = ioctl_flash_part_info(stdev, argp); | 988 | rc = ioctl_flash_part_info(stdev, argp); |
| 979 | break; | 989 | break; |
| 980 | case SWITCHTEC_IOCTL_EVENT_SUMMARY: | 990 | case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY: |
| 981 | rc = ioctl_event_summary(stdev, stuser, argp); | 991 | rc = ioctl_event_summary(stdev, stuser, argp, |
| 992 | sizeof(struct switchtec_ioctl_event_summary_legacy)); | ||
| 982 | break; | 993 | break; |
| 983 | case SWITCHTEC_IOCTL_EVENT_CTL: | 994 | case SWITCHTEC_IOCTL_EVENT_CTL: |
| 984 | rc = ioctl_event_ctl(stdev, argp); | 995 | rc = ioctl_event_ctl(stdev, argp); |
| @@ -989,6 +1000,10 @@ static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd, | |||
| 989 | case SWITCHTEC_IOCTL_PORT_TO_PFF: | 1000 | case SWITCHTEC_IOCTL_PORT_TO_PFF: |
| 990 | rc = ioctl_port_to_pff(stdev, argp); | 1001 | rc = ioctl_port_to_pff(stdev, argp); |
| 991 | break; | 1002 | break; |
| 1003 | case SWITCHTEC_IOCTL_EVENT_SUMMARY: | ||
| 1004 | rc = ioctl_event_summary(stdev, stuser, argp, | ||
| 1005 | sizeof(struct switchtec_ioctl_event_summary)); | ||
| 1006 | break; | ||
| 992 | default: | 1007 | default: |
| 993 | rc = -ENOTTY; | 1008 | rc = -ENOTTY; |
| 994 | break; | 1009 | break; |
| @@ -1162,7 +1177,8 @@ static int mask_event(struct switchtec_dev *stdev, int eid, int idx) | |||
| 1162 | if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) | 1177 | if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ)) |
| 1163 | return 0; | 1178 | return 0; |
| 1164 | 1179 | ||
| 1165 | if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE) | 1180 | if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE || |
| 1181 | eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP) | ||
| 1166 | return 0; | 1182 | return 0; |
| 1167 | 1183 | ||
| 1168 | dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); | 1184 | dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr); |
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index eba6e33147a2..d1b16cf3403f 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
| @@ -291,8 +291,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, | |||
| 291 | vector[i] = op.msix_entries[i].vector; | 291 | vector[i] = op.msix_entries[i].vector; |
| 292 | } | 292 | } |
| 293 | } else { | 293 | } else { |
| 294 | printk(KERN_DEBUG "enable msix get value %x\n", | 294 | pr_info("enable msix get value %x\n", op.value); |
| 295 | op.value); | ||
| 296 | err = op.value; | 295 | err = op.value; |
| 297 | } | 296 | } |
| 298 | } else { | 297 | } else { |
| @@ -364,12 +363,12 @@ static void pci_frontend_disable_msi(struct pci_dev *dev) | |||
| 364 | err = do_pci_op(pdev, &op); | 363 | err = do_pci_op(pdev, &op); |
| 365 | if (err == XEN_PCI_ERR_dev_not_found) { | 364 | if (err == XEN_PCI_ERR_dev_not_found) { |
| 366 | /* XXX No response from backend, what shall we do? */ | 365 | /* XXX No response from backend, what shall we do? */ |
| 367 | printk(KERN_DEBUG "get no response from backend for disable MSI\n"); | 366 | pr_info("get no response from backend for disable MSI\n"); |
| 368 | return; | 367 | return; |
| 369 | } | 368 | } |
| 370 | if (err) | 369 | if (err) |
| 371 | /* how can pciback notify us fail? */ | 370 | /* how can pciback notify us fail? */ |
| 372 | printk(KERN_DEBUG "get fake response frombackend\n"); | 371 | pr_info("get fake response from backend\n"); |
| 373 | } | 372 | } |
| 374 | 373 | ||
| 375 | static struct xen_pci_frontend_ops pci_frontend_ops = { | 374 | static struct xen_pci_frontend_ops pci_frontend_ops = { |
| @@ -1104,7 +1103,7 @@ static void __ref pcifront_backend_changed(struct xenbus_device *xdev, | |||
| 1104 | case XenbusStateClosed: | 1103 | case XenbusStateClosed: |
| 1105 | if (xdev->state == XenbusStateClosed) | 1104 | if (xdev->state == XenbusStateClosed) |
| 1106 | break; | 1105 | break; |
| 1107 | /* Missed the backend's CLOSING state -- fallthrough */ | 1106 | /* fall through - Missed the backend's CLOSING state. */ |
| 1108 | case XenbusStateClosing: | 1107 | case XenbusStateClosing: |
| 1109 | dev_warn(&xdev->dev, "backend going away!\n"); | 1108 | dev_warn(&xdev->dev, "backend going away!\n"); |
| 1110 | pcifront_try_disconnect(pdev); | 1109 | pcifront_try_disconnect(pdev); |
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 24326eecd787..7abbb6167766 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c | |||
| @@ -125,7 +125,7 @@ static bool chromeos_laptop_match_adapter_devid(struct device *dev, u32 devid) | |||
| 125 | return false; | 125 | return false; |
| 126 | 126 | ||
| 127 | pdev = to_pci_dev(dev); | 127 | pdev = to_pci_dev(dev); |
| 128 | return devid == PCI_DEVID(pdev->bus->number, pdev->devfn); | 128 | return devid == pci_dev_id(pdev); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter) | 131 | static void chromeos_laptop_check_adapter(struct i2c_adapter *adapter) |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index e22c237be46a..98440df7fe42 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -517,7 +517,8 @@ extern bool osc_pc_lpi_support_confirmed; | |||
| 517 | #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 | 517 | #define OSC_PCI_CLOCK_PM_SUPPORT 0x00000004 |
| 518 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 | 518 | #define OSC_PCI_SEGMENT_GROUPS_SUPPORT 0x00000008 |
| 519 | #define OSC_PCI_MSI_SUPPORT 0x00000010 | 519 | #define OSC_PCI_MSI_SUPPORT 0x00000010 |
| 520 | #define OSC_PCI_SUPPORT_MASKS 0x0000001f | 520 | #define OSC_PCI_HPX_TYPE_3_SUPPORT 0x00000100 |
| 521 | #define OSC_PCI_SUPPORT_MASKS 0x0000011f | ||
| 521 | 522 | ||
| 522 | /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ | 523 | /* PCI Host Bridge _OSC: Capabilities DWORD 3: Control Field */ |
| 523 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 | 524 | #define OSC_PCI_EXPRESS_NATIVE_HP_CONTROL 0x00000001 |
diff --git a/include/linux/cper.h b/include/linux/cper.h index 9c703a0abe6e..cc4980bb0f65 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | */ | 44 | */ |
| 45 | #define CPER_REC_LEN 256 | 45 | #define CPER_REC_LEN 256 |
| 46 | /* | 46 | /* |
| 47 | * Severity difinition for error_severity in struct cper_record_header | 47 | * Severity definition for error_severity in struct cper_record_header |
| 48 | * and section_severity in struct cper_section_descriptor | 48 | * and section_severity in struct cper_section_descriptor |
| 49 | */ | 49 | */ |
| 50 | enum { | 50 | enum { |
| @@ -55,24 +55,21 @@ enum { | |||
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | /* | 57 | /* |
| 58 | * Validation bits difinition for validation_bits in struct | 58 | * Validation bits definition for validation_bits in struct |
| 59 | * cper_record_header. If set, corresponding fields in struct | 59 | * cper_record_header. If set, corresponding fields in struct |
| 60 | * cper_record_header contain valid information. | 60 | * cper_record_header contain valid information. |
| 61 | * | ||
| 62 | * corresponds platform_id | ||
| 63 | */ | 61 | */ |
| 64 | #define CPER_VALID_PLATFORM_ID 0x0001 | 62 | #define CPER_VALID_PLATFORM_ID 0x0001 |
| 65 | /* corresponds timestamp */ | ||
| 66 | #define CPER_VALID_TIMESTAMP 0x0002 | 63 | #define CPER_VALID_TIMESTAMP 0x0002 |
| 67 | /* corresponds partition_id */ | ||
| 68 | #define CPER_VALID_PARTITION_ID 0x0004 | 64 | #define CPER_VALID_PARTITION_ID 0x0004 |
| 69 | 65 | ||
| 70 | /* | 66 | /* |
| 71 | * Notification type used to generate error record, used in | 67 | * Notification type used to generate error record, used in |
| 72 | * notification_type in struct cper_record_header | 68 | * notification_type in struct cper_record_header. These UUIDs are defined |
| 73 | * | 69 | * in the UEFI spec v2.7, sec N.2.1. |
| 74 | * Corrected Machine Check | ||
| 75 | */ | 70 | */ |
| 71 | |||
| 72 | /* Corrected Machine Check */ | ||
| 76 | #define CPER_NOTIFY_CMC \ | 73 | #define CPER_NOTIFY_CMC \ |
| 77 | GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ | 74 | GUID_INIT(0x2DCE8BB1, 0xBDD7, 0x450e, 0xB9, 0xAD, 0x9C, 0xF4, \ |
| 78 | 0xEB, 0xD4, 0xF8, 0x90) | 75 | 0xEB, 0xD4, 0xF8, 0x90) |
| @@ -122,14 +119,11 @@ enum { | |||
| 122 | #define CPER_SEC_REV 0x0100 | 119 | #define CPER_SEC_REV 0x0100 |
| 123 | 120 | ||
| 124 | /* | 121 | /* |
| 125 | * Validation bits difinition for validation_bits in struct | 122 | * Validation bits definition for validation_bits in struct |
| 126 | * cper_section_descriptor. If set, corresponding fields in struct | 123 | * cper_section_descriptor. If set, corresponding fields in struct |
| 127 | * cper_section_descriptor contain valid information. | 124 | * cper_section_descriptor contain valid information. |
| 128 | * | ||
| 129 | * corresponds fru_id | ||
| 130 | */ | 125 | */ |
| 131 | #define CPER_SEC_VALID_FRU_ID 0x1 | 126 | #define CPER_SEC_VALID_FRU_ID 0x1 |
| 132 | /* corresponds fru_text */ | ||
| 133 | #define CPER_SEC_VALID_FRU_TEXT 0x2 | 127 | #define CPER_SEC_VALID_FRU_TEXT 0x2 |
| 134 | 128 | ||
| 135 | /* | 129 | /* |
| @@ -165,10 +159,11 @@ enum { | |||
| 165 | 159 | ||
| 166 | /* | 160 | /* |
| 167 | * Section type definitions, used in section_type field in struct | 161 | * Section type definitions, used in section_type field in struct |
| 168 | * cper_section_descriptor | 162 | * cper_section_descriptor. These UUIDs are defined in the UEFI spec |
| 169 | * | 163 | * v2.7, sec N.2.2. |
| 170 | * Processor Generic | ||
| 171 | */ | 164 | */ |
| 165 | |||
| 166 | /* Processor Generic */ | ||
| 172 | #define CPER_SEC_PROC_GENERIC \ | 167 | #define CPER_SEC_PROC_GENERIC \ |
| 173 | GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ | 168 | GUID_INIT(0x9876CCAD, 0x47B4, 0x4bdb, 0xB6, 0x5E, 0x16, 0xF1, \ |
| 174 | 0x93, 0xC4, 0xF3, 0xDB) | 169 | 0x93, 0xC4, 0xF3, 0xDB) |
| @@ -325,220 +320,223 @@ enum { | |||
| 325 | */ | 320 | */ |
| 326 | #pragma pack(1) | 321 | #pragma pack(1) |
| 327 | 322 | ||
| 323 | /* Record Header, UEFI v2.7 sec N.2.1 */ | ||
| 328 | struct cper_record_header { | 324 | struct cper_record_header { |
| 329 | char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ | 325 | char signature[CPER_SIG_SIZE]; /* must be CPER_SIG_RECORD */ |
| 330 | __u16 revision; /* must be CPER_RECORD_REV */ | 326 | u16 revision; /* must be CPER_RECORD_REV */ |
| 331 | __u32 signature_end; /* must be CPER_SIG_END */ | 327 | u32 signature_end; /* must be CPER_SIG_END */ |
| 332 | __u16 section_count; | 328 | u16 section_count; |
| 333 | __u32 error_severity; | 329 | u32 error_severity; |
| 334 | __u32 validation_bits; | 330 | u32 validation_bits; |
| 335 | __u32 record_length; | 331 | u32 record_length; |
| 336 | __u64 timestamp; | 332 | u64 timestamp; |
| 337 | guid_t platform_id; | 333 | guid_t platform_id; |
| 338 | guid_t partition_id; | 334 | guid_t partition_id; |
| 339 | guid_t creator_id; | 335 | guid_t creator_id; |
| 340 | guid_t notification_type; | 336 | guid_t notification_type; |
| 341 | __u64 record_id; | 337 | u64 record_id; |
| 342 | __u32 flags; | 338 | u32 flags; |
| 343 | __u64 persistence_information; | 339 | u64 persistence_information; |
| 344 | __u8 reserved[12]; /* must be zero */ | 340 | u8 reserved[12]; /* must be zero */ |
| 345 | }; | 341 | }; |
| 346 | 342 | ||
| 343 | /* Section Descriptor, UEFI v2.7 sec N.2.2 */ | ||
| 347 | struct cper_section_descriptor { | 344 | struct cper_section_descriptor { |
| 348 | __u32 section_offset; /* Offset in bytes of the | 345 | u32 section_offset; /* Offset in bytes of the |
| 349 | * section body from the base | 346 | * section body from the base |
| 350 | * of the record header */ | 347 | * of the record header */ |
| 351 | __u32 section_length; | 348 | u32 section_length; |
| 352 | __u16 revision; /* must be CPER_RECORD_REV */ | 349 | u16 revision; /* must be CPER_RECORD_REV */ |
| 353 | __u8 validation_bits; | 350 | u8 validation_bits; |
| 354 | __u8 reserved; /* must be zero */ | 351 | u8 reserved; /* must be zero */ |
| 355 | __u32 flags; | 352 | u32 flags; |
| 356 | guid_t section_type; | 353 | guid_t section_type; |
| 357 | guid_t fru_id; | 354 | guid_t fru_id; |
| 358 | __u32 section_severity; | 355 | u32 section_severity; |
| 359 | __u8 fru_text[20]; | 356 | u8 fru_text[20]; |
| 360 | }; | 357 | }; |
| 361 | 358 | ||
| 362 | /* Generic Processor Error Section */ | 359 | /* Generic Processor Error Section, UEFI v2.7 sec N.2.4.1 */ |
| 363 | struct cper_sec_proc_generic { | 360 | struct cper_sec_proc_generic { |
| 364 | __u64 validation_bits; | 361 | u64 validation_bits; |
| 365 | __u8 proc_type; | 362 | u8 proc_type; |
| 366 | __u8 proc_isa; | 363 | u8 proc_isa; |
| 367 | __u8 proc_error_type; | 364 | u8 proc_error_type; |
| 368 | __u8 operation; | 365 | u8 operation; |
| 369 | __u8 flags; | 366 | u8 flags; |
| 370 | __u8 level; | 367 | u8 level; |
| 371 | __u16 reserved; | 368 | u16 reserved; |
| 372 | __u64 cpu_version; | 369 | u64 cpu_version; |
| 373 | char cpu_brand[128]; | 370 | char cpu_brand[128]; |
| 374 | __u64 proc_id; | 371 | u64 proc_id; |
| 375 | __u64 target_addr; | 372 | u64 target_addr; |
| 376 | __u64 requestor_id; | 373 | u64 requestor_id; |
| 377 | __u64 responder_id; | 374 | u64 responder_id; |
| 378 | __u64 ip; | 375 | u64 ip; |
| 379 | }; | 376 | }; |
| 380 | 377 | ||
| 381 | /* IA32/X64 Processor Error Section */ | 378 | /* IA32/X64 Processor Error Section, UEFI v2.7 sec N.2.4.2 */ |
| 382 | struct cper_sec_proc_ia { | 379 | struct cper_sec_proc_ia { |
| 383 | __u64 validation_bits; | 380 | u64 validation_bits; |
| 384 | __u64 lapic_id; | 381 | u64 lapic_id; |
| 385 | __u8 cpuid[48]; | 382 | u8 cpuid[48]; |
| 386 | }; | 383 | }; |
| 387 | 384 | ||
| 388 | /* IA32/X64 Processor Error Information Structure */ | 385 | /* IA32/X64 Processor Error Information Structure, UEFI v2.7 sec N.2.4.2.1 */ |
| 389 | struct cper_ia_err_info { | 386 | struct cper_ia_err_info { |
| 390 | guid_t err_type; | 387 | guid_t err_type; |
| 391 | __u64 validation_bits; | 388 | u64 validation_bits; |
| 392 | __u64 check_info; | 389 | u64 check_info; |
| 393 | __u64 target_id; | 390 | u64 target_id; |
| 394 | __u64 requestor_id; | 391 | u64 requestor_id; |
| 395 | __u64 responder_id; | 392 | u64 responder_id; |
| 396 | __u64 ip; | 393 | u64 ip; |
| 397 | }; | 394 | }; |
| 398 | 395 | ||
| 399 | /* IA32/X64 Processor Context Information Structure */ | 396 | /* IA32/X64 Processor Context Information Structure, UEFI v2.7 sec N.2.4.2.2 */ |
| 400 | struct cper_ia_proc_ctx { | 397 | struct cper_ia_proc_ctx { |
| 401 | __u16 reg_ctx_type; | 398 | u16 reg_ctx_type; |
| 402 | __u16 reg_arr_size; | 399 | u16 reg_arr_size; |
| 403 | __u32 msr_addr; | 400 | u32 msr_addr; |
| 404 | __u64 mm_reg_addr; | 401 | u64 mm_reg_addr; |
| 405 | }; | 402 | }; |
| 406 | 403 | ||
| 407 | /* ARM Processor Error Section */ | 404 | /* ARM Processor Error Section, UEFI v2.7 sec N.2.4.4 */ |
| 408 | struct cper_sec_proc_arm { | 405 | struct cper_sec_proc_arm { |
| 409 | __u32 validation_bits; | 406 | u32 validation_bits; |
| 410 | __u16 err_info_num; /* Number of Processor Error Info */ | 407 | u16 err_info_num; /* Number of Processor Error Info */ |
| 411 | __u16 context_info_num; /* Number of Processor Context Info Records*/ | 408 | u16 context_info_num; /* Number of Processor Context Info Records*/ |
| 412 | __u32 section_length; | 409 | u32 section_length; |
| 413 | __u8 affinity_level; | 410 | u8 affinity_level; |
| 414 | __u8 reserved[3]; /* must be zero */ | 411 | u8 reserved[3]; /* must be zero */ |
| 415 | __u64 mpidr; | 412 | u64 mpidr; |
| 416 | __u64 midr; | 413 | u64 midr; |
| 417 | __u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */ | 414 | u32 running_state; /* Bit 0 set - Processor running. PSCI = 0 */ |
| 418 | __u32 psci_state; | 415 | u32 psci_state; |
| 419 | }; | 416 | }; |
| 420 | 417 | ||
| 421 | /* ARM Processor Error Information Structure */ | 418 | /* ARM Processor Error Information Structure, UEFI v2.7 sec N.2.4.4.1 */ |
| 422 | struct cper_arm_err_info { | 419 | struct cper_arm_err_info { |
| 423 | __u8 version; | 420 | u8 version; |
| 424 | __u8 length; | 421 | u8 length; |
| 425 | __u16 validation_bits; | 422 | u16 validation_bits; |
| 426 | __u8 type; | 423 | u8 type; |
| 427 | __u16 multiple_error; | 424 | u16 multiple_error; |
| 428 | __u8 flags; | 425 | u8 flags; |
| 429 | __u64 error_info; | 426 | u64 error_info; |
| 430 | __u64 virt_fault_addr; | 427 | u64 virt_fault_addr; |
| 431 | __u64 physical_fault_addr; | 428 | u64 physical_fault_addr; |
| 432 | }; | 429 | }; |
| 433 | 430 | ||
| 434 | /* ARM Processor Context Information Structure */ | 431 | /* ARM Processor Context Information Structure, UEFI v2.7 sec N.2.4.4.2 */ |
| 435 | struct cper_arm_ctx_info { | 432 | struct cper_arm_ctx_info { |
| 436 | __u16 version; | 433 | u16 version; |
| 437 | __u16 type; | 434 | u16 type; |
| 438 | __u32 size; | 435 | u32 size; |
| 439 | }; | 436 | }; |
| 440 | 437 | ||
| 441 | /* Old Memory Error Section UEFI 2.1, 2.2 */ | 438 | /* Old Memory Error Section, UEFI v2.1, v2.2 */ |
| 442 | struct cper_sec_mem_err_old { | 439 | struct cper_sec_mem_err_old { |
| 443 | __u64 validation_bits; | 440 | u64 validation_bits; |
| 444 | __u64 error_status; | 441 | u64 error_status; |
| 445 | __u64 physical_addr; | 442 | u64 physical_addr; |
| 446 | __u64 physical_addr_mask; | 443 | u64 physical_addr_mask; |
| 447 | __u16 node; | 444 | u16 node; |
| 448 | __u16 card; | 445 | u16 card; |
| 449 | __u16 module; | 446 | u16 module; |
| 450 | __u16 bank; | 447 | u16 bank; |
| 451 | __u16 device; | 448 | u16 device; |
| 452 | __u16 row; | 449 | u16 row; |
| 453 | __u16 column; | 450 | u16 column; |
| 454 | __u16 bit_pos; | 451 | u16 bit_pos; |
| 455 | __u64 requestor_id; | 452 | u64 requestor_id; |
| 456 | __u64 responder_id; | 453 | u64 responder_id; |
| 457 | __u64 target_id; | 454 | u64 target_id; |
| 458 | __u8 error_type; | 455 | u8 error_type; |
| 459 | }; | 456 | }; |
| 460 | 457 | ||
| 461 | /* Memory Error Section UEFI >= 2.3 */ | 458 | /* Memory Error Section (UEFI >= v2.3), UEFI v2.7 sec N.2.5 */ |
| 462 | struct cper_sec_mem_err { | 459 | struct cper_sec_mem_err { |
| 463 | __u64 validation_bits; | 460 | u64 validation_bits; |
| 464 | __u64 error_status; | 461 | u64 error_status; |
| 465 | __u64 physical_addr; | 462 | u64 physical_addr; |
| 466 | __u64 physical_addr_mask; | 463 | u64 physical_addr_mask; |
| 467 | __u16 node; | 464 | u16 node; |
| 468 | __u16 card; | 465 | u16 card; |
| 469 | __u16 module; | 466 | u16 module; |
| 470 | __u16 bank; | 467 | u16 bank; |
| 471 | __u16 device; | 468 | u16 device; |
| 472 | __u16 row; | 469 | u16 row; |
| 473 | __u16 column; | 470 | u16 column; |
| 474 | __u16 bit_pos; | 471 | u16 bit_pos; |
| 475 | __u64 requestor_id; | 472 | u64 requestor_id; |
| 476 | __u64 responder_id; | 473 | u64 responder_id; |
| 477 | __u64 target_id; | 474 | u64 target_id; |
| 478 | __u8 error_type; | 475 | u8 error_type; |
| 479 | __u8 reserved; | 476 | u8 reserved; |
| 480 | __u16 rank; | 477 | u16 rank; |
| 481 | __u16 mem_array_handle; /* card handle in UEFI 2.4 */ | 478 | u16 mem_array_handle; /* "card handle" in UEFI 2.4 */ |
| 482 | __u16 mem_dev_handle; /* module handle in UEFI 2.4 */ | 479 | u16 mem_dev_handle; /* "module handle" in UEFI 2.4 */ |
| 483 | }; | 480 | }; |
| 484 | 481 | ||
| 485 | struct cper_mem_err_compact { | 482 | struct cper_mem_err_compact { |
| 486 | __u64 validation_bits; | 483 | u64 validation_bits; |
| 487 | __u16 node; | 484 | u16 node; |
| 488 | __u16 card; | 485 | u16 card; |
| 489 | __u16 module; | 486 | u16 module; |
| 490 | __u16 bank; | 487 | u16 bank; |
| 491 | __u16 device; | 488 | u16 device; |
| 492 | __u16 row; | 489 | u16 row; |
| 493 | __u16 column; | 490 | u16 column; |
| 494 | __u16 bit_pos; | 491 | u16 bit_pos; |
| 495 | __u64 requestor_id; | 492 | u64 requestor_id; |
| 496 | __u64 responder_id; | 493 | u64 responder_id; |
| 497 | __u64 target_id; | 494 | u64 target_id; |
| 498 | __u16 rank; | 495 | u16 rank; |
| 499 | __u16 mem_array_handle; | 496 | u16 mem_array_handle; |
| 500 | __u16 mem_dev_handle; | 497 | u16 mem_dev_handle; |
| 501 | }; | 498 | }; |
| 502 | 499 | ||
| 500 | /* PCI Express Error Section, UEFI v2.7 sec N.2.7 */ | ||
| 503 | struct cper_sec_pcie { | 501 | struct cper_sec_pcie { |
| 504 | __u64 validation_bits; | 502 | u64 validation_bits; |
| 505 | __u32 port_type; | 503 | u32 port_type; |
| 506 | struct { | 504 | struct { |
| 507 | __u8 minor; | 505 | u8 minor; |
| 508 | __u8 major; | 506 | u8 major; |
| 509 | __u8 reserved[2]; | 507 | u8 reserved[2]; |
| 510 | } version; | 508 | } version; |
| 511 | __u16 command; | 509 | u16 command; |
| 512 | __u16 status; | 510 | u16 status; |
| 513 | __u32 reserved; | 511 | u32 reserved; |
| 514 | struct { | 512 | struct { |
| 515 | __u16 vendor_id; | 513 | u16 vendor_id; |
| 516 | __u16 device_id; | 514 | u16 device_id; |
| 517 | __u8 class_code[3]; | 515 | u8 class_code[3]; |
| 518 | __u8 function; | 516 | u8 function; |
| 519 | __u8 device; | 517 | u8 device; |
| 520 | __u16 segment; | 518 | u16 segment; |
| 521 | __u8 bus; | 519 | u8 bus; |
| 522 | __u8 secondary_bus; | 520 | u8 secondary_bus; |
| 523 | __u16 slot; | 521 | u16 slot; |
| 524 | __u8 reserved; | 522 | u8 reserved; |
| 525 | } device_id; | 523 | } device_id; |
| 526 | struct { | 524 | struct { |
| 527 | __u32 lower; | 525 | u32 lower; |
| 528 | __u32 upper; | 526 | u32 upper; |
| 529 | } serial_number; | 527 | } serial_number; |
| 530 | struct { | 528 | struct { |
| 531 | __u16 secondary_status; | 529 | u16 secondary_status; |
| 532 | __u16 control; | 530 | u16 control; |
| 533 | } bridge; | 531 | } bridge; |
| 534 | __u8 capability[60]; | 532 | u8 capability[60]; |
| 535 | __u8 aer_info[96]; | 533 | u8 aer_info[96]; |
| 536 | }; | 534 | }; |
| 537 | 535 | ||
| 538 | /* Reset to default packing */ | 536 | /* Reset to default packing */ |
| 539 | #pragma pack() | 537 | #pragma pack() |
| 540 | 538 | ||
| 541 | extern const char * const cper_proc_error_type_strs[4]; | 539 | extern const char *const cper_proc_error_type_strs[4]; |
| 542 | 540 | ||
| 543 | u64 cper_next_record_id(void); | 541 | u64 cper_next_record_id(void); |
| 544 | const char *cper_severity_str(unsigned int); | 542 | const char *cper_severity_str(unsigned int); |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 7e9b81c3b50d..052f04fcf953 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
| @@ -148,24 +148,6 @@ u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); | |||
| 148 | void pci_msi_mask_irq(struct irq_data *data); | 148 | void pci_msi_mask_irq(struct irq_data *data); |
| 149 | void pci_msi_unmask_irq(struct irq_data *data); | 149 | void pci_msi_unmask_irq(struct irq_data *data); |
| 150 | 150 | ||
| 151 | /* Conversion helpers. Should be removed after merging */ | ||
| 152 | static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) | ||
| 153 | { | ||
| 154 | __pci_write_msi_msg(entry, msg); | ||
| 155 | } | ||
| 156 | static inline void write_msi_msg(int irq, struct msi_msg *msg) | ||
| 157 | { | ||
| 158 | pci_write_msi_msg(irq, msg); | ||
| 159 | } | ||
| 160 | static inline void mask_msi_irq(struct irq_data *data) | ||
| 161 | { | ||
| 162 | pci_msi_mask_irq(data); | ||
| 163 | } | ||
| 164 | static inline void unmask_msi_irq(struct irq_data *data) | ||
| 165 | { | ||
| 166 | pci_msi_unmask_irq(data); | ||
| 167 | } | ||
| 168 | |||
| 169 | /* | 151 | /* |
| 170 | * The arch hooks to setup up msi irqs. Those functions are | 152 | * The arch hooks to setup up msi irqs. Those functions are |
| 171 | * implemented as weak symbols so that they /can/ be overriden by | 153 | * implemented as weak symbols so that they /can/ be overriden by |
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 29efa09d686b..a73164c85e78 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h | |||
| @@ -56,6 +56,7 @@ extern struct pci_ecam_ops thunder_pem_ecam_ops; /* Cavium ThunderX 1.x & 2.x */ | |||
| 56 | extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ | 56 | extern struct pci_ecam_ops pci_thunder_ecam_ops; /* Cavium ThunderX 1.x */ |
| 57 | extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ | 57 | extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */ |
| 58 | extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ | 58 | extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */ |
| 59 | extern struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ | ||
| 59 | #endif | 60 | #endif |
| 60 | 61 | ||
| 61 | #ifdef CONFIG_PCI_HOST_COMMON | 62 | #ifdef CONFIG_PCI_HOST_COMMON |
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index c3ffa3917f88..f641badc2c61 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h | |||
| @@ -109,6 +109,7 @@ struct pci_epc { | |||
| 109 | * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver | 109 | * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver |
| 110 | * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs | 110 | * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs |
| 111 | * @bar_fixed_size: Array specifying the size supported by each BAR | 111 | * @bar_fixed_size: Array specifying the size supported by each BAR |
| 112 | * @align: alignment size required for BAR buffer allocation | ||
| 112 | */ | 113 | */ |
| 113 | struct pci_epc_features { | 114 | struct pci_epc_features { |
| 114 | unsigned int linkup_notifier : 1; | 115 | unsigned int linkup_notifier : 1; |
| @@ -117,6 +118,7 @@ struct pci_epc_features { | |||
| 117 | u8 reserved_bar; | 118 | u8 reserved_bar; |
| 118 | u8 bar_fixed_64bit; | 119 | u8 bar_fixed_64bit; |
| 119 | u64 bar_fixed_size[BAR_5 + 1]; | 120 | u64 bar_fixed_size[BAR_5 + 1]; |
| 121 | size_t align; | ||
| 120 | }; | 122 | }; |
| 121 | 123 | ||
| 122 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) | 124 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) |
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index ec02f58758c8..2d6f07556682 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h | |||
| @@ -149,7 +149,8 @@ void pci_epf_destroy(struct pci_epf *epf); | |||
| 149 | int __pci_epf_register_driver(struct pci_epf_driver *driver, | 149 | int __pci_epf_register_driver(struct pci_epf_driver *driver, |
| 150 | struct module *owner); | 150 | struct module *owner); |
| 151 | void pci_epf_unregister_driver(struct pci_epf_driver *driver); | 151 | void pci_epf_unregister_driver(struct pci_epf_driver *driver); |
| 152 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar); | 152 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, |
| 153 | size_t align); | ||
| 153 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); | 154 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); |
| 154 | int pci_epf_bind(struct pci_epf *epf); | 155 | int pci_epf_bind(struct pci_epf *epf); |
| 155 | void pci_epf_unbind(struct pci_epf *epf); | 156 | void pci_epf_unbind(struct pci_epf *epf); |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 1250806dc94a..4a5a84d7bdd4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -348,6 +348,8 @@ struct pci_dev { | |||
| 348 | unsigned int hotplug_user_indicators:1; /* SlotCtl indicators | 348 | unsigned int hotplug_user_indicators:1; /* SlotCtl indicators |
| 349 | controlled exclusively by | 349 | controlled exclusively by |
| 350 | user sysfs */ | 350 | user sysfs */ |
| 351 | unsigned int clear_retrain_link:1; /* Need to clear Retrain Link | ||
| 352 | bit manually */ | ||
| 351 | unsigned int d3_delay; /* D3->D0 transition time in ms */ | 353 | unsigned int d3_delay; /* D3->D0 transition time in ms */ |
| 352 | unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ | 354 | unsigned int d3cold_delay; /* D3cold->D0 transition time in ms */ |
| 353 | 355 | ||
| @@ -490,6 +492,7 @@ struct pci_host_bridge { | |||
| 490 | void *sysdata; | 492 | void *sysdata; |
| 491 | int busnr; | 493 | int busnr; |
| 492 | struct list_head windows; /* resource_entry */ | 494 | struct list_head windows; /* resource_entry */ |
| 495 | struct list_head dma_ranges; /* dma ranges resource list */ | ||
| 493 | u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ | 496 | u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ |
| 494 | int (*map_irq)(const struct pci_dev *, u8, u8); | 497 | int (*map_irq)(const struct pci_dev *, u8, u8); |
| 495 | void (*release_fn)(struct pci_host_bridge *); | 498 | void (*release_fn)(struct pci_host_bridge *); |
| @@ -596,6 +599,11 @@ struct pci_bus { | |||
| 596 | 599 | ||
| 597 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) | 600 | #define to_pci_bus(n) container_of(n, struct pci_bus, dev) |
| 598 | 601 | ||
| 602 | static inline u16 pci_dev_id(struct pci_dev *dev) | ||
| 603 | { | ||
| 604 | return PCI_DEVID(dev->bus->number, dev->devfn); | ||
| 605 | } | ||
| 606 | |||
| 599 | /* | 607 | /* |
| 600 | * Returns true if the PCI bus is root (behind host-PCI bridge), | 608 | * Returns true if the PCI bus is root (behind host-PCI bridge), |
| 601 | * false otherwise | 609 | * false otherwise |
| @@ -1233,7 +1241,6 @@ int __must_check pci_request_regions(struct pci_dev *, const char *); | |||
| 1233 | int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); | 1241 | int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *); |
| 1234 | void pci_release_regions(struct pci_dev *); | 1242 | void pci_release_regions(struct pci_dev *); |
| 1235 | int __must_check pci_request_region(struct pci_dev *, int, const char *); | 1243 | int __must_check pci_request_region(struct pci_dev *, int, const char *); |
| 1236 | int __must_check pci_request_region_exclusive(struct pci_dev *, int, const char *); | ||
| 1237 | void pci_release_region(struct pci_dev *, int); | 1244 | void pci_release_region(struct pci_dev *, int); |
| 1238 | int pci_request_selected_regions(struct pci_dev *, int, const char *); | 1245 | int pci_request_selected_regions(struct pci_dev *, int, const char *); |
| 1239 | int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); | 1246 | int pci_request_selected_regions_exclusive(struct pci_dev *, int, const char *); |
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 7acc9f91e72b..f694eb2ca978 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
| @@ -124,26 +124,72 @@ struct hpp_type2 { | |||
| 124 | u32 sec_unc_err_mask_or; | 124 | u32 sec_unc_err_mask_or; |
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | struct hotplug_params { | 127 | /* |
| 128 | struct hpp_type0 *t0; /* Type0: NULL if not available */ | 128 | * _HPX PCI Express Setting Record (Type 3) |
| 129 | struct hpp_type1 *t1; /* Type1: NULL if not available */ | 129 | */ |
| 130 | struct hpp_type2 *t2; /* Type2: NULL if not available */ | 130 | struct hpx_type3 { |
| 131 | struct hpp_type0 type0_data; | 131 | u16 device_type; |
| 132 | struct hpp_type1 type1_data; | 132 | u16 function_type; |
| 133 | struct hpp_type2 type2_data; | 133 | u16 config_space_location; |
| 134 | u16 pci_exp_cap_id; | ||
| 135 | u16 pci_exp_cap_ver; | ||
| 136 | u16 pci_exp_vendor_id; | ||
| 137 | u16 dvsec_id; | ||
| 138 | u16 dvsec_rev; | ||
| 139 | u16 match_offset; | ||
| 140 | u32 match_mask_and; | ||
| 141 | u32 match_value; | ||
| 142 | u16 reg_offset; | ||
| 143 | u32 reg_mask_and; | ||
| 144 | u32 reg_mask_or; | ||
| 145 | }; | ||
| 146 | |||
| 147 | struct hotplug_program_ops { | ||
| 148 | void (*program_type0)(struct pci_dev *dev, struct hpp_type0 *hpp); | ||
| 149 | void (*program_type1)(struct pci_dev *dev, struct hpp_type1 *hpp); | ||
| 150 | void (*program_type2)(struct pci_dev *dev, struct hpp_type2 *hpp); | ||
| 151 | void (*program_type3)(struct pci_dev *dev, struct hpx_type3 *hpp); | ||
| 152 | }; | ||
| 153 | |||
| 154 | enum hpx_type3_dev_type { | ||
| 155 | HPX_TYPE_ENDPOINT = BIT(0), | ||
| 156 | HPX_TYPE_LEG_END = BIT(1), | ||
| 157 | HPX_TYPE_RC_END = BIT(2), | ||
| 158 | HPX_TYPE_RC_EC = BIT(3), | ||
| 159 | HPX_TYPE_ROOT_PORT = BIT(4), | ||
| 160 | HPX_TYPE_UPSTREAM = BIT(5), | ||
| 161 | HPX_TYPE_DOWNSTREAM = BIT(6), | ||
| 162 | HPX_TYPE_PCI_BRIDGE = BIT(7), | ||
| 163 | HPX_TYPE_PCIE_BRIDGE = BIT(8), | ||
| 164 | }; | ||
| 165 | |||
| 166 | enum hpx_type3_fn_type { | ||
| 167 | HPX_FN_NORMAL = BIT(0), | ||
| 168 | HPX_FN_SRIOV_PHYS = BIT(1), | ||
| 169 | HPX_FN_SRIOV_VIRT = BIT(2), | ||
| 170 | }; | ||
| 171 | |||
| 172 | enum hpx_type3_cfg_loc { | ||
| 173 | HPX_CFG_PCICFG = 0, | ||
| 174 | HPX_CFG_PCIE_CAP = 1, | ||
| 175 | HPX_CFG_PCIE_CAP_EXT = 2, | ||
| 176 | HPX_CFG_VEND_CAP = 3, | ||
| 177 | HPX_CFG_DVSEC = 4, | ||
| 178 | HPX_CFG_MAX, | ||
| 134 | }; | 179 | }; |
| 135 | 180 | ||
| 136 | #ifdef CONFIG_ACPI | 181 | #ifdef CONFIG_ACPI |
| 137 | #include <linux/acpi.h> | 182 | #include <linux/acpi.h> |
| 138 | int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp); | 183 | int pci_acpi_program_hp_params(struct pci_dev *dev, |
| 184 | const struct hotplug_program_ops *hp_ops); | ||
| 139 | bool pciehp_is_native(struct pci_dev *bridge); | 185 | bool pciehp_is_native(struct pci_dev *bridge); |
| 140 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); | 186 | int acpi_get_hp_hw_control_from_firmware(struct pci_dev *bridge); |
| 141 | bool shpchp_is_native(struct pci_dev *bridge); | 187 | bool shpchp_is_native(struct pci_dev *bridge); |
| 142 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); | 188 | int acpi_pci_check_ejectable(struct pci_bus *pbus, acpi_handle handle); |
| 143 | int acpi_pci_detect_ejectable(acpi_handle handle); | 189 | int acpi_pci_detect_ejectable(acpi_handle handle); |
| 144 | #else | 190 | #else |
| 145 | static inline int pci_get_hp_params(struct pci_dev *dev, | 191 | static inline int pci_acpi_program_hp_params(struct pci_dev *dev, |
| 146 | struct hotplug_params *hpp) | 192 | const struct hotplug_program_ops *hp_ops) |
| 147 | { | 193 | { |
| 148 | return -ENODEV; | 194 | return -ENODEV; |
| 149 | } | 195 | } |
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index 52a079b3a9a6..0cfc34ac37fb 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
| 21 | 21 | ||
| 22 | #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 | 22 | #define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024 |
| 23 | #define SWITCHTEC_MAX_PFF_CSR 48 | 23 | #define SWITCHTEC_MAX_PFF_CSR 255 |
| 24 | 24 | ||
| 25 | #define SWITCHTEC_EVENT_OCCURRED BIT(0) | 25 | #define SWITCHTEC_EVENT_OCCURRED BIT(0) |
| 26 | #define SWITCHTEC_EVENT_CLEAR BIT(0) | 26 | #define SWITCHTEC_EVENT_CLEAR BIT(0) |
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 5c98133f2c94..27164769d184 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h | |||
| @@ -1,7 +1,5 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
| 2 | /* | 2 | /* |
| 3 | * pci_regs.h | ||
| 4 | * | ||
| 5 | * PCI standard defines | 3 | * PCI standard defines |
| 6 | * Copyright 1994, Drew Eckhardt | 4 | * Copyright 1994, Drew Eckhardt |
| 7 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> | 5 | * Copyright 1997--1999 Martin Mares <mj@ucw.cz> |
| @@ -15,7 +13,7 @@ | |||
| 15 | * PCI System Design Guide | 13 | * PCI System Design Guide |
| 16 | * | 14 | * |
| 17 | * For HyperTransport information, please consult the following manuals | 15 | * For HyperTransport information, please consult the following manuals |
| 18 | * from http://www.hypertransport.org | 16 | * from http://www.hypertransport.org : |
| 19 | * | 17 | * |
| 20 | * The HyperTransport I/O Link Specification | 18 | * The HyperTransport I/O Link Specification |
| 21 | */ | 19 | */ |
| @@ -301,7 +299,7 @@ | |||
| 301 | #define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */ | 299 | #define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */ |
| 302 | #define PCI_SID_CHASSIS_NR 3 /* Chassis Number */ | 300 | #define PCI_SID_CHASSIS_NR 3 /* Chassis Number */ |
| 303 | 301 | ||
| 304 | /* Message Signalled Interrupts registers */ | 302 | /* Message Signalled Interrupt registers */ |
| 305 | 303 | ||
| 306 | #define PCI_MSI_FLAGS 2 /* Message Control */ | 304 | #define PCI_MSI_FLAGS 2 /* Message Control */ |
| 307 | #define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */ | 305 | #define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */ |
| @@ -319,7 +317,7 @@ | |||
| 319 | #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ | 317 | #define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */ |
| 320 | #define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */ | 318 | #define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */ |
| 321 | 319 | ||
| 322 | /* MSI-X registers */ | 320 | /* MSI-X registers (in MSI-X capability) */ |
| 323 | #define PCI_MSIX_FLAGS 2 /* Message Control */ | 321 | #define PCI_MSIX_FLAGS 2 /* Message Control */ |
| 324 | #define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */ | 322 | #define PCI_MSIX_FLAGS_QSIZE 0x07FF /* Table size */ |
| 325 | #define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */ | 323 | #define PCI_MSIX_FLAGS_MASKALL 0x4000 /* Mask all vectors for this function */ |
| @@ -333,13 +331,13 @@ | |||
| 333 | #define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ | 331 | #define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */ |
| 334 | #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ | 332 | #define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ |
| 335 | 333 | ||
| 336 | /* MSI-X Table entry format */ | 334 | /* MSI-X Table entry format (in memory mapped by a BAR) */ |
| 337 | #define PCI_MSIX_ENTRY_SIZE 16 | 335 | #define PCI_MSIX_ENTRY_SIZE 16 |
| 338 | #define PCI_MSIX_ENTRY_LOWER_ADDR 0 | 336 | #define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */ |
| 339 | #define PCI_MSIX_ENTRY_UPPER_ADDR 4 | 337 | #define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */ |
| 340 | #define PCI_MSIX_ENTRY_DATA 8 | 338 | #define PCI_MSIX_ENTRY_DATA 8 /* Message Data */ |
| 341 | #define PCI_MSIX_ENTRY_VECTOR_CTRL 12 | 339 | #define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */ |
| 342 | #define PCI_MSIX_ENTRY_CTRL_MASKBIT 1 | 340 | #define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 |
| 343 | 341 | ||
| 344 | /* CompactPCI Hotswap Register */ | 342 | /* CompactPCI Hotswap Register */ |
| 345 | 343 | ||
| @@ -372,6 +370,12 @@ | |||
| 372 | #define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */ | 370 | #define PCI_EA_FIRST_ENT_BRIDGE 8 /* First EA Entry for Bridges */ |
| 373 | #define PCI_EA_ES 0x00000007 /* Entry Size */ | 371 | #define PCI_EA_ES 0x00000007 /* Entry Size */ |
| 374 | #define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */ | 372 | #define PCI_EA_BEI 0x000000f0 /* BAR Equivalent Indicator */ |
| 373 | |||
| 374 | /* EA fixed Secondary and Subordinate bus numbers for Bridge */ | ||
| 375 | #define PCI_EA_SEC_BUS_MASK 0xff | ||
| 376 | #define PCI_EA_SUB_BUS_MASK 0xff00 | ||
| 377 | #define PCI_EA_SUB_BUS_SHIFT 8 | ||
| 378 | |||
| 375 | /* 0-5 map to BARs 0-5 respectively */ | 379 | /* 0-5 map to BARs 0-5 respectively */ |
| 376 | #define PCI_EA_BEI_BAR0 0 | 380 | #define PCI_EA_BEI_BAR0 0 |
| 377 | #define PCI_EA_BEI_BAR5 5 | 381 | #define PCI_EA_BEI_BAR5 5 |
| @@ -465,19 +469,19 @@ | |||
| 465 | /* PCI Express capability registers */ | 469 | /* PCI Express capability registers */ |
| 466 | 470 | ||
| 467 | #define PCI_EXP_FLAGS 2 /* Capabilities register */ | 471 | #define PCI_EXP_FLAGS 2 /* Capabilities register */ |
| 468 | #define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ | 472 | #define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ |
| 469 | #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ | 473 | #define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ |
| 470 | #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ | 474 | #define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ |
| 471 | #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ | 475 | #define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ |
| 472 | #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ | 476 | #define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ |
| 473 | #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ | 477 | #define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */ |
| 474 | #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ | 478 | #define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ |
| 475 | #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */ | 479 | #define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */ |
| 476 | #define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */ | 480 | #define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */ |
| 477 | #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ | 481 | #define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */ |
| 478 | #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ | 482 | #define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ |
| 479 | #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ | 483 | #define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ |
| 480 | #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ | 484 | #define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */ |
| 481 | #define PCI_EXP_DEVCAP 4 /* Device capabilities */ | 485 | #define PCI_EXP_DEVCAP 4 /* Device capabilities */ |
| 482 | #define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */ | 486 | #define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */ |
| 483 | #define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */ | 487 | #define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */ |
| @@ -616,8 +620,8 @@ | |||
| 616 | #define PCI_EXP_RTCAP 30 /* Root Capabilities */ | 620 | #define PCI_EXP_RTCAP 30 /* Root Capabilities */ |
| 617 | #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ | 621 | #define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */ |
| 618 | #define PCI_EXP_RTSTA 32 /* Root Status */ | 622 | #define PCI_EXP_RTSTA 32 /* Root Status */ |
| 619 | #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ | 623 | #define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */ |
| 620 | #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ | 624 | #define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */ |
| 621 | /* | 625 | /* |
| 622 | * The Device Capabilities 2, Device Status 2, Device Control 2, | 626 | * The Device Capabilities 2, Device Status 2, Device Control 2, |
| 623 | * Link Capabilities 2, Link Status 2, Link Control 2, | 627 | * Link Capabilities 2, Link Status 2, Link Control 2, |
| @@ -637,13 +641,13 @@ | |||
| 637 | #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ | 641 | #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ |
| 638 | #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ | 642 | #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ |
| 639 | #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ | 643 | #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ |
| 640 | #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ | 644 | #define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */ |
| 641 | #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ | 645 | #define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ |
| 642 | #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ | 646 | #define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */ |
| 643 | #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ | 647 | #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */ |
| 644 | #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ | 648 | #define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */ |
| 645 | #define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ | 649 | #define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */ |
| 646 | #define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ | 650 | #define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */ |
| 647 | #define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */ | 651 | #define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */ |
| 648 | #define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */ | 652 | #define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */ |
| 649 | #define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */ | 653 | #define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */ |
| @@ -659,11 +663,11 @@ | |||
| 659 | #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ | 663 | #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x00000010 /* Supported Speed 16GT/s */ |
| 660 | #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ | 664 | #define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */ |
| 661 | #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ | 665 | #define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ |
| 662 | #define PCI_EXP_LNKCTL2_TLS 0x000f | 666 | #define PCI_EXP_LNKCTL2_TLS 0x000f |
| 663 | #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ | 667 | #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */ |
| 664 | #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ | 668 | #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */ |
| 665 | #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ | 669 | #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */ |
| 666 | #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ | 670 | #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */ |
| 667 | #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ | 671 | #define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ |
| 668 | #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ | 672 | #define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */ |
| 669 | #define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ | 673 | #define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */ |
| @@ -752,18 +756,18 @@ | |||
| 752 | #define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ | 756 | #define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */ |
| 753 | #define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */ | 757 | #define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */ |
| 754 | #define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */ | 758 | #define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */ |
| 755 | #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ | 759 | #define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */ |
| 756 | #define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */ | 760 | #define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */ |
| 757 | #define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */ | 761 | #define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */ |
| 758 | #define PCI_ERR_ROOT_STATUS 48 | 762 | #define PCI_ERR_ROOT_STATUS 48 |
| 759 | #define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ | 763 | #define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */ |
| 760 | #define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */ | 764 | #define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */ |
| 761 | #define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */ | 765 | #define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */ |
| 762 | #define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */ | 766 | #define PCI_ERR_ROOT_MULTI_UNCOR_RCV 0x00000008 /* Multiple FATAL/NONFATAL */ |
| 763 | #define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */ | 767 | #define PCI_ERR_ROOT_FIRST_FATAL 0x00000010 /* First UNC is Fatal */ |
| 764 | #define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ | 768 | #define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */ |
| 765 | #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ | 769 | #define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */ |
| 766 | #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ | 770 | #define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */ |
| 767 | #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ | 771 | #define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */ |
| 768 | 772 | ||
| 769 | /* Virtual Channel */ | 773 | /* Virtual Channel */ |
| @@ -875,12 +879,12 @@ | |||
| 875 | 879 | ||
| 876 | /* Page Request Interface */ | 880 | /* Page Request Interface */ |
| 877 | #define PCI_PRI_CTRL 0x04 /* PRI control register */ | 881 | #define PCI_PRI_CTRL 0x04 /* PRI control register */ |
| 878 | #define PCI_PRI_CTRL_ENABLE 0x01 /* Enable */ | 882 | #define PCI_PRI_CTRL_ENABLE 0x0001 /* Enable */ |
| 879 | #define PCI_PRI_CTRL_RESET 0x02 /* Reset */ | 883 | #define PCI_PRI_CTRL_RESET 0x0002 /* Reset */ |
| 880 | #define PCI_PRI_STATUS 0x06 /* PRI status register */ | 884 | #define PCI_PRI_STATUS 0x06 /* PRI status register */ |
| 881 | #define PCI_PRI_STATUS_RF 0x001 /* Response Failure */ | 885 | #define PCI_PRI_STATUS_RF 0x0001 /* Response Failure */ |
| 882 | #define PCI_PRI_STATUS_UPRGI 0x002 /* Unexpected PRG index */ | 886 | #define PCI_PRI_STATUS_UPRGI 0x0002 /* Unexpected PRG index */ |
| 883 | #define PCI_PRI_STATUS_STOPPED 0x100 /* PRI Stopped */ | 887 | #define PCI_PRI_STATUS_STOPPED 0x0100 /* PRI Stopped */ |
| 884 | #define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */ | 888 | #define PCI_PRI_STATUS_PASID 0x8000 /* PRG Response PASID Required */ |
| 885 | #define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */ | 889 | #define PCI_PRI_MAX_REQ 0x08 /* PRI max reqs supported */ |
| 886 | #define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ | 890 | #define PCI_PRI_ALLOC_REQ 0x0c /* PRI max reqs allowed */ |
| @@ -898,16 +902,16 @@ | |||
| 898 | 902 | ||
| 899 | /* Single Root I/O Virtualization */ | 903 | /* Single Root I/O Virtualization */ |
| 900 | #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ | 904 | #define PCI_SRIOV_CAP 0x04 /* SR-IOV Capabilities */ |
| 901 | #define PCI_SRIOV_CAP_VFM 0x01 /* VF Migration Capable */ | 905 | #define PCI_SRIOV_CAP_VFM 0x00000001 /* VF Migration Capable */ |
| 902 | #define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ | 906 | #define PCI_SRIOV_CAP_INTR(x) ((x) >> 21) /* Interrupt Message Number */ |
| 903 | #define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ | 907 | #define PCI_SRIOV_CTRL 0x08 /* SR-IOV Control */ |
| 904 | #define PCI_SRIOV_CTRL_VFE 0x01 /* VF Enable */ | 908 | #define PCI_SRIOV_CTRL_VFE 0x0001 /* VF Enable */ |
| 905 | #define PCI_SRIOV_CTRL_VFM 0x02 /* VF Migration Enable */ | 909 | #define PCI_SRIOV_CTRL_VFM 0x0002 /* VF Migration Enable */ |
| 906 | #define PCI_SRIOV_CTRL_INTR 0x04 /* VF Migration Interrupt Enable */ | 910 | #define PCI_SRIOV_CTRL_INTR 0x0004 /* VF Migration Interrupt Enable */ |
| 907 | #define PCI_SRIOV_CTRL_MSE 0x08 /* VF Memory Space Enable */ | 911 | #define PCI_SRIOV_CTRL_MSE 0x0008 /* VF Memory Space Enable */ |
| 908 | #define PCI_SRIOV_CTRL_ARI 0x10 /* ARI Capable Hierarchy */ | 912 | #define PCI_SRIOV_CTRL_ARI 0x0010 /* ARI Capable Hierarchy */ |
| 909 | #define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ | 913 | #define PCI_SRIOV_STATUS 0x0a /* SR-IOV Status */ |
| 910 | #define PCI_SRIOV_STATUS_VFM 0x01 /* VF Migration Status */ | 914 | #define PCI_SRIOV_STATUS_VFM 0x0001 /* VF Migration Status */ |
| 911 | #define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ | 915 | #define PCI_SRIOV_INITIAL_VF 0x0c /* Initial VFs */ |
| 912 | #define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ | 916 | #define PCI_SRIOV_TOTAL_VF 0x0e /* Total VFs */ |
| 913 | #define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ | 917 | #define PCI_SRIOV_NUM_VF 0x10 /* Number of VFs */ |
| @@ -937,13 +941,13 @@ | |||
| 937 | 941 | ||
| 938 | /* Access Control Service */ | 942 | /* Access Control Service */ |
| 939 | #define PCI_ACS_CAP 0x04 /* ACS Capability Register */ | 943 | #define PCI_ACS_CAP 0x04 /* ACS Capability Register */ |
| 940 | #define PCI_ACS_SV 0x01 /* Source Validation */ | 944 | #define PCI_ACS_SV 0x0001 /* Source Validation */ |
| 941 | #define PCI_ACS_TB 0x02 /* Translation Blocking */ | 945 | #define PCI_ACS_TB 0x0002 /* Translation Blocking */ |
| 942 | #define PCI_ACS_RR 0x04 /* P2P Request Redirect */ | 946 | #define PCI_ACS_RR 0x0004 /* P2P Request Redirect */ |
| 943 | #define PCI_ACS_CR 0x08 /* P2P Completion Redirect */ | 947 | #define PCI_ACS_CR 0x0008 /* P2P Completion Redirect */ |
| 944 | #define PCI_ACS_UF 0x10 /* Upstream Forwarding */ | 948 | #define PCI_ACS_UF 0x0010 /* Upstream Forwarding */ |
| 945 | #define PCI_ACS_EC 0x20 /* P2P Egress Control */ | 949 | #define PCI_ACS_EC 0x0020 /* P2P Egress Control */ |
| 946 | #define PCI_ACS_DT 0x40 /* Direct Translated P2P */ | 950 | #define PCI_ACS_DT 0x0040 /* Direct Translated P2P */ |
| 947 | #define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */ | 951 | #define PCI_ACS_EGRESS_BITS 0x05 /* ACS Egress Control Vector Size */ |
| 948 | #define PCI_ACS_CTRL 0x06 /* ACS Control Register */ | 952 | #define PCI_ACS_CTRL 0x06 /* ACS Control Register */ |
| 949 | #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ | 953 | #define PCI_ACS_EGRESS_CTL_V 0x08 /* ACS Egress Control Vector */ |
| @@ -993,9 +997,9 @@ | |||
| 993 | #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ | 997 | #define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */ |
| 994 | 998 | ||
| 995 | #define PCI_EXP_DPC_CTL 6 /* DPC control */ | 999 | #define PCI_EXP_DPC_CTL 6 /* DPC control */ |
| 996 | #define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */ | 1000 | #define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */ |
| 997 | #define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ | 1001 | #define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */ |
| 998 | #define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ | 1002 | #define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */ |
| 999 | 1003 | ||
| 1000 | #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ | 1004 | #define PCI_EXP_DPC_STATUS 8 /* DPC Status */ |
| 1001 | #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ | 1005 | #define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */ |
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h index 4f4daf8db954..c912b5a678e4 100644 --- a/include/uapi/linux/switchtec_ioctl.h +++ b/include/uapi/linux/switchtec_ioctl.h | |||
| @@ -50,7 +50,7 @@ struct switchtec_ioctl_flash_part_info { | |||
| 50 | __u32 active; | 50 | __u32 active; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | struct switchtec_ioctl_event_summary { | 53 | struct switchtec_ioctl_event_summary_legacy { |
| 54 | __u64 global; | 54 | __u64 global; |
| 55 | __u64 part_bitmap; | 55 | __u64 part_bitmap; |
| 56 | __u32 local_part; | 56 | __u32 local_part; |
| @@ -59,6 +59,15 @@ struct switchtec_ioctl_event_summary { | |||
| 59 | __u32 pff[48]; | 59 | __u32 pff[48]; |
| 60 | }; | 60 | }; |
| 61 | 61 | ||
| 62 | struct switchtec_ioctl_event_summary { | ||
| 63 | __u64 global; | ||
| 64 | __u64 part_bitmap; | ||
| 65 | __u32 local_part; | ||
| 66 | __u32 padding; | ||
| 67 | __u32 part[48]; | ||
| 68 | __u32 pff[255]; | ||
| 69 | }; | ||
| 70 | |||
| 62 | #define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0 | 71 | #define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0 |
| 63 | #define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1 | 72 | #define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1 |
| 64 | #define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2 | 73 | #define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2 |
| @@ -127,6 +136,8 @@ struct switchtec_ioctl_pff_port { | |||
| 127 | _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info) | 136 | _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info) |
| 128 | #define SWITCHTEC_IOCTL_EVENT_SUMMARY \ | 137 | #define SWITCHTEC_IOCTL_EVENT_SUMMARY \ |
| 129 | _IOR('W', 0x42, struct switchtec_ioctl_event_summary) | 138 | _IOR('W', 0x42, struct switchtec_ioctl_event_summary) |
| 139 | #define SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY \ | ||
| 140 | _IOR('W', 0x42, struct switchtec_ioctl_event_summary_legacy) | ||
| 130 | #define SWITCHTEC_IOCTL_EVENT_CTL \ | 141 | #define SWITCHTEC_IOCTL_EVENT_CTL \ |
| 131 | _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl) | 142 | _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl) |
| 132 | #define SWITCHTEC_IOCTL_PFF_TO_PORT \ | 143 | #define SWITCHTEC_IOCTL_PFF_TO_PORT \ |
diff --git a/tools/pci/Makefile b/tools/pci/Makefile index 46e4c2f318c9..9b7534457060 100644 --- a/tools/pci/Makefile +++ b/tools/pci/Makefile | |||
| @@ -14,9 +14,12 @@ MAKEFLAGS += -r | |||
| 14 | 14 | ||
| 15 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include | 15 | CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include |
| 16 | 16 | ||
| 17 | ALL_TARGETS := pcitest pcitest.sh | 17 | ALL_TARGETS := pcitest |
| 18 | ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS)) | 18 | ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS)) |
| 19 | 19 | ||
| 20 | SCRIPTS := pcitest.sh | ||
| 21 | ALL_SCRIPTS := $(patsubst %,$(OUTPUT)%,$(SCRIPTS)) | ||
| 22 | |||
| 20 | all: $(ALL_PROGRAMS) | 23 | all: $(ALL_PROGRAMS) |
| 21 | 24 | ||
| 22 | export srctree OUTPUT CC LD CFLAGS | 25 | export srctree OUTPUT CC LD CFLAGS |
| @@ -46,6 +49,9 @@ install: $(ALL_PROGRAMS) | |||
| 46 | install -d -m 755 $(DESTDIR)$(bindir); \ | 49 | install -d -m 755 $(DESTDIR)$(bindir); \ |
| 47 | for program in $(ALL_PROGRAMS); do \ | 50 | for program in $(ALL_PROGRAMS); do \ |
| 48 | install $$program $(DESTDIR)$(bindir); \ | 51 | install $$program $(DESTDIR)$(bindir); \ |
| 52 | done; \ | ||
| 53 | for script in $(ALL_SCRIPTS); do \ | ||
| 54 | install $$script $(DESTDIR)$(bindir); \ | ||
| 49 | done | 55 | done |
| 50 | 56 | ||
| 51 | FORCE: | 57 | FORCE: |
diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c index ec4d51f3308b..5fa5c2bdd427 100644 --- a/tools/pci/pcitest.c +++ b/tools/pci/pcitest.c | |||
| @@ -140,6 +140,7 @@ static void run_test(struct pci_test *test) | |||
| 140 | } | 140 | } |
| 141 | 141 | ||
| 142 | fflush(stdout); | 142 | fflush(stdout); |
| 143 | return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */ | ||
| 143 | } | 144 | } |
| 144 | 145 | ||
| 145 | int main(int argc, char **argv) | 146 | int main(int argc, char **argv) |
| @@ -162,7 +163,7 @@ int main(int argc, char **argv) | |||
| 162 | /* set default endpoint device */ | 163 | /* set default endpoint device */ |
| 163 | test->device = "/dev/pci-endpoint-test.0"; | 164 | test->device = "/dev/pci-endpoint-test.0"; |
| 164 | 165 | ||
| 165 | while ((c = getopt(argc, argv, "D:b:m:x:i:Ilrwcs:")) != EOF) | 166 | while ((c = getopt(argc, argv, "D:b:m:x:i:Ilhrwcs:")) != EOF) |
| 166 | switch (c) { | 167 | switch (c) { |
| 167 | case 'D': | 168 | case 'D': |
| 168 | test->device = optarg; | 169 | test->device = optarg; |
| @@ -206,7 +207,6 @@ int main(int argc, char **argv) | |||
| 206 | case 's': | 207 | case 's': |
| 207 | test->size = strtoul(optarg, NULL, 0); | 208 | test->size = strtoul(optarg, NULL, 0); |
| 208 | continue; | 209 | continue; |
| 209 | case '?': | ||
| 210 | case 'h': | 210 | case 'h': |
| 211 | default: | 211 | default: |
| 212 | usage: | 212 | usage: |
| @@ -224,10 +224,10 @@ usage: | |||
| 224 | "\t-w Write buffer test\n" | 224 | "\t-w Write buffer test\n" |
| 225 | "\t-c Copy buffer test\n" | 225 | "\t-c Copy buffer test\n" |
| 226 | "\t-s <size> Size of buffer {default: 100KB}\n", | 226 | "\t-s <size> Size of buffer {default: 100KB}\n", |
| 227 | "\t-h Print this help message\n", | ||
| 227 | argv[0]); | 228 | argv[0]); |
| 228 | return -EINVAL; | 229 | return -EINVAL; |
| 229 | } | 230 | } |
| 230 | 231 | ||
| 231 | run_test(test); | 232 | return run_test(test); |
| 232 | return 0; | ||
| 233 | } | 233 | } |
