diff options
author | Bjorn Helgaas <bhelgaas@google.com> | 2019-05-13 19:34:41 -0400 |
---|---|---|
committer | Bjorn Helgaas <bhelgaas@google.com> | 2019-05-13 19:34:41 -0400 |
commit | 0b8439d374826f03d47b995bd5950ea8d8b7cff8 (patch) | |
tree | 3e0be2b0e8cab9629d7cd0f050c38f232abd1b78 | |
parent | b138f67d7badf9ecb08c120e9c9ecbb8d483da59 (diff) | |
parent | 8f220664570e755946db1282f48e07f26e1f2cb4 (diff) |
Merge branch 'remotes/lorenzo/pci/keystone'
- Move IRQ register address computation inside macros (Kishon Vijay
Abraham I)
- Separate legacy IRQ and MSI configuration (Kishon Vijay Abraham I)
- Use hwirq, not virq, to get MSI IRQ number offset (Kishon Vijay Abraham
I)
- Squash ks_pcie_handle_msi_irq() into ks_pcie_msi_irq_handler() (Kishon
Vijay Abraham I)
- Add dwc support for platforms with custom MSI controllers (Kishon Vijay
Abraham I)
- Add keystone-specific MSI controller (Kishon Vijay Abraham I)
- Remove dwc host_ops previously used for keystone-specific MSI (Kishon
Vijay Abraham I)
- Skip dwc default MSI init if platform has custom MSI controller (Kishon
Vijay Abraham I)
- Implement .start_link() and .stop_link() for keystone endpoint support
(Kishon Vijay Abraham I)
- Add keystone "reg-names" DT binding (Kishon Vijay Abraham I)
- Squash ks_pcie_dw_host_init() into ks_pcie_add_pcie_port() (Kishon
Vijay Abraham I)
- Get keystone register resources from DT by name, not index (Kishon
Vijay Abraham I)
- Get DT resources in .probe() to prepare for endpoint support (Kishon
Vijay Abraham I)
- Add "ti,syscon-pcie-mode" DT property for PCIe mode configuration
(Kishon Vijay Abraham I)
- Explicitly set keystone to host mode (Kishon Vijay Abraham I)
- Document DT "atu" reg-names requirement for DesignWare core >= 4.80
(Kishon Vijay Abraham I)
- Enable dwc iATU unroll for endpoint mode as well as host mode (Kishon
Vijay Abraham I)
- Add dwc "version" to identify core >= 4.80 for ATU programming (Kishon
Vijay Abraham I)
- Don't build ARM32-specific keystone code on ARM64 (Kishon Vijay Abraham
I)
- Add DT binding for keystone PCIe RC in AM654 SoC (Kishon Vijay Abraham
I)
- Add keystone support for AM654 SoC PCIe RC (Kishon Vijay Abraham I)
- Reset keystone PHYs before enabling them (Kishon Vijay Abraham I)
- Make of_pci_get_max_link_speed() available to endpoint drivers as well
as host drivers (Kishon Vijay Abraham I)
- Add keystone support for DT "max-link-speed" property (Kishon Vijay
Abraham I)
- Add endpoint library support for BAR buffer alignment (Kishon Vijay
Abraham I)
- Make all dw_pcie_ep_ops structs const (Kishon Vijay Abraham I)
- Fix fencepost error in dw_pcie_ep_find_capability() (Kishon Vijay
Abraham I)
- Add dwc hooks for dbi/dbi2 that share the same address space (Kishon
Vijay Abraham I)
- Add keystone support for TI AM654x in endpoint mode (Kishon Vijay
Abraham I)
- Configure designware endpoints to advertise smallest resizable BAR
(1MB) (Kishon Vijay Abraham I)
- Align designware endpoint ATU windows for raising MSIs (Kishon Vijay
Abraham I)
- Add endpoint test support for TI AM654x (Kishon Vijay Abraham I)
- Fix endpoint test test_reg_bar issue (Kishon Vijay Abraham I)
* remotes/lorenzo/pci/keystone:
misc: pci_endpoint_test: Fix test_reg_bar to be updated in pci_endpoint_test
misc: pci_endpoint_test: Add support to test PCI EP in AM654x
PCI: designware-ep: Use aligned ATU window for raising MSI interrupts
PCI: designware-ep: Configure Resizable BAR cap to advertise the smallest size
PCI: keystone: Add support for PCIe EP in AM654x Platforms
dt-bindings: PCI: Add PCI EP DT binding documentation for AM654
PCI: dwc: Add callbacks for accessing dbi2 address space
PCI: dwc: Fix dw_pcie_ep_find_capability() to return correct capability offset
PCI: dwc: Add const qualifier to struct dw_pcie_ep_ops
PCI: endpoint: Add support to specify alignment for buffers allocated to BARs
PCI: keystone: Add support to set the max link speed from DT
PCI: OF: Allow of_pci_get_max_link_speed() to be used by PCI Endpoint drivers
PCI: keystone: Invoke phy_reset() API before enabling PHY
PCI: keystone: Add support for PCIe RC in AM654x Platforms
dt-bindings: PCI: Add PCI RC DT binding documentation for AM654
PCI: keystone: Prevent ARM32 specific code to be compiled for ARM64
PCI: dwc: Fix ATU identification for designware version >= 4.80
PCI: dwc: Enable iATU unroll for endpoint too
dt-bindings: PCI: Document "atu" reg-names
PCI: keystone: Explicitly set the PCIe mode
dt-bindings: PCI: Add dt-binding to configure PCIe mode
PCI: keystone: Move resources initialization to prepare for EP support
PCI: keystone: Use platform_get_resource_byname() to get memory resources
PCI: keystone: Perform host initialization in a single function
dt-bindings: PCI: keystone: Add "reg-names" binding information
PCI: keystone: Cleanup error_irq configuration
PCI: keystone: Add start_link()/stop_link() dw_pcie_ops
PCI: dwc: Remove default MSI initialization for platform specific MSI chips
PCI: dwc: Remove Keystone specific dw_pcie_host_ops
PCI: keystone: Use Keystone specific msi_irq_chip
PCI: dwc: Add support to use non default msi_irq_chip
PCI: keystone: Cleanup ks_pcie_msi_irq_handler()
PCI: keystone: Use hwirq to get the MSI IRQ number offset
PCI: keystone: Add separate functions for configuring MSI and legacy interrupt
PCI: keystone: Cleanup interrupt related macros
# Conflicts:
# drivers/pci/controller/dwc/pcie-designware.h
-rw-r--r-- | Documentation/devicetree/bindings/pci/designware-pcie.txt | 7 | ||||
-rw-r--r-- | Documentation/devicetree/bindings/pci/pci-keystone.txt | 58 | ||||
-rw-r--r-- | drivers/misc/pci_endpoint_test.c | 18 | ||||
-rw-r--r-- | drivers/pci/Makefile | 2 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/Kconfig | 29 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pci-dra7xx.c | 2 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pci-keystone.c | 926 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pci-layerscape-ep.c | 2 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-artpec6.c | 2 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-designware-ep.c | 55 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-designware-host.c | 97 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-designware-plat.c | 2 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-designware.c | 52 | ||||
-rw-r--r-- | drivers/pci/controller/dwc/pcie-designware.h | 21 | ||||
-rw-r--r-- | drivers/pci/endpoint/functions/pci-epf-test.c | 5 | ||||
-rw-r--r-- | drivers/pci/endpoint/pci-epf-core.c | 10 | ||||
-rw-r--r-- | drivers/pci/of.c | 44 | ||||
-rw-r--r-- | include/linux/pci-epc.h | 2 | ||||
-rw-r--r-- | include/linux/pci-epf.h | 3 |
19 files changed, 952 insertions, 385 deletions
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt index c124f9bc11f3..5561a1c060d0 100644 --- a/Documentation/devicetree/bindings/pci/designware-pcie.txt +++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt | |||
@@ -4,8 +4,11 @@ Required properties: | |||
4 | - compatible: | 4 | - compatible: |
5 | "snps,dw-pcie" for RC mode; | 5 | "snps,dw-pcie" for RC mode; |
6 | "snps,dw-pcie-ep" for EP mode; | 6 | "snps,dw-pcie-ep" for EP mode; |
7 | - reg: Should contain the configuration address space. | 7 | - reg: For designware cores version < 4.80 contains the configuration |
8 | - reg-names: Must be "config" for the PCIe configuration space. | 8 | address space. For designware core version >= 4.80, contains |
9 | the configuration and ATU address space | ||
10 | - reg-names: Must be "config" for the PCIe configuration space and "atu" for | ||
11 | the ATU address space. | ||
9 | (The old way of getting the configuration address space from "ranges" | 12 | (The old way of getting the configuration address space from "ranges" |
10 | is deprecated and should be avoided.) | 13 | is deprecated and should be avoided.) |
11 | - num-lanes: number of lanes to use | 14 | - num-lanes: number of lanes to use |
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt index 2030ee0dc4f9..47202a2938f2 100644 --- a/Documentation/devicetree/bindings/pci/pci-keystone.txt +++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt | |||
@@ -11,16 +11,24 @@ described here as well as properties that are not applicable. | |||
11 | 11 | ||
12 | Required Properties:- | 12 | Required Properties:- |
13 | 13 | ||
14 | compatibility: "ti,keystone-pcie" | 14 | compatibility: Should be "ti,keystone-pcie" for RC on Keystone2 SoC |
15 | reg: index 1 is the base address and length of DW application registers. | 15 | Should be "ti,am654-pcie-rc" for RC on AM654x SoC |
16 | index 2 is the base address and length of PCI device ID register. | 16 | reg: Three register ranges as listed in the reg-names property |
17 | reg-names: "dbics" for the DesignWare PCIe registers, "app" for the | ||
18 | TI specific application registers, "config" for the | ||
19 | configuration space address | ||
17 | 20 | ||
18 | pcie_msi_intc : Interrupt controller device node for MSI IRQ chip | 21 | pcie_msi_intc : Interrupt controller device node for MSI IRQ chip |
19 | interrupt-cells: should be set to 1 | 22 | interrupt-cells: should be set to 1 |
20 | interrupts: GIC interrupt lines connected to PCI MSI interrupt lines | 23 | interrupts: GIC interrupt lines connected to PCI MSI interrupt lines |
24 | (required if the compatible is "ti,keystone-pcie") | ||
25 | msi-map: As specified in Documentation/devicetree/bindings/pci/pci-msi.txt | ||
26 | (required if the compatible is "ti,am654-pcie-rc"). | ||
21 | 27 | ||
22 | ti,syscon-pcie-id : phandle to the device control module required to set device | 28 | ti,syscon-pcie-id : phandle to the device control module required to set device |
23 | id and vendor id. | 29 | id and vendor id. |
30 | ti,syscon-pcie-mode : phandle to the device control module required to configure | ||
31 | PCI in either RC mode or EP mode. | ||
24 | 32 | ||
25 | Example: | 33 | Example: |
26 | pcie_msi_intc: msi-interrupt-controller { | 34 | pcie_msi_intc: msi-interrupt-controller { |
@@ -61,3 +69,47 @@ Optional properties:- | |||
61 | DesignWare DT Properties not applicable for Keystone PCI | 69 | DesignWare DT Properties not applicable for Keystone PCI |
62 | 70 | ||
63 | 1. pcie_bus clock-names not used. Instead, a phandle to phys is used. | 71 | 1. pcie_bus clock-names not used. Instead, a phandle to phys is used. |
72 | |||
73 | AM654 PCIe Endpoint | ||
74 | =================== | ||
75 | |||
76 | Required Properties:- | ||
77 | |||
78 | compatibility: Should be "ti,am654-pcie-ep" for EP on AM654x SoC | ||
79 | reg: Four register ranges as listed in the reg-names property | ||
80 | reg-names: "dbics" for the DesignWare PCIe registers, "app" for the | ||
81 | TI specific application registers, "atu" for the | ||
82 | Address Translation Unit configuration registers and | ||
83 | "addr_space" used to map remote RC address space | ||
84 | num-ib-windows: As specified in | ||
85 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
86 | num-ob-windows: As specified in | ||
87 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
88 | num-lanes: As specified in | ||
89 | Documentation/devicetree/bindings/pci/designware-pcie.txt | ||
90 | power-domains: As documented by the generic PM domain bindings in | ||
91 | Documentation/devicetree/bindings/power/power_domain.txt. | ||
92 | ti,syscon-pcie-mode: phandle to the device control module required to configure | ||
93 | PCI in either RC mode or EP mode. | ||
94 | |||
95 | Optional properties:- | ||
96 | |||
97 | phys: list of PHY specifiers (used by generic PHY framework) | ||
98 | phy-names: must be "pcie-phy0", "pcie-phy1", "pcie-phyN".. based on the | ||
99 | number of lanes as specified in *num-lanes* property. | ||
100 | ("phys" and "phy-names" DT bindings are specified in | ||
101 | Documentation/devicetree/bindings/phy/phy-bindings.txt) | ||
102 | interrupts: platform interrupt for error interrupts. | ||
103 | |||
104 | pcie-ep { | ||
105 | compatible = "ti,am654-pcie-ep"; | ||
106 | reg = <0x5500000 0x1000>, <0x5501000 0x1000>, | ||
107 | <0x10000000 0x8000000>, <0x5506000 0x1000>; | ||
108 | reg-names = "app", "dbics", "addr_space", "atu"; | ||
109 | power-domains = <&k3_pds 120>; | ||
110 | ti,syscon-pcie-mode = <&pcie0_mode>; | ||
111 | num-lanes = <1>; | ||
112 | num-ib-windows = <16>; | ||
113 | num-ob-windows = <16>; | ||
114 | interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>; | ||
115 | }; | ||
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 29582fe57151..7b015f2a1c6f 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c | |||
@@ -75,6 +75,11 @@ | |||
75 | #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 | 75 | #define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24 |
76 | #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 | 76 | #define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28 |
77 | 77 | ||
78 | #define PCI_DEVICE_ID_TI_AM654 0xb00c | ||
79 | |||
80 | #define is_am654_pci_dev(pdev) \ | ||
81 | ((pdev)->device == PCI_DEVICE_ID_TI_AM654) | ||
82 | |||
78 | static DEFINE_IDA(pci_endpoint_test_ida); | 83 | static DEFINE_IDA(pci_endpoint_test_ida); |
79 | 84 | ||
80 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ | 85 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ |
@@ -588,6 +593,7 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, | |||
588 | int ret = -EINVAL; | 593 | int ret = -EINVAL; |
589 | enum pci_barno bar; | 594 | enum pci_barno bar; |
590 | struct pci_endpoint_test *test = to_endpoint_test(file->private_data); | 595 | struct pci_endpoint_test *test = to_endpoint_test(file->private_data); |
596 | struct pci_dev *pdev = test->pdev; | ||
591 | 597 | ||
592 | mutex_lock(&test->mutex); | 598 | mutex_lock(&test->mutex); |
593 | switch (cmd) { | 599 | switch (cmd) { |
@@ -595,6 +601,8 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, | |||
595 | bar = arg; | 601 | bar = arg; |
596 | if (bar < 0 || bar > 5) | 602 | if (bar < 0 || bar > 5) |
597 | goto ret; | 603 | goto ret; |
604 | if (is_am654_pci_dev(pdev) && bar == BAR_0) | ||
605 | goto ret; | ||
598 | ret = pci_endpoint_test_bar(test, bar); | 606 | ret = pci_endpoint_test_bar(test, bar); |
599 | break; | 607 | break; |
600 | case PCITEST_LEGACY_IRQ: | 608 | case PCITEST_LEGACY_IRQ: |
@@ -662,6 +670,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev, | |||
662 | data = (struct pci_endpoint_test_data *)ent->driver_data; | 670 | data = (struct pci_endpoint_test_data *)ent->driver_data; |
663 | if (data) { | 671 | if (data) { |
664 | test_reg_bar = data->test_reg_bar; | 672 | test_reg_bar = data->test_reg_bar; |
673 | test->test_reg_bar = test_reg_bar; | ||
665 | test->alignment = data->alignment; | 674 | test->alignment = data->alignment; |
666 | irq_type = data->irq_type; | 675 | irq_type = data->irq_type; |
667 | } | 676 | } |
@@ -785,11 +794,20 @@ static void pci_endpoint_test_remove(struct pci_dev *pdev) | |||
785 | pci_disable_device(pdev); | 794 | pci_disable_device(pdev); |
786 | } | 795 | } |
787 | 796 | ||
797 | static const struct pci_endpoint_test_data am654_data = { | ||
798 | .test_reg_bar = BAR_2, | ||
799 | .alignment = SZ_64K, | ||
800 | .irq_type = IRQ_TYPE_MSI, | ||
801 | }; | ||
802 | |||
788 | static const struct pci_device_id pci_endpoint_test_tbl[] = { | 803 | static const struct pci_device_id pci_endpoint_test_tbl[] = { |
789 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, | 804 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) }, |
790 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, | 805 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) }, |
791 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, | 806 | { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) }, |
792 | { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, | 807 | { PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) }, |
808 | { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654), | ||
809 | .driver_data = (kernel_ulong_t)&am654_data | ||
810 | }, | ||
793 | { } | 811 | { } |
794 | }; | 812 | }; |
795 | MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); | 813 | MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 657d642fcc67..28cdd8c0213a 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -10,10 +10,10 @@ obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \ | |||
10 | ifdef CONFIG_PCI | 10 | ifdef CONFIG_PCI |
11 | obj-$(CONFIG_PROC_FS) += proc.o | 11 | obj-$(CONFIG_PROC_FS) += proc.o |
12 | obj-$(CONFIG_SYSFS) += slot.o | 12 | obj-$(CONFIG_SYSFS) += slot.o |
13 | obj-$(CONFIG_OF) += of.o | ||
14 | obj-$(CONFIG_ACPI) += pci-acpi.o | 13 | obj-$(CONFIG_ACPI) += pci-acpi.o |
15 | endif | 14 | endif |
16 | 15 | ||
16 | obj-$(CONFIG_OF) += of.o | ||
17 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o | 17 | obj-$(CONFIG_PCI_QUIRKS) += quirks.o |
18 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ | 18 | obj-$(CONFIG_PCIEPORTBUS) += pcie/ |
19 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ | 19 | obj-$(CONFIG_HOTPLUG_PCI) += hotplug/ |
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig index 6ea74b1c0d94..a6ce1ee51b4c 100644 --- a/drivers/pci/controller/dwc/Kconfig +++ b/drivers/pci/controller/dwc/Kconfig | |||
@@ -103,15 +103,32 @@ config PCIE_SPEAR13XX | |||
103 | Say Y here if you want PCIe support on SPEAr13XX SoCs. | 103 | Say Y here if you want PCIe support on SPEAr13XX SoCs. |
104 | 104 | ||
105 | config PCI_KEYSTONE | 105 | config PCI_KEYSTONE |
106 | bool "TI Keystone PCIe controller" | 106 | bool |
107 | depends on ARCH_KEYSTONE || (ARM && COMPILE_TEST) | 107 | |
108 | config PCI_KEYSTONE_HOST | ||
109 | bool "PCI Keystone Host Mode" | ||
110 | depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) | ||
108 | depends on PCI_MSI_IRQ_DOMAIN | 111 | depends on PCI_MSI_IRQ_DOMAIN |
109 | select PCIE_DW_HOST | 112 | select PCIE_DW_HOST |
113 | select PCI_KEYSTONE | ||
114 | default y | ||
110 | help | 115 | help |
111 | Say Y here if you want to enable PCI controller support on Keystone | 116 | Enables support for the PCIe controller in the Keystone SoC to |
112 | SoCs. The PCI controller on Keystone is based on DesignWare hardware | 117 | work in host mode. The PCI controller on Keystone is based on |
113 | and therefore the driver re-uses the DesignWare core functions to | 118 | DesignWare hardware and therefore the driver re-uses the |
114 | implement the driver. | 119 | DesignWare core functions to implement the driver. |
120 | |||
121 | config PCI_KEYSTONE_EP | ||
122 | bool "PCI Keystone Endpoint Mode" | ||
123 | depends on ARCH_KEYSTONE || ARCH_K3 || ((ARM || ARM64) && COMPILE_TEST) | ||
124 | depends on PCI_ENDPOINT | ||
125 | select PCIE_DW_EP | ||
126 | select PCI_KEYSTONE | ||
127 | help | ||
128 | Enables support for the PCIe controller in the Keystone SoC to | ||
129 | work in endpoint mode. The PCI controller on Keystone is based | ||
130 | on DesignWare hardware and therefore the driver re-uses the | ||
131 | DesignWare core functions to implement the driver. | ||
115 | 132 | ||
116 | config PCI_LAYERSCAPE | 133 | config PCI_LAYERSCAPE |
117 | bool "Freescale Layerscape PCIe controller" | 134 | bool "Freescale Layerscape PCIe controller" |
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index ae84a69ae63a..b287dbf6914c 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c | |||
@@ -406,7 +406,7 @@ dra7xx_pcie_get_features(struct dw_pcie_ep *ep) | |||
406 | return &dra7xx_pcie_epc_features; | 406 | return &dra7xx_pcie_epc_features; |
407 | } | 407 | } |
408 | 408 | ||
409 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 409 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
410 | .ep_init = dra7xx_pcie_ep_init, | 410 | .ep_init = dra7xx_pcie_ep_init, |
411 | .raise_irq = dra7xx_pcie_raise_irq, | 411 | .raise_irq = dra7xx_pcie_raise_irq, |
412 | .get_features = dra7xx_pcie_get_features, | 412 | .get_features = dra7xx_pcie_get_features, |
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 14f2b0b4ed5e..af677254a072 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/clk.h> | 12 | #include <linux/clk.h> |
13 | #include <linux/delay.h> | 13 | #include <linux/delay.h> |
14 | #include <linux/gpio/consumer.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
16 | #include <linux/irqchip/chained_irq.h> | 17 | #include <linux/irqchip/chained_irq.h> |
@@ -18,6 +19,7 @@ | |||
18 | #include <linux/mfd/syscon.h> | 19 | #include <linux/mfd/syscon.h> |
19 | #include <linux/msi.h> | 20 | #include <linux/msi.h> |
20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/of_device.h> | ||
21 | #include <linux/of_irq.h> | 23 | #include <linux/of_irq.h> |
22 | #include <linux/of_pci.h> | 24 | #include <linux/of_pci.h> |
23 | #include <linux/phy/phy.h> | 25 | #include <linux/phy/phy.h> |
@@ -26,6 +28,7 @@ | |||
26 | #include <linux/resource.h> | 28 | #include <linux/resource.h> |
27 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
28 | 30 | ||
31 | #include "../../pci.h" | ||
29 | #include "pcie-designware.h" | 32 | #include "pcie-designware.h" |
30 | 33 | ||
31 | #define PCIE_VENDORID_MASK 0xffff | 34 | #define PCIE_VENDORID_MASK 0xffff |
@@ -44,28 +47,34 @@ | |||
44 | #define CFG_TYPE1 BIT(24) | 47 | #define CFG_TYPE1 BIT(24) |
45 | 48 | ||
46 | #define OB_SIZE 0x030 | 49 | #define OB_SIZE 0x030 |
47 | #define SPACE0_REMOTE_CFG_OFFSET 0x1000 | ||
48 | #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) | 50 | #define OB_OFFSET_INDEX(n) (0x200 + (8 * (n))) |
49 | #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) | 51 | #define OB_OFFSET_HI(n) (0x204 + (8 * (n))) |
50 | #define OB_ENABLEN BIT(0) | 52 | #define OB_ENABLEN BIT(0) |
51 | #define OB_WIN_SIZE 8 /* 8MB */ | 53 | #define OB_WIN_SIZE 8 /* 8MB */ |
52 | 54 | ||
55 | #define PCIE_LEGACY_IRQ_ENABLE_SET(n) (0x188 + (0x10 * ((n) - 1))) | ||
56 | #define PCIE_LEGACY_IRQ_ENABLE_CLR(n) (0x18c + (0x10 * ((n) - 1))) | ||
57 | #define PCIE_EP_IRQ_SET 0x64 | ||
58 | #define PCIE_EP_IRQ_CLR 0x68 | ||
59 | #define INT_ENABLE BIT(0) | ||
60 | |||
53 | /* IRQ register defines */ | 61 | /* IRQ register defines */ |
54 | #define IRQ_EOI 0x050 | 62 | #define IRQ_EOI 0x050 |
55 | #define IRQ_STATUS 0x184 | ||
56 | #define IRQ_ENABLE_SET 0x188 | ||
57 | #define IRQ_ENABLE_CLR 0x18c | ||
58 | 63 | ||
59 | #define MSI_IRQ 0x054 | 64 | #define MSI_IRQ 0x054 |
60 | #define MSI0_IRQ_STATUS 0x104 | 65 | #define MSI_IRQ_STATUS(n) (0x104 + ((n) << 4)) |
61 | #define MSI0_IRQ_ENABLE_SET 0x108 | 66 | #define MSI_IRQ_ENABLE_SET(n) (0x108 + ((n) << 4)) |
62 | #define MSI0_IRQ_ENABLE_CLR 0x10c | 67 | #define MSI_IRQ_ENABLE_CLR(n) (0x10c + ((n) << 4)) |
63 | #define IRQ_STATUS 0x184 | ||
64 | #define MSI_IRQ_OFFSET 4 | 68 | #define MSI_IRQ_OFFSET 4 |
65 | 69 | ||
70 | #define IRQ_STATUS(n) (0x184 + ((n) << 4)) | ||
71 | #define IRQ_ENABLE_SET(n) (0x188 + ((n) << 4)) | ||
72 | #define INTx_EN BIT(0) | ||
73 | |||
66 | #define ERR_IRQ_STATUS 0x1c4 | 74 | #define ERR_IRQ_STATUS 0x1c4 |
67 | #define ERR_IRQ_ENABLE_SET 0x1c8 | 75 | #define ERR_IRQ_ENABLE_SET 0x1c8 |
68 | #define ERR_AER BIT(5) /* ECRC error */ | 76 | #define ERR_AER BIT(5) /* ECRC error */ |
77 | #define AM6_ERR_AER BIT(4) /* AM6 ECRC error */ | ||
69 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ | 78 | #define ERR_AXI BIT(4) /* AXI tag lookup fatal error */ |
70 | #define ERR_CORR BIT(3) /* Correctable error */ | 79 | #define ERR_CORR BIT(3) /* Correctable error */ |
71 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ | 80 | #define ERR_NONFATAL BIT(2) /* Non-fatal error */ |
@@ -74,25 +83,45 @@ | |||
74 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ | 83 | #define ERR_IRQ_ALL (ERR_AER | ERR_AXI | ERR_CORR | \ |
75 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) | 84 | ERR_NONFATAL | ERR_FATAL | ERR_SYS) |
76 | 85 | ||
77 | #define MAX_MSI_HOST_IRQS 8 | ||
78 | /* PCIE controller device IDs */ | 86 | /* PCIE controller device IDs */ |
79 | #define PCIE_RC_K2HK 0xb008 | 87 | #define PCIE_RC_K2HK 0xb008 |
80 | #define PCIE_RC_K2E 0xb009 | 88 | #define PCIE_RC_K2E 0xb009 |
81 | #define PCIE_RC_K2L 0xb00a | 89 | #define PCIE_RC_K2L 0xb00a |
82 | #define PCIE_RC_K2G 0xb00b | 90 | #define PCIE_RC_K2G 0xb00b |
83 | 91 | ||
92 | #define KS_PCIE_DEV_TYPE_MASK (0x3 << 1) | ||
93 | #define KS_PCIE_DEV_TYPE(mode) ((mode) << 1) | ||
94 | |||
95 | #define EP 0x0 | ||
96 | #define LEG_EP 0x1 | ||
97 | #define RC 0x2 | ||
98 | |||
99 | #define EXP_CAP_ID_OFFSET 0x70 | ||
100 | |||
101 | #define KS_PCIE_SYSCLOCKOUTEN BIT(0) | ||
102 | |||
103 | #define AM654_PCIE_DEV_TYPE_MASK 0x3 | ||
104 | #define AM654_WIN_SIZE SZ_64K | ||
105 | |||
106 | #define APP_ADDR_SPACE_0 (16 * SZ_1K) | ||
107 | |||
84 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) | 108 | #define to_keystone_pcie(x) dev_get_drvdata((x)->dev) |
85 | 109 | ||
110 | struct ks_pcie_of_data { | ||
111 | enum dw_pcie_device_mode mode; | ||
112 | const struct dw_pcie_host_ops *host_ops; | ||
113 | const struct dw_pcie_ep_ops *ep_ops; | ||
114 | unsigned int version; | ||
115 | }; | ||
116 | |||
86 | struct keystone_pcie { | 117 | struct keystone_pcie { |
87 | struct dw_pcie *pci; | 118 | struct dw_pcie *pci; |
88 | /* PCI Device ID */ | 119 | /* PCI Device ID */ |
89 | u32 device_id; | 120 | u32 device_id; |
90 | int num_legacy_host_irqs; | ||
91 | int legacy_host_irqs[PCI_NUM_INTX]; | 121 | int legacy_host_irqs[PCI_NUM_INTX]; |
92 | struct device_node *legacy_intc_np; | 122 | struct device_node *legacy_intc_np; |
93 | 123 | ||
94 | int num_msi_host_irqs; | 124 | int msi_host_irq; |
95 | int msi_host_irqs[MAX_MSI_HOST_IRQS]; | ||
96 | int num_lanes; | 125 | int num_lanes; |
97 | u32 num_viewport; | 126 | u32 num_viewport; |
98 | struct phy **phy; | 127 | struct phy **phy; |
@@ -101,28 +130,12 @@ struct keystone_pcie { | |||
101 | struct irq_domain *legacy_irq_domain; | 130 | struct irq_domain *legacy_irq_domain; |
102 | struct device_node *np; | 131 | struct device_node *np; |
103 | 132 | ||
104 | int error_irq; | ||
105 | |||
106 | /* Application register space */ | 133 | /* Application register space */ |
107 | void __iomem *va_app_base; /* DT 1st resource */ | 134 | void __iomem *va_app_base; /* DT 1st resource */ |
108 | struct resource app; | 135 | struct resource app; |
136 | bool is_am6; | ||
109 | }; | 137 | }; |
110 | 138 | ||
111 | static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, | ||
112 | u32 *bit_pos) | ||
113 | { | ||
114 | *reg_offset = offset % 8; | ||
115 | *bit_pos = offset >> 3; | ||
116 | } | ||
117 | |||
118 | static phys_addr_t ks_pcie_get_msi_addr(struct pcie_port *pp) | ||
119 | { | ||
120 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | ||
121 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
122 | |||
123 | return ks_pcie->app.start + MSI_IRQ; | ||
124 | } | ||
125 | |||
126 | static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) | 139 | static u32 ks_pcie_app_readl(struct keystone_pcie *ks_pcie, u32 offset) |
127 | { | 140 | { |
128 | return readl(ks_pcie->va_app_base + offset); | 141 | return readl(ks_pcie->va_app_base + offset); |
@@ -134,81 +147,114 @@ static void ks_pcie_app_writel(struct keystone_pcie *ks_pcie, u32 offset, | |||
134 | writel(val, ks_pcie->va_app_base + offset); | 147 | writel(val, ks_pcie->va_app_base + offset); |
135 | } | 148 | } |
136 | 149 | ||
137 | static void ks_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset) | 150 | static void ks_pcie_msi_irq_ack(struct irq_data *data) |
138 | { | 151 | { |
139 | struct dw_pcie *pci = ks_pcie->pci; | 152 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
140 | struct pcie_port *pp = &pci->pp; | 153 | struct keystone_pcie *ks_pcie; |
141 | struct device *dev = pci->dev; | 154 | u32 irq = data->hwirq; |
142 | u32 pending, vector; | 155 | struct dw_pcie *pci; |
143 | int src, virq; | 156 | u32 reg_offset; |
157 | u32 bit_pos; | ||
144 | 158 | ||
145 | pending = ks_pcie_app_readl(ks_pcie, MSI0_IRQ_STATUS + (offset << 4)); | 159 | pci = to_dw_pcie_from_pp(pp); |
160 | ks_pcie = to_keystone_pcie(pci); | ||
146 | 161 | ||
147 | /* | 162 | reg_offset = irq % 8; |
148 | * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit | 163 | bit_pos = irq >> 3; |
149 | * shows 1, 9, 17, 25 and so forth | 164 | |
150 | */ | 165 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_STATUS(reg_offset), |
151 | for (src = 0; src < 4; src++) { | 166 | BIT(bit_pos)); |
152 | if (BIT(src) & pending) { | 167 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); |
153 | vector = offset + (src << 3); | ||
154 | virq = irq_linear_revmap(pp->irq_domain, vector); | ||
155 | dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", | ||
156 | src, vector, virq); | ||
157 | generic_handle_irq(virq); | ||
158 | } | ||
159 | } | ||
160 | } | 168 | } |
161 | 169 | ||
162 | static void ks_pcie_msi_irq_ack(int irq, struct pcie_port *pp) | 170 | static void ks_pcie_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) |
163 | { | 171 | { |
164 | u32 reg_offset, bit_pos; | 172 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
165 | struct keystone_pcie *ks_pcie; | 173 | struct keystone_pcie *ks_pcie; |
166 | struct dw_pcie *pci; | 174 | struct dw_pcie *pci; |
175 | u64 msi_target; | ||
167 | 176 | ||
168 | pci = to_dw_pcie_from_pp(pp); | 177 | pci = to_dw_pcie_from_pp(pp); |
169 | ks_pcie = to_keystone_pcie(pci); | 178 | ks_pcie = to_keystone_pcie(pci); |
170 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | ||
171 | 179 | ||
172 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_STATUS + (reg_offset << 4), | 180 | msi_target = ks_pcie->app.start + MSI_IRQ; |
173 | BIT(bit_pos)); | 181 | msg->address_lo = lower_32_bits(msi_target); |
174 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, reg_offset + MSI_IRQ_OFFSET); | 182 | msg->address_hi = upper_32_bits(msi_target); |
183 | msg->data = data->hwirq; | ||
184 | |||
185 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | ||
186 | (int)data->hwirq, msg->address_hi, msg->address_lo); | ||
175 | } | 187 | } |
176 | 188 | ||
177 | static void ks_pcie_msi_set_irq(struct pcie_port *pp, int irq) | 189 | static int ks_pcie_msi_set_affinity(struct irq_data *irq_data, |
190 | const struct cpumask *mask, bool force) | ||
178 | { | 191 | { |
179 | u32 reg_offset, bit_pos; | 192 | return -EINVAL; |
180 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 193 | } |
181 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
182 | 194 | ||
183 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | 195 | static void ks_pcie_msi_mask(struct irq_data *data) |
184 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_SET + (reg_offset << 4), | 196 | { |
197 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); | ||
198 | struct keystone_pcie *ks_pcie; | ||
199 | u32 irq = data->hwirq; | ||
200 | struct dw_pcie *pci; | ||
201 | unsigned long flags; | ||
202 | u32 reg_offset; | ||
203 | u32 bit_pos; | ||
204 | |||
205 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
206 | |||
207 | pci = to_dw_pcie_from_pp(pp); | ||
208 | ks_pcie = to_keystone_pcie(pci); | ||
209 | |||
210 | reg_offset = irq % 8; | ||
211 | bit_pos = irq >> 3; | ||
212 | |||
213 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_CLR(reg_offset), | ||
185 | BIT(bit_pos)); | 214 | BIT(bit_pos)); |
215 | |||
216 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
186 | } | 217 | } |
187 | 218 | ||
188 | static void ks_pcie_msi_clear_irq(struct pcie_port *pp, int irq) | 219 | static void ks_pcie_msi_unmask(struct irq_data *data) |
189 | { | 220 | { |
190 | u32 reg_offset, bit_pos; | 221 | struct pcie_port *pp = irq_data_get_irq_chip_data(data); |
191 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 222 | struct keystone_pcie *ks_pcie; |
192 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | 223 | u32 irq = data->hwirq; |
224 | struct dw_pcie *pci; | ||
225 | unsigned long flags; | ||
226 | u32 reg_offset; | ||
227 | u32 bit_pos; | ||
228 | |||
229 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
193 | 230 | ||
194 | update_reg_offset_bit_pos(irq, ®_offset, &bit_pos); | 231 | pci = to_dw_pcie_from_pp(pp); |
195 | ks_pcie_app_writel(ks_pcie, MSI0_IRQ_ENABLE_CLR + (reg_offset << 4), | 232 | ks_pcie = to_keystone_pcie(pci); |
233 | |||
234 | reg_offset = irq % 8; | ||
235 | bit_pos = irq >> 3; | ||
236 | |||
237 | ks_pcie_app_writel(ks_pcie, MSI_IRQ_ENABLE_SET(reg_offset), | ||
196 | BIT(bit_pos)); | 238 | BIT(bit_pos)); |
239 | |||
240 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
197 | } | 241 | } |
198 | 242 | ||
243 | static struct irq_chip ks_pcie_msi_irq_chip = { | ||
244 | .name = "KEYSTONE-PCI-MSI", | ||
245 | .irq_ack = ks_pcie_msi_irq_ack, | ||
246 | .irq_compose_msi_msg = ks_pcie_compose_msi_msg, | ||
247 | .irq_set_affinity = ks_pcie_msi_set_affinity, | ||
248 | .irq_mask = ks_pcie_msi_mask, | ||
249 | .irq_unmask = ks_pcie_msi_unmask, | ||
250 | }; | ||
251 | |||
199 | static int ks_pcie_msi_host_init(struct pcie_port *pp) | 252 | static int ks_pcie_msi_host_init(struct pcie_port *pp) |
200 | { | 253 | { |
254 | pp->msi_irq_chip = &ks_pcie_msi_irq_chip; | ||
201 | return dw_pcie_allocate_domains(pp); | 255 | return dw_pcie_allocate_domains(pp); |
202 | } | 256 | } |
203 | 257 | ||
204 | static void ks_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie) | ||
205 | { | ||
206 | int i; | ||
207 | |||
208 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
209 | ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET + (i << 4), 0x1); | ||
210 | } | ||
211 | |||
212 | static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | 258 | static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, |
213 | int offset) | 259 | int offset) |
214 | { | 260 | { |
@@ -217,7 +263,7 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | |||
217 | u32 pending; | 263 | u32 pending; |
218 | int virq; | 264 | int virq; |
219 | 265 | ||
220 | pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS + (offset << 4)); | 266 | pending = ks_pcie_app_readl(ks_pcie, IRQ_STATUS(offset)); |
221 | 267 | ||
222 | if (BIT(0) & pending) { | 268 | if (BIT(0) & pending) { |
223 | virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); | 269 | virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset); |
@@ -229,6 +275,14 @@ static void ks_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, | |||
229 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); | 275 | ks_pcie_app_writel(ks_pcie, IRQ_EOI, offset); |
230 | } | 276 | } |
231 | 277 | ||
278 | /* | ||
279 | * Dummy function so that DW core doesn't configure MSI | ||
280 | */ | ||
281 | static int ks_pcie_am654_msi_host_init(struct pcie_port *pp) | ||
282 | { | ||
283 | return 0; | ||
284 | } | ||
285 | |||
232 | static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) | 286 | static void ks_pcie_enable_error_irq(struct keystone_pcie *ks_pcie) |
233 | { | 287 | { |
234 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); | 288 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_ENABLE_SET, ERR_IRQ_ALL); |
@@ -255,10 +309,10 @@ static irqreturn_t ks_pcie_handle_error_irq(struct keystone_pcie *ks_pcie) | |||
255 | if (reg & ERR_CORR) | 309 | if (reg & ERR_CORR) |
256 | dev_dbg(dev, "Correctable Error\n"); | 310 | dev_dbg(dev, "Correctable Error\n"); |
257 | 311 | ||
258 | if (reg & ERR_AXI) | 312 | if (!ks_pcie->is_am6 && (reg & ERR_AXI)) |
259 | dev_err(dev, "AXI tag lookup fatal Error\n"); | 313 | dev_err(dev, "AXI tag lookup fatal Error\n"); |
260 | 314 | ||
261 | if (reg & ERR_AER) | 315 | if (reg & ERR_AER || (ks_pcie->is_am6 && (reg & AM6_ERR_AER))) |
262 | dev_err(dev, "ECRC Error\n"); | 316 | dev_err(dev, "ECRC Error\n"); |
263 | 317 | ||
264 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); | 318 | ks_pcie_app_writel(ks_pcie, ERR_IRQ_STATUS, reg); |
@@ -356,6 +410,9 @@ static void ks_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) | |||
356 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); | 410 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_1, 0); |
357 | ks_pcie_clear_dbi_mode(ks_pcie); | 411 | ks_pcie_clear_dbi_mode(ks_pcie); |
358 | 412 | ||
413 | if (ks_pcie->is_am6) | ||
414 | return; | ||
415 | |||
359 | val = ilog2(OB_WIN_SIZE); | 416 | val = ilog2(OB_WIN_SIZE); |
360 | ks_pcie_app_writel(ks_pcie, OB_SIZE, val); | 417 | ks_pcie_app_writel(ks_pcie, OB_SIZE, val); |
361 | 418 | ||
@@ -445,68 +502,33 @@ static int ks_pcie_link_up(struct dw_pcie *pci) | |||
445 | return (val == PORT_LOGIC_LTSSM_STATE_L0); | 502 | return (val == PORT_LOGIC_LTSSM_STATE_L0); |
446 | } | 503 | } |
447 | 504 | ||
448 | static void ks_pcie_initiate_link_train(struct keystone_pcie *ks_pcie) | 505 | static void ks_pcie_stop_link(struct dw_pcie *pci) |
449 | { | 506 | { |
507 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
450 | u32 val; | 508 | u32 val; |
451 | 509 | ||
452 | /* Disable Link training */ | 510 | /* Disable Link training */ |
453 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | 511 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); |
454 | val &= ~LTSSM_EN_VAL; | 512 | val &= ~LTSSM_EN_VAL; |
455 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | 513 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); |
456 | |||
457 | /* Initiate Link Training */ | ||
458 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | ||
459 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
460 | } | 514 | } |
461 | 515 | ||
462 | /** | 516 | static int ks_pcie_start_link(struct dw_pcie *pci) |
463 | * ks_pcie_dw_host_init() - initialize host for v3_65 dw hardware | ||
464 | * | ||
465 | * Ioremap the register resources, initialize legacy irq domain | ||
466 | * and call dw_pcie_v3_65_host_init() API to initialize the Keystone | ||
467 | * PCI host controller. | ||
468 | */ | ||
469 | static int __init ks_pcie_dw_host_init(struct keystone_pcie *ks_pcie) | ||
470 | { | 517 | { |
471 | struct dw_pcie *pci = ks_pcie->pci; | 518 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
472 | struct pcie_port *pp = &pci->pp; | ||
473 | struct device *dev = pci->dev; | 519 | struct device *dev = pci->dev; |
474 | struct platform_device *pdev = to_platform_device(dev); | 520 | u32 val; |
475 | struct resource *res; | ||
476 | |||
477 | /* Index 0 is the config reg. space address */ | ||
478 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
479 | pci->dbi_base = devm_pci_remap_cfg_resource(dev, res); | ||
480 | if (IS_ERR(pci->dbi_base)) | ||
481 | return PTR_ERR(pci->dbi_base); | ||
482 | |||
483 | /* | ||
484 | * We set these same and is used in pcie rd/wr_other_conf | ||
485 | * functions | ||
486 | */ | ||
487 | pp->va_cfg0_base = pci->dbi_base + SPACE0_REMOTE_CFG_OFFSET; | ||
488 | pp->va_cfg1_base = pp->va_cfg0_base; | ||
489 | |||
490 | /* Index 1 is the application reg. space address */ | ||
491 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | ||
492 | ks_pcie->va_app_base = devm_ioremap_resource(dev, res); | ||
493 | if (IS_ERR(ks_pcie->va_app_base)) | ||
494 | return PTR_ERR(ks_pcie->va_app_base); | ||
495 | |||
496 | ks_pcie->app = *res; | ||
497 | 521 | ||
498 | /* Create legacy IRQ domain */ | 522 | if (dw_pcie_link_up(pci)) { |
499 | ks_pcie->legacy_irq_domain = | 523 | dev_dbg(dev, "link is already up\n"); |
500 | irq_domain_add_linear(ks_pcie->legacy_intc_np, | 524 | return 0; |
501 | PCI_NUM_INTX, | ||
502 | &ks_pcie_legacy_irq_domain_ops, | ||
503 | NULL); | ||
504 | if (!ks_pcie->legacy_irq_domain) { | ||
505 | dev_err(dev, "Failed to add irq domain for legacy irqs\n"); | ||
506 | return -EINVAL; | ||
507 | } | 525 | } |
508 | 526 | ||
509 | return dw_pcie_host_init(pp); | 527 | /* Initiate Link Training */ |
528 | val = ks_pcie_app_readl(ks_pcie, CMD_STATUS); | ||
529 | ks_pcie_app_writel(ks_pcie, CMD_STATUS, LTSSM_EN_VAL | val); | ||
530 | |||
531 | return 0; | ||
510 | } | 532 | } |
511 | 533 | ||
512 | static void ks_pcie_quirk(struct pci_dev *dev) | 534 | static void ks_pcie_quirk(struct pci_dev *dev) |
@@ -552,34 +574,16 @@ static void ks_pcie_quirk(struct pci_dev *dev) | |||
552 | } | 574 | } |
553 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); | 575 | DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, ks_pcie_quirk); |
554 | 576 | ||
555 | static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie) | ||
556 | { | ||
557 | struct dw_pcie *pci = ks_pcie->pci; | ||
558 | struct device *dev = pci->dev; | ||
559 | |||
560 | if (dw_pcie_link_up(pci)) { | ||
561 | dev_info(dev, "Link already up\n"); | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | ks_pcie_initiate_link_train(ks_pcie); | ||
566 | |||
567 | /* check if the link is up or not */ | ||
568 | if (!dw_pcie_wait_for_link(pci)) | ||
569 | return 0; | ||
570 | |||
571 | dev_err(dev, "phy link never came up\n"); | ||
572 | return -ETIMEDOUT; | ||
573 | } | ||
574 | |||
575 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) | 577 | static void ks_pcie_msi_irq_handler(struct irq_desc *desc) |
576 | { | 578 | { |
577 | unsigned int irq = irq_desc_get_irq(desc); | 579 | unsigned int irq = desc->irq_data.hwirq; |
578 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); | 580 | struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc); |
579 | u32 offset = irq - ks_pcie->msi_host_irqs[0]; | 581 | u32 offset = irq - ks_pcie->msi_host_irq; |
580 | struct dw_pcie *pci = ks_pcie->pci; | 582 | struct dw_pcie *pci = ks_pcie->pci; |
583 | struct pcie_port *pp = &pci->pp; | ||
581 | struct device *dev = pci->dev; | 584 | struct device *dev = pci->dev; |
582 | struct irq_chip *chip = irq_desc_get_chip(desc); | 585 | struct irq_chip *chip = irq_desc_get_chip(desc); |
586 | u32 vector, virq, reg, pos; | ||
583 | 587 | ||
584 | dev_dbg(dev, "%s, irq %d\n", __func__, irq); | 588 | dev_dbg(dev, "%s, irq %d\n", __func__, irq); |
585 | 589 | ||
@@ -589,7 +593,23 @@ static void ks_pcie_msi_irq_handler(struct irq_desc *desc) | |||
589 | * ack operation. | 593 | * ack operation. |
590 | */ | 594 | */ |
591 | chained_irq_enter(chip, desc); | 595 | chained_irq_enter(chip, desc); |
592 | ks_pcie_handle_msi_irq(ks_pcie, offset); | 596 | |
597 | reg = ks_pcie_app_readl(ks_pcie, MSI_IRQ_STATUS(offset)); | ||
598 | /* | ||
599 | * MSI0 status bit 0-3 shows vectors 0, 8, 16, 24, MSI1 status bit | ||
600 | * shows 1, 9, 17, 25 and so forth | ||
601 | */ | ||
602 | for (pos = 0; pos < 4; pos++) { | ||
603 | if (!(reg & BIT(pos))) | ||
604 | continue; | ||
605 | |||
606 | vector = offset + (pos << 3); | ||
607 | virq = irq_linear_revmap(pp->irq_domain, vector); | ||
608 | dev_dbg(dev, "irq: bit %d, vector %d, virq %d\n", pos, vector, | ||
609 | virq); | ||
610 | generic_handle_irq(virq); | ||
611 | } | ||
612 | |||
593 | chained_irq_exit(chip, desc); | 613 | chained_irq_exit(chip, desc); |
594 | } | 614 | } |
595 | 615 | ||
@@ -622,89 +642,119 @@ static void ks_pcie_legacy_irq_handler(struct irq_desc *desc) | |||
622 | chained_irq_exit(chip, desc); | 642 | chained_irq_exit(chip, desc); |
623 | } | 643 | } |
624 | 644 | ||
625 | static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie, | 645 | static int ks_pcie_config_msi_irq(struct keystone_pcie *ks_pcie) |
626 | char *controller, int *num_irqs) | ||
627 | { | 646 | { |
628 | int temp, max_host_irqs, legacy = 1, *host_irqs; | ||
629 | struct device *dev = ks_pcie->pci->dev; | 647 | struct device *dev = ks_pcie->pci->dev; |
630 | struct device_node *np_pcie = dev->of_node, **np_temp; | 648 | struct device_node *np = ks_pcie->np; |
631 | 649 | struct device_node *intc_np; | |
632 | if (!strcmp(controller, "msi-interrupt-controller")) | 650 | struct irq_data *irq_data; |
633 | legacy = 0; | 651 | int irq_count, irq, ret, i; |
634 | 652 | ||
635 | if (legacy) { | 653 | if (!IS_ENABLED(CONFIG_PCI_MSI)) |
636 | np_temp = &ks_pcie->legacy_intc_np; | 654 | return 0; |
637 | max_host_irqs = PCI_NUM_INTX; | ||
638 | host_irqs = &ks_pcie->legacy_host_irqs[0]; | ||
639 | } else { | ||
640 | np_temp = &ks_pcie->msi_intc_np; | ||
641 | max_host_irqs = MAX_MSI_HOST_IRQS; | ||
642 | host_irqs = &ks_pcie->msi_host_irqs[0]; | ||
643 | } | ||
644 | 655 | ||
645 | /* interrupt controller is in a child node */ | 656 | intc_np = of_get_child_by_name(np, "msi-interrupt-controller"); |
646 | *np_temp = of_get_child_by_name(np_pcie, controller); | 657 | if (!intc_np) { |
647 | if (!(*np_temp)) { | 658 | if (ks_pcie->is_am6) |
648 | dev_err(dev, "Node for %s is absent\n", controller); | 659 | return 0; |
660 | dev_warn(dev, "msi-interrupt-controller node is absent\n"); | ||
649 | return -EINVAL; | 661 | return -EINVAL; |
650 | } | 662 | } |
651 | 663 | ||
652 | temp = of_irq_count(*np_temp); | 664 | irq_count = of_irq_count(intc_np); |
653 | if (!temp) { | 665 | if (!irq_count) { |
654 | dev_err(dev, "No IRQ entries in %s\n", controller); | 666 | dev_err(dev, "No IRQ entries in msi-interrupt-controller\n"); |
655 | of_node_put(*np_temp); | 667 | ret = -EINVAL; |
656 | return -EINVAL; | 668 | goto err; |
657 | } | 669 | } |
658 | 670 | ||
659 | if (temp > max_host_irqs) | 671 | for (i = 0; i < irq_count; i++) { |
660 | dev_warn(dev, "Too many %s interrupts defined %u\n", | 672 | irq = irq_of_parse_and_map(intc_np, i); |
661 | (legacy ? "legacy" : "MSI"), temp); | 673 | if (!irq) { |
662 | 674 | ret = -EINVAL; | |
663 | /* | 675 | goto err; |
664 | * support upto max_host_irqs. In dt from index 0 to 3 (legacy) or 0 to | 676 | } |
665 | * 7 (MSI) | ||
666 | */ | ||
667 | for (temp = 0; temp < max_host_irqs; temp++) { | ||
668 | host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp); | ||
669 | if (!host_irqs[temp]) | ||
670 | break; | ||
671 | } | ||
672 | 677 | ||
673 | of_node_put(*np_temp); | 678 | if (!ks_pcie->msi_host_irq) { |
679 | irq_data = irq_get_irq_data(irq); | ||
680 | if (!irq_data) { | ||
681 | ret = -EINVAL; | ||
682 | goto err; | ||
683 | } | ||
684 | ks_pcie->msi_host_irq = irq_data->hwirq; | ||
685 | } | ||
674 | 686 | ||
675 | if (temp) { | 687 | irq_set_chained_handler_and_data(irq, ks_pcie_msi_irq_handler, |
676 | *num_irqs = temp; | 688 | ks_pcie); |
677 | return 0; | ||
678 | } | 689 | } |
679 | 690 | ||
680 | return -EINVAL; | 691 | of_node_put(intc_np); |
692 | return 0; | ||
693 | |||
694 | err: | ||
695 | of_node_put(intc_np); | ||
696 | return ret; | ||
681 | } | 697 | } |
682 | 698 | ||
683 | static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie) | 699 | static int ks_pcie_config_legacy_irq(struct keystone_pcie *ks_pcie) |
684 | { | 700 | { |
685 | int i; | 701 | struct device *dev = ks_pcie->pci->dev; |
702 | struct irq_domain *legacy_irq_domain; | ||
703 | struct device_node *np = ks_pcie->np; | ||
704 | struct device_node *intc_np; | ||
705 | int irq_count, irq, ret = 0, i; | ||
706 | |||
707 | intc_np = of_get_child_by_name(np, "legacy-interrupt-controller"); | ||
708 | if (!intc_np) { | ||
709 | /* | ||
710 | * Since legacy interrupts are modeled as edge-interrupts in | ||
711 | * AM6, keep it disabled for now. | ||
712 | */ | ||
713 | if (ks_pcie->is_am6) | ||
714 | return 0; | ||
715 | dev_warn(dev, "legacy-interrupt-controller node is absent\n"); | ||
716 | return -EINVAL; | ||
717 | } | ||
686 | 718 | ||
687 | /* Legacy IRQ */ | 719 | irq_count = of_irq_count(intc_np); |
688 | for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) { | 720 | if (!irq_count) { |
689 | irq_set_chained_handler_and_data(ks_pcie->legacy_host_irqs[i], | 721 | dev_err(dev, "No IRQ entries in legacy-interrupt-controller\n"); |
722 | ret = -EINVAL; | ||
723 | goto err; | ||
724 | } | ||
725 | |||
726 | for (i = 0; i < irq_count; i++) { | ||
727 | irq = irq_of_parse_and_map(intc_np, i); | ||
728 | if (!irq) { | ||
729 | ret = -EINVAL; | ||
730 | goto err; | ||
731 | } | ||
732 | ks_pcie->legacy_host_irqs[i] = irq; | ||
733 | |||
734 | irq_set_chained_handler_and_data(irq, | ||
690 | ks_pcie_legacy_irq_handler, | 735 | ks_pcie_legacy_irq_handler, |
691 | ks_pcie); | 736 | ks_pcie); |
692 | } | 737 | } |
693 | ks_pcie_enable_legacy_irqs(ks_pcie); | ||
694 | 738 | ||
695 | /* MSI IRQ */ | 739 | legacy_irq_domain = |
696 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | 740 | irq_domain_add_linear(intc_np, PCI_NUM_INTX, |
697 | for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) { | 741 | &ks_pcie_legacy_irq_domain_ops, NULL); |
698 | irq_set_chained_handler_and_data(ks_pcie->msi_host_irqs[i], | 742 | if (!legacy_irq_domain) { |
699 | ks_pcie_msi_irq_handler, | 743 | dev_err(dev, "Failed to add irq domain for legacy irqs\n"); |
700 | ks_pcie); | 744 | ret = -EINVAL; |
701 | } | 745 | goto err; |
702 | } | 746 | } |
747 | ks_pcie->legacy_irq_domain = legacy_irq_domain; | ||
748 | |||
749 | for (i = 0; i < PCI_NUM_INTX; i++) | ||
750 | ks_pcie_app_writel(ks_pcie, IRQ_ENABLE_SET(i), INTx_EN); | ||
703 | 751 | ||
704 | if (ks_pcie->error_irq > 0) | 752 | err: |
705 | ks_pcie_enable_error_irq(ks_pcie); | 753 | of_node_put(intc_np); |
754 | return ret; | ||
706 | } | 755 | } |
707 | 756 | ||
757 | #ifdef CONFIG_ARM | ||
708 | /* | 758 | /* |
709 | * When a PCI device does not exist during config cycles, keystone host gets a | 759 | * When a PCI device does not exist during config cycles, keystone host gets a |
710 | * bus error instead of returning 0xffffffff. This handler always returns 0 | 760 | * bus error instead of returning 0xffffffff. This handler always returns 0 |
@@ -724,6 +774,7 @@ static int ks_pcie_fault(unsigned long addr, unsigned int fsr, | |||
724 | 774 | ||
725 | return 0; | 775 | return 0; |
726 | } | 776 | } |
777 | #endif | ||
727 | 778 | ||
728 | static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) | 779 | static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) |
729 | { | 780 | { |
@@ -742,8 +793,10 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie) | |||
742 | if (ret) | 793 | if (ret) |
743 | return ret; | 794 | return ret; |
744 | 795 | ||
796 | dw_pcie_dbi_ro_wr_en(pci); | ||
745 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); | 797 | dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, id & PCIE_VENDORID_MASK); |
746 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); | 798 | dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, id >> PCIE_DEVICEID_SHIFT); |
799 | dw_pcie_dbi_ro_wr_dis(pci); | ||
747 | 800 | ||
748 | return 0; | 801 | return 0; |
749 | } | 802 | } |
@@ -754,11 +807,18 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) | |||
754 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | 807 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
755 | int ret; | 808 | int ret; |
756 | 809 | ||
810 | ret = ks_pcie_config_legacy_irq(ks_pcie); | ||
811 | if (ret) | ||
812 | return ret; | ||
813 | |||
814 | ret = ks_pcie_config_msi_irq(ks_pcie); | ||
815 | if (ret) | ||
816 | return ret; | ||
817 | |||
757 | dw_pcie_setup_rc(pp); | 818 | dw_pcie_setup_rc(pp); |
758 | 819 | ||
759 | ks_pcie_establish_link(ks_pcie); | 820 | ks_pcie_stop_link(pci); |
760 | ks_pcie_setup_rc_app_regs(ks_pcie); | 821 | ks_pcie_setup_rc_app_regs(ks_pcie); |
761 | ks_pcie_setup_interrupts(ks_pcie); | ||
762 | writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), | 822 | writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8), |
763 | pci->dbi_base + PCI_IO_BASE); | 823 | pci->dbi_base + PCI_IO_BASE); |
764 | 824 | ||
@@ -766,12 +826,17 @@ static int __init ks_pcie_host_init(struct pcie_port *pp) | |||
766 | if (ret < 0) | 826 | if (ret < 0) |
767 | return ret; | 827 | return ret; |
768 | 828 | ||
829 | #ifdef CONFIG_ARM | ||
769 | /* | 830 | /* |
770 | * PCIe access errors that result into OCP errors are caught by ARM as | 831 | * PCIe access errors that result into OCP errors are caught by ARM as |
771 | * "External aborts" | 832 | * "External aborts" |
772 | */ | 833 | */ |
773 | hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, | 834 | hook_fault_code(17, ks_pcie_fault, SIGBUS, 0, |
774 | "Asynchronous external abort"); | 835 | "Asynchronous external abort"); |
836 | #endif | ||
837 | |||
838 | ks_pcie_start_link(pci); | ||
839 | dw_pcie_wait_for_link(pci); | ||
775 | 840 | ||
776 | return 0; | 841 | return 0; |
777 | } | 842 | } |
@@ -780,14 +845,15 @@ static const struct dw_pcie_host_ops ks_pcie_host_ops = { | |||
780 | .rd_other_conf = ks_pcie_rd_other_conf, | 845 | .rd_other_conf = ks_pcie_rd_other_conf, |
781 | .wr_other_conf = ks_pcie_wr_other_conf, | 846 | .wr_other_conf = ks_pcie_wr_other_conf, |
782 | .host_init = ks_pcie_host_init, | 847 | .host_init = ks_pcie_host_init, |
783 | .msi_set_irq = ks_pcie_msi_set_irq, | ||
784 | .msi_clear_irq = ks_pcie_msi_clear_irq, | ||
785 | .get_msi_addr = ks_pcie_get_msi_addr, | ||
786 | .msi_host_init = ks_pcie_msi_host_init, | 848 | .msi_host_init = ks_pcie_msi_host_init, |
787 | .msi_irq_ack = ks_pcie_msi_irq_ack, | ||
788 | .scan_bus = ks_pcie_v3_65_scan_bus, | 849 | .scan_bus = ks_pcie_v3_65_scan_bus, |
789 | }; | 850 | }; |
790 | 851 | ||
852 | static const struct dw_pcie_host_ops ks_pcie_am654_host_ops = { | ||
853 | .host_init = ks_pcie_host_init, | ||
854 | .msi_host_init = ks_pcie_am654_msi_host_init, | ||
855 | }; | ||
856 | |||
791 | static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) | 857 | static irqreturn_t ks_pcie_err_irq_handler(int irq, void *priv) |
792 | { | 858 | { |
793 | struct keystone_pcie *ks_pcie = priv; | 859 | struct keystone_pcie *ks_pcie = priv; |
@@ -801,41 +867,17 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
801 | struct dw_pcie *pci = ks_pcie->pci; | 867 | struct dw_pcie *pci = ks_pcie->pci; |
802 | struct pcie_port *pp = &pci->pp; | 868 | struct pcie_port *pp = &pci->pp; |
803 | struct device *dev = &pdev->dev; | 869 | struct device *dev = &pdev->dev; |
870 | struct resource *res; | ||
804 | int ret; | 871 | int ret; |
805 | 872 | ||
806 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | 873 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); |
807 | "legacy-interrupt-controller", | 874 | pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res); |
808 | &ks_pcie->num_legacy_host_irqs); | 875 | if (IS_ERR(pp->va_cfg0_base)) |
809 | if (ret) | 876 | return PTR_ERR(pp->va_cfg0_base); |
810 | return ret; | ||
811 | |||
812 | if (IS_ENABLED(CONFIG_PCI_MSI)) { | ||
813 | ret = ks_pcie_get_irq_controller_info(ks_pcie, | ||
814 | "msi-interrupt-controller", | ||
815 | &ks_pcie->num_msi_host_irqs); | ||
816 | if (ret) | ||
817 | return ret; | ||
818 | } | ||
819 | 877 | ||
820 | /* | 878 | pp->va_cfg1_base = pp->va_cfg0_base; |
821 | * Index 0 is the platform interrupt for error interrupt | ||
822 | * from RC. This is optional. | ||
823 | */ | ||
824 | ks_pcie->error_irq = irq_of_parse_and_map(ks_pcie->np, 0); | ||
825 | if (ks_pcie->error_irq <= 0) | ||
826 | dev_info(dev, "no error IRQ defined\n"); | ||
827 | else { | ||
828 | ret = request_irq(ks_pcie->error_irq, ks_pcie_err_irq_handler, | ||
829 | IRQF_SHARED, "pcie-error-irq", ks_pcie); | ||
830 | if (ret < 0) { | ||
831 | dev_err(dev, "failed to request error IRQ %d\n", | ||
832 | ks_pcie->error_irq); | ||
833 | return ret; | ||
834 | } | ||
835 | } | ||
836 | 879 | ||
837 | pp->ops = &ks_pcie_host_ops; | 880 | ret = dw_pcie_host_init(pp); |
838 | ret = ks_pcie_dw_host_init(ks_pcie); | ||
839 | if (ret) { | 881 | if (ret) { |
840 | dev_err(dev, "failed to initialize host\n"); | 882 | dev_err(dev, "failed to initialize host\n"); |
841 | return ret; | 883 | return ret; |
@@ -844,18 +886,139 @@ static int __init ks_pcie_add_pcie_port(struct keystone_pcie *ks_pcie, | |||
844 | return 0; | 886 | return 0; |
845 | } | 887 | } |
846 | 888 | ||
847 | static const struct of_device_id ks_pcie_of_match[] = { | 889 | static u32 ks_pcie_am654_read_dbi2(struct dw_pcie *pci, void __iomem *base, |
848 | { | 890 | u32 reg, size_t size) |
849 | .type = "pci", | 891 | { |
850 | .compatible = "ti,keystone-pcie", | 892 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); |
851 | }, | 893 | u32 val; |
852 | { }, | 894 | |
853 | }; | 895 | ks_pcie_set_dbi_mode(ks_pcie); |
896 | dw_pcie_read(base + reg, size, &val); | ||
897 | ks_pcie_clear_dbi_mode(ks_pcie); | ||
898 | return val; | ||
899 | } | ||
900 | |||
901 | static void ks_pcie_am654_write_dbi2(struct dw_pcie *pci, void __iomem *base, | ||
902 | u32 reg, size_t size, u32 val) | ||
903 | { | ||
904 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
905 | |||
906 | ks_pcie_set_dbi_mode(ks_pcie); | ||
907 | dw_pcie_write(base + reg, size, val); | ||
908 | ks_pcie_clear_dbi_mode(ks_pcie); | ||
909 | } | ||
854 | 910 | ||
855 | static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { | 911 | static const struct dw_pcie_ops ks_pcie_dw_pcie_ops = { |
912 | .start_link = ks_pcie_start_link, | ||
913 | .stop_link = ks_pcie_stop_link, | ||
856 | .link_up = ks_pcie_link_up, | 914 | .link_up = ks_pcie_link_up, |
915 | .read_dbi2 = ks_pcie_am654_read_dbi2, | ||
916 | .write_dbi2 = ks_pcie_am654_write_dbi2, | ||
917 | }; | ||
918 | |||
919 | static void ks_pcie_am654_ep_init(struct dw_pcie_ep *ep) | ||
920 | { | ||
921 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
922 | int flags; | ||
923 | |||
924 | ep->page_size = AM654_WIN_SIZE; | ||
925 | flags = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_32; | ||
926 | dw_pcie_writel_dbi2(pci, PCI_BASE_ADDRESS_0, APP_ADDR_SPACE_0 - 1); | ||
927 | dw_pcie_writel_dbi(pci, PCI_BASE_ADDRESS_0, flags); | ||
928 | } | ||
929 | |||
930 | static void ks_pcie_am654_raise_legacy_irq(struct keystone_pcie *ks_pcie) | ||
931 | { | ||
932 | struct dw_pcie *pci = ks_pcie->pci; | ||
933 | u8 int_pin; | ||
934 | |||
935 | int_pin = dw_pcie_readb_dbi(pci, PCI_INTERRUPT_PIN); | ||
936 | if (int_pin == 0 || int_pin > 4) | ||
937 | return; | ||
938 | |||
939 | ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_SET(int_pin), | ||
940 | INT_ENABLE); | ||
941 | ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_SET, INT_ENABLE); | ||
942 | mdelay(1); | ||
943 | ks_pcie_app_writel(ks_pcie, PCIE_EP_IRQ_CLR, INT_ENABLE); | ||
944 | ks_pcie_app_writel(ks_pcie, PCIE_LEGACY_IRQ_ENABLE_CLR(int_pin), | ||
945 | INT_ENABLE); | ||
946 | } | ||
947 | |||
948 | static int ks_pcie_am654_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | ||
949 | enum pci_epc_irq_type type, | ||
950 | u16 interrupt_num) | ||
951 | { | ||
952 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | ||
953 | struct keystone_pcie *ks_pcie = to_keystone_pcie(pci); | ||
954 | |||
955 | switch (type) { | ||
956 | case PCI_EPC_IRQ_LEGACY: | ||
957 | ks_pcie_am654_raise_legacy_irq(ks_pcie); | ||
958 | break; | ||
959 | case PCI_EPC_IRQ_MSI: | ||
960 | dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num); | ||
961 | break; | ||
962 | default: | ||
963 | dev_err(pci->dev, "UNKNOWN IRQ type\n"); | ||
964 | return -EINVAL; | ||
965 | } | ||
966 | |||
967 | return 0; | ||
968 | } | ||
969 | |||
970 | static const struct pci_epc_features ks_pcie_am654_epc_features = { | ||
971 | .linkup_notifier = false, | ||
972 | .msi_capable = true, | ||
973 | .msix_capable = false, | ||
974 | .reserved_bar = 1 << BAR_0 | 1 << BAR_1, | ||
975 | .bar_fixed_64bit = 1 << BAR_0, | ||
976 | .bar_fixed_size[2] = SZ_1M, | ||
977 | .bar_fixed_size[3] = SZ_64K, | ||
978 | .bar_fixed_size[4] = 256, | ||
979 | .bar_fixed_size[5] = SZ_1M, | ||
980 | .align = SZ_1M, | ||
857 | }; | 981 | }; |
858 | 982 | ||
983 | static const struct pci_epc_features* | ||
984 | ks_pcie_am654_get_features(struct dw_pcie_ep *ep) | ||
985 | { | ||
986 | return &ks_pcie_am654_epc_features; | ||
987 | } | ||
988 | |||
989 | static const struct dw_pcie_ep_ops ks_pcie_am654_ep_ops = { | ||
990 | .ep_init = ks_pcie_am654_ep_init, | ||
991 | .raise_irq = ks_pcie_am654_raise_irq, | ||
992 | .get_features = &ks_pcie_am654_get_features, | ||
993 | }; | ||
994 | |||
995 | static int __init ks_pcie_add_pcie_ep(struct keystone_pcie *ks_pcie, | ||
996 | struct platform_device *pdev) | ||
997 | { | ||
998 | int ret; | ||
999 | struct dw_pcie_ep *ep; | ||
1000 | struct resource *res; | ||
1001 | struct device *dev = &pdev->dev; | ||
1002 | struct dw_pcie *pci = ks_pcie->pci; | ||
1003 | |||
1004 | ep = &pci->ep; | ||
1005 | |||
1006 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space"); | ||
1007 | if (!res) | ||
1008 | return -EINVAL; | ||
1009 | |||
1010 | ep->phys_base = res->start; | ||
1011 | ep->addr_size = resource_size(res); | ||
1012 | |||
1013 | ret = dw_pcie_ep_init(ep); | ||
1014 | if (ret) { | ||
1015 | dev_err(dev, "failed to initialize endpoint\n"); | ||
1016 | return ret; | ||
1017 | } | ||
1018 | |||
1019 | return 0; | ||
1020 | } | ||
1021 | |||
859 | static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) | 1022 | static void ks_pcie_disable_phy(struct keystone_pcie *ks_pcie) |
860 | { | 1023 | { |
861 | int num_lanes = ks_pcie->num_lanes; | 1024 | int num_lanes = ks_pcie->num_lanes; |
@@ -873,6 +1036,10 @@ static int ks_pcie_enable_phy(struct keystone_pcie *ks_pcie) | |||
873 | int num_lanes = ks_pcie->num_lanes; | 1036 | int num_lanes = ks_pcie->num_lanes; |
874 | 1037 | ||
875 | for (i = 0; i < num_lanes; i++) { | 1038 | for (i = 0; i < num_lanes; i++) { |
1039 | ret = phy_reset(ks_pcie->phy[i]); | ||
1040 | if (ret < 0) | ||
1041 | goto err_phy; | ||
1042 | |||
876 | ret = phy_init(ks_pcie->phy[i]); | 1043 | ret = phy_init(ks_pcie->phy[i]); |
877 | if (ret < 0) | 1044 | if (ret < 0) |
878 | goto err_phy; | 1045 | goto err_phy; |
@@ -895,20 +1062,161 @@ err_phy: | |||
895 | return ret; | 1062 | return ret; |
896 | } | 1063 | } |
897 | 1064 | ||
1065 | static int ks_pcie_set_mode(struct device *dev) | ||
1066 | { | ||
1067 | struct device_node *np = dev->of_node; | ||
1068 | struct regmap *syscon; | ||
1069 | u32 val; | ||
1070 | u32 mask; | ||
1071 | int ret = 0; | ||
1072 | |||
1073 | syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); | ||
1074 | if (IS_ERR(syscon)) | ||
1075 | return 0; | ||
1076 | |||
1077 | mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN; | ||
1078 | val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN; | ||
1079 | |||
1080 | ret = regmap_update_bits(syscon, 0, mask, val); | ||
1081 | if (ret) { | ||
1082 | dev_err(dev, "failed to set pcie mode\n"); | ||
1083 | return ret; | ||
1084 | } | ||
1085 | |||
1086 | return 0; | ||
1087 | } | ||
1088 | |||
1089 | static int ks_pcie_am654_set_mode(struct device *dev, | ||
1090 | enum dw_pcie_device_mode mode) | ||
1091 | { | ||
1092 | struct device_node *np = dev->of_node; | ||
1093 | struct regmap *syscon; | ||
1094 | u32 val; | ||
1095 | u32 mask; | ||
1096 | int ret = 0; | ||
1097 | |||
1098 | syscon = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-mode"); | ||
1099 | if (IS_ERR(syscon)) | ||
1100 | return 0; | ||
1101 | |||
1102 | mask = AM654_PCIE_DEV_TYPE_MASK; | ||
1103 | |||
1104 | switch (mode) { | ||
1105 | case DW_PCIE_RC_TYPE: | ||
1106 | val = RC; | ||
1107 | break; | ||
1108 | case DW_PCIE_EP_TYPE: | ||
1109 | val = EP; | ||
1110 | break; | ||
1111 | default: | ||
1112 | dev_err(dev, "INVALID device type %d\n", mode); | ||
1113 | return -EINVAL; | ||
1114 | } | ||
1115 | |||
1116 | ret = regmap_update_bits(syscon, 0, mask, val); | ||
1117 | if (ret) { | ||
1118 | dev_err(dev, "failed to set pcie mode\n"); | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | return 0; | ||
1123 | } | ||
1124 | |||
1125 | static void ks_pcie_set_link_speed(struct dw_pcie *pci, int link_speed) | ||
1126 | { | ||
1127 | u32 val; | ||
1128 | |||
1129 | dw_pcie_dbi_ro_wr_en(pci); | ||
1130 | |||
1131 | val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP); | ||
1132 | if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { | ||
1133 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
1134 | val |= link_speed; | ||
1135 | dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCAP, | ||
1136 | val); | ||
1137 | } | ||
1138 | |||
1139 | val = dw_pcie_readl_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2); | ||
1140 | if ((val & PCI_EXP_LNKCAP_SLS) != link_speed) { | ||
1141 | val &= ~((u32)PCI_EXP_LNKCAP_SLS); | ||
1142 | val |= link_speed; | ||
1143 | dw_pcie_writel_dbi(pci, EXP_CAP_ID_OFFSET + PCI_EXP_LNKCTL2, | ||
1144 | val); | ||
1145 | } | ||
1146 | |||
1147 | dw_pcie_dbi_ro_wr_dis(pci); | ||
1148 | } | ||
1149 | |||
1150 | static const struct ks_pcie_of_data ks_pcie_rc_of_data = { | ||
1151 | .host_ops = &ks_pcie_host_ops, | ||
1152 | .version = 0x365A, | ||
1153 | }; | ||
1154 | |||
1155 | static const struct ks_pcie_of_data ks_pcie_am654_rc_of_data = { | ||
1156 | .host_ops = &ks_pcie_am654_host_ops, | ||
1157 | .mode = DW_PCIE_RC_TYPE, | ||
1158 | .version = 0x490A, | ||
1159 | }; | ||
1160 | |||
1161 | static const struct ks_pcie_of_data ks_pcie_am654_ep_of_data = { | ||
1162 | .ep_ops = &ks_pcie_am654_ep_ops, | ||
1163 | .mode = DW_PCIE_EP_TYPE, | ||
1164 | .version = 0x490A, | ||
1165 | }; | ||
1166 | |||
1167 | static const struct of_device_id ks_pcie_of_match[] = { | ||
1168 | { | ||
1169 | .type = "pci", | ||
1170 | .data = &ks_pcie_rc_of_data, | ||
1171 | .compatible = "ti,keystone-pcie", | ||
1172 | }, | ||
1173 | { | ||
1174 | .data = &ks_pcie_am654_rc_of_data, | ||
1175 | .compatible = "ti,am654-pcie-rc", | ||
1176 | }, | ||
1177 | { | ||
1178 | .data = &ks_pcie_am654_ep_of_data, | ||
1179 | .compatible = "ti,am654-pcie-ep", | ||
1180 | }, | ||
1181 | { }, | ||
1182 | }; | ||
1183 | |||
898 | static int __init ks_pcie_probe(struct platform_device *pdev) | 1184 | static int __init ks_pcie_probe(struct platform_device *pdev) |
899 | { | 1185 | { |
1186 | const struct dw_pcie_host_ops *host_ops; | ||
1187 | const struct dw_pcie_ep_ops *ep_ops; | ||
900 | struct device *dev = &pdev->dev; | 1188 | struct device *dev = &pdev->dev; |
901 | struct device_node *np = dev->of_node; | 1189 | struct device_node *np = dev->of_node; |
1190 | const struct ks_pcie_of_data *data; | ||
1191 | const struct of_device_id *match; | ||
1192 | enum dw_pcie_device_mode mode; | ||
902 | struct dw_pcie *pci; | 1193 | struct dw_pcie *pci; |
903 | struct keystone_pcie *ks_pcie; | 1194 | struct keystone_pcie *ks_pcie; |
904 | struct device_link **link; | 1195 | struct device_link **link; |
1196 | struct gpio_desc *gpiod; | ||
1197 | void __iomem *atu_base; | ||
1198 | struct resource *res; | ||
1199 | unsigned int version; | ||
1200 | void __iomem *base; | ||
905 | u32 num_viewport; | 1201 | u32 num_viewport; |
906 | struct phy **phy; | 1202 | struct phy **phy; |
1203 | int link_speed; | ||
907 | u32 num_lanes; | 1204 | u32 num_lanes; |
908 | char name[10]; | 1205 | char name[10]; |
909 | int ret; | 1206 | int ret; |
1207 | int irq; | ||
910 | int i; | 1208 | int i; |
911 | 1209 | ||
1210 | match = of_match_device(of_match_ptr(ks_pcie_of_match), dev); | ||
1211 | data = (struct ks_pcie_of_data *)match->data; | ||
1212 | if (!data) | ||
1213 | return -EINVAL; | ||
1214 | |||
1215 | version = data->version; | ||
1216 | host_ops = data->host_ops; | ||
1217 | ep_ops = data->ep_ops; | ||
1218 | mode = data->mode; | ||
1219 | |||
912 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); | 1220 | ks_pcie = devm_kzalloc(dev, sizeof(*ks_pcie), GFP_KERNEL); |
913 | if (!ks_pcie) | 1221 | if (!ks_pcie) |
914 | return -ENOMEM; | 1222 | return -ENOMEM; |
@@ -917,12 +1225,38 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
917 | if (!pci) | 1225 | if (!pci) |
918 | return -ENOMEM; | 1226 | return -ENOMEM; |
919 | 1227 | ||
1228 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); | ||
1229 | ks_pcie->va_app_base = devm_ioremap_resource(dev, res); | ||
1230 | if (IS_ERR(ks_pcie->va_app_base)) | ||
1231 | return PTR_ERR(ks_pcie->va_app_base); | ||
1232 | |||
1233 | ks_pcie->app = *res; | ||
1234 | |||
1235 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbics"); | ||
1236 | base = devm_pci_remap_cfg_resource(dev, res); | ||
1237 | if (IS_ERR(base)) | ||
1238 | return PTR_ERR(base); | ||
1239 | |||
1240 | if (of_device_is_compatible(np, "ti,am654-pcie-rc")) | ||
1241 | ks_pcie->is_am6 = true; | ||
1242 | |||
1243 | pci->dbi_base = base; | ||
1244 | pci->dbi_base2 = base; | ||
920 | pci->dev = dev; | 1245 | pci->dev = dev; |
921 | pci->ops = &ks_pcie_dw_pcie_ops; | 1246 | pci->ops = &ks_pcie_dw_pcie_ops; |
1247 | pci->version = version; | ||
1248 | |||
1249 | irq = platform_get_irq(pdev, 0); | ||
1250 | if (irq < 0) { | ||
1251 | dev_err(dev, "missing IRQ resource: %d\n", irq); | ||
1252 | return irq; | ||
1253 | } | ||
922 | 1254 | ||
923 | ret = of_property_read_u32(np, "num-viewport", &num_viewport); | 1255 | ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, |
1256 | "ks-pcie-error-irq", ks_pcie); | ||
924 | if (ret < 0) { | 1257 | if (ret < 0) { |
925 | dev_err(dev, "unable to read *num-viewport* property\n"); | 1258 | dev_err(dev, "failed to request error IRQ %d\n", |
1259 | irq); | ||
926 | return ret; | 1260 | return ret; |
927 | } | 1261 | } |
928 | 1262 | ||
@@ -960,9 +1294,17 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
960 | ks_pcie->pci = pci; | 1294 | ks_pcie->pci = pci; |
961 | ks_pcie->link = link; | 1295 | ks_pcie->link = link; |
962 | ks_pcie->num_lanes = num_lanes; | 1296 | ks_pcie->num_lanes = num_lanes; |
963 | ks_pcie->num_viewport = num_viewport; | ||
964 | ks_pcie->phy = phy; | 1297 | ks_pcie->phy = phy; |
965 | 1298 | ||
1299 | gpiod = devm_gpiod_get_optional(dev, "reset", | ||
1300 | GPIOD_OUT_LOW); | ||
1301 | if (IS_ERR(gpiod)) { | ||
1302 | ret = PTR_ERR(gpiod); | ||
1303 | if (ret != -EPROBE_DEFER) | ||
1304 | dev_err(dev, "Failed to get reset GPIO\n"); | ||
1305 | goto err_link; | ||
1306 | } | ||
1307 | |||
966 | ret = ks_pcie_enable_phy(ks_pcie); | 1308 | ret = ks_pcie_enable_phy(ks_pcie); |
967 | if (ret) { | 1309 | if (ret) { |
968 | dev_err(dev, "failed to enable phy\n"); | 1310 | dev_err(dev, "failed to enable phy\n"); |
@@ -977,9 +1319,79 @@ static int __init ks_pcie_probe(struct platform_device *pdev) | |||
977 | goto err_get_sync; | 1319 | goto err_get_sync; |
978 | } | 1320 | } |
979 | 1321 | ||
980 | ret = ks_pcie_add_pcie_port(ks_pcie, pdev); | 1322 | if (pci->version >= 0x480A) { |
981 | if (ret < 0) | 1323 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); |
982 | goto err_get_sync; | 1324 | atu_base = devm_ioremap_resource(dev, res); |
1325 | if (IS_ERR(atu_base)) { | ||
1326 | ret = PTR_ERR(atu_base); | ||
1327 | goto err_get_sync; | ||
1328 | } | ||
1329 | |||
1330 | pci->atu_base = atu_base; | ||
1331 | |||
1332 | ret = ks_pcie_am654_set_mode(dev, mode); | ||
1333 | if (ret < 0) | ||
1334 | goto err_get_sync; | ||
1335 | } else { | ||
1336 | ret = ks_pcie_set_mode(dev); | ||
1337 | if (ret < 0) | ||
1338 | goto err_get_sync; | ||
1339 | } | ||
1340 | |||
1341 | link_speed = of_pci_get_max_link_speed(np); | ||
1342 | if (link_speed < 0) | ||
1343 | link_speed = 2; | ||
1344 | |||
1345 | ks_pcie_set_link_speed(pci, link_speed); | ||
1346 | |||
1347 | switch (mode) { | ||
1348 | case DW_PCIE_RC_TYPE: | ||
1349 | if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_HOST)) { | ||
1350 | ret = -ENODEV; | ||
1351 | goto err_get_sync; | ||
1352 | } | ||
1353 | |||
1354 | ret = of_property_read_u32(np, "num-viewport", &num_viewport); | ||
1355 | if (ret < 0) { | ||
1356 | dev_err(dev, "unable to read *num-viewport* property\n"); | ||
1357 | return ret; | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1361 | * "Power Sequencing and Reset Signal Timings" table in | ||
1362 | * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 2.0 | ||
1363 | * indicates PERST# should be deasserted after minimum of 100us | ||
1364 | * once REFCLK is stable. The REFCLK to the connector in RC | ||
1365 | * mode is selected while enabling the PHY. So deassert PERST# | ||
1366 | * after 100 us. | ||
1367 | */ | ||
1368 | if (gpiod) { | ||
1369 | usleep_range(100, 200); | ||
1370 | gpiod_set_value_cansleep(gpiod, 1); | ||
1371 | } | ||
1372 | |||
1373 | ks_pcie->num_viewport = num_viewport; | ||
1374 | pci->pp.ops = host_ops; | ||
1375 | ret = ks_pcie_add_pcie_port(ks_pcie, pdev); | ||
1376 | if (ret < 0) | ||
1377 | goto err_get_sync; | ||
1378 | break; | ||
1379 | case DW_PCIE_EP_TYPE: | ||
1380 | if (!IS_ENABLED(CONFIG_PCI_KEYSTONE_EP)) { | ||
1381 | ret = -ENODEV; | ||
1382 | goto err_get_sync; | ||
1383 | } | ||
1384 | |||
1385 | pci->ep.ops = ep_ops; | ||
1386 | ret = ks_pcie_add_pcie_ep(ks_pcie, pdev); | ||
1387 | if (ret < 0) | ||
1388 | goto err_get_sync; | ||
1389 | break; | ||
1390 | default: | ||
1391 | dev_err(dev, "INVALID device type %d\n", mode); | ||
1392 | } | ||
1393 | |||
1394 | ks_pcie_enable_error_irq(ks_pcie); | ||
983 | 1395 | ||
984 | return 0; | 1396 | return 0; |
985 | 1397 | ||
diff --git a/drivers/pci/controller/dwc/pci-layerscape-ep.c b/drivers/pci/controller/dwc/pci-layerscape-ep.c index a42c9c3ae1cc..be61d96cc95e 100644 --- a/drivers/pci/controller/dwc/pci-layerscape-ep.c +++ b/drivers/pci/controller/dwc/pci-layerscape-ep.c | |||
@@ -79,7 +79,7 @@ static int ls_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
79 | } | 79 | } |
80 | } | 80 | } |
81 | 81 | ||
82 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 82 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
83 | .ep_init = ls_pcie_ep_init, | 83 | .ep_init = ls_pcie_ep_init, |
84 | .raise_irq = ls_pcie_ep_raise_irq, | 84 | .raise_irq = ls_pcie_ep_raise_irq, |
85 | .get_features = ls_pcie_ep_get_features, | 85 | .get_features = ls_pcie_ep_get_features, |
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index dba83abfe764..d00252bd8fae 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c | |||
@@ -444,7 +444,7 @@ static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
444 | return 0; | 444 | return 0; |
445 | } | 445 | } |
446 | 446 | ||
447 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 447 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
448 | .ep_init = artpec6_pcie_ep_init, | 448 | .ep_init = artpec6_pcie_ep_init, |
449 | .raise_irq = artpec6_pcie_raise_irq, | 449 | .raise_irq = artpec6_pcie_raise_irq, |
450 | }; | 450 | }; |
diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 24f5a775ad34..2bf5a35c0570 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c | |||
@@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr, | |||
46 | u8 cap_id, next_cap_ptr; | 46 | u8 cap_id, next_cap_ptr; |
47 | u16 reg; | 47 | u16 reg; |
48 | 48 | ||
49 | if (!cap_ptr) | ||
50 | return 0; | ||
51 | |||
49 | reg = dw_pcie_readw_dbi(pci, cap_ptr); | 52 | reg = dw_pcie_readw_dbi(pci, cap_ptr); |
50 | next_cap_ptr = (reg & 0xff00) >> 8; | ||
51 | cap_id = (reg & 0x00ff); | 53 | cap_id = (reg & 0x00ff); |
52 | 54 | ||
53 | if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX) | 55 | if (cap_id > PCI_CAP_ID_MAX) |
54 | return 0; | 56 | return 0; |
55 | 57 | ||
56 | if (cap_id == cap) | 58 | if (cap_id == cap) |
57 | return cap_ptr; | 59 | return cap_ptr; |
58 | 60 | ||
61 | next_cap_ptr = (reg & 0xff00) >> 8; | ||
59 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | 62 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); |
60 | } | 63 | } |
61 | 64 | ||
@@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap) | |||
67 | reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); | 70 | reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST); |
68 | next_cap_ptr = (reg & 0x00ff); | 71 | next_cap_ptr = (reg & 0x00ff); |
69 | 72 | ||
70 | if (!next_cap_ptr) | ||
71 | return 0; | ||
72 | |||
73 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); | 73 | return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap); |
74 | } | 74 | } |
75 | 75 | ||
@@ -397,6 +397,7 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
397 | { | 397 | { |
398 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | 398 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); |
399 | struct pci_epc *epc = ep->epc; | 399 | struct pci_epc *epc = ep->epc; |
400 | unsigned int aligned_offset; | ||
400 | u16 msg_ctrl, msg_data; | 401 | u16 msg_ctrl, msg_data; |
401 | u32 msg_addr_lower, msg_addr_upper, reg; | 402 | u32 msg_addr_lower, msg_addr_upper, reg; |
402 | u64 msg_addr; | 403 | u64 msg_addr; |
@@ -422,13 +423,15 @@ int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no, | |||
422 | reg = ep->msi_cap + PCI_MSI_DATA_32; | 423 | reg = ep->msi_cap + PCI_MSI_DATA_32; |
423 | msg_data = dw_pcie_readw_dbi(pci, reg); | 424 | msg_data = dw_pcie_readw_dbi(pci, reg); |
424 | } | 425 | } |
425 | msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower; | 426 | aligned_offset = msg_addr_lower & (epc->mem->page_size - 1); |
427 | msg_addr = ((u64)msg_addr_upper) << 32 | | ||
428 | (msg_addr_lower & ~aligned_offset); | ||
426 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, | 429 | ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr, |
427 | epc->mem->page_size); | 430 | epc->mem->page_size); |
428 | if (ret) | 431 | if (ret) |
429 | return ret; | 432 | return ret; |
430 | 433 | ||
431 | writel(msg_data | (interrupt_num - 1), ep->msi_mem); | 434 | writel(msg_data | (interrupt_num - 1), ep->msi_mem + aligned_offset); |
432 | 435 | ||
433 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); | 436 | dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys); |
434 | 437 | ||
@@ -504,10 +507,32 @@ void dw_pcie_ep_exit(struct dw_pcie_ep *ep) | |||
504 | pci_epc_mem_exit(epc); | 507 | pci_epc_mem_exit(epc); |
505 | } | 508 | } |
506 | 509 | ||
510 | static unsigned int dw_pcie_ep_find_ext_capability(struct dw_pcie *pci, int cap) | ||
511 | { | ||
512 | u32 header; | ||
513 | int pos = PCI_CFG_SPACE_SIZE; | ||
514 | |||
515 | while (pos) { | ||
516 | header = dw_pcie_readl_dbi(pci, pos); | ||
517 | if (PCI_EXT_CAP_ID(header) == cap) | ||
518 | return pos; | ||
519 | |||
520 | pos = PCI_EXT_CAP_NEXT(header); | ||
521 | if (!pos) | ||
522 | break; | ||
523 | } | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | |||
507 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) | 528 | int dw_pcie_ep_init(struct dw_pcie_ep *ep) |
508 | { | 529 | { |
530 | int i; | ||
509 | int ret; | 531 | int ret; |
532 | u32 reg; | ||
510 | void *addr; | 533 | void *addr; |
534 | unsigned int nbars; | ||
535 | unsigned int offset; | ||
511 | struct pci_epc *epc; | 536 | struct pci_epc *epc; |
512 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); | 537 | struct dw_pcie *pci = to_dw_pcie_from_ep(ep); |
513 | struct device *dev = pci->dev; | 538 | struct device *dev = pci->dev; |
@@ -517,10 +542,6 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) | |||
517 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); | 542 | dev_err(dev, "dbi_base/dbi_base2 is not populated\n"); |
518 | return -EINVAL; | 543 | return -EINVAL; |
519 | } | 544 | } |
520 | if (pci->iatu_unroll_enabled && !pci->atu_base) { | ||
521 | dev_err(dev, "atu_base is not populated\n"); | ||
522 | return -EINVAL; | ||
523 | } | ||
524 | 545 | ||
525 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); | 546 | ret = of_property_read_u32(np, "num-ib-windows", &ep->num_ib_windows); |
526 | if (ret < 0) { | 547 | if (ret < 0) { |
@@ -595,6 +616,18 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep) | |||
595 | 616 | ||
596 | ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); | 617 | ep->msix_cap = dw_pcie_ep_find_capability(pci, PCI_CAP_ID_MSIX); |
597 | 618 | ||
619 | offset = dw_pcie_ep_find_ext_capability(pci, PCI_EXT_CAP_ID_REBAR); | ||
620 | if (offset) { | ||
621 | reg = dw_pcie_readl_dbi(pci, offset + PCI_REBAR_CTRL); | ||
622 | nbars = (reg & PCI_REBAR_CTRL_NBAR_MASK) >> | ||
623 | PCI_REBAR_CTRL_NBAR_SHIFT; | ||
624 | |||
625 | dw_pcie_dbi_ro_wr_en(pci); | ||
626 | for (i = 0; i < nbars; i++, offset += PCI_REBAR_CTRL) | ||
627 | dw_pcie_writel_dbi(pci, offset + PCI_REBAR_CAP, 0x0); | ||
628 | dw_pcie_dbi_ro_wr_dis(pci); | ||
629 | } | ||
630 | |||
598 | dw_pcie_setup(pci); | 631 | dw_pcie_setup(pci); |
599 | 632 | ||
600 | return 0; | 633 | return 0; |
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 3e4169e738a5..77db32529319 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ b/drivers/pci/controller/dwc/pcie-designware-host.c | |||
@@ -126,18 +126,12 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg) | |||
126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 126 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
127 | u64 msi_target; | 127 | u64 msi_target; |
128 | 128 | ||
129 | if (pp->ops->get_msi_addr) | 129 | msi_target = (u64)pp->msi_data; |
130 | msi_target = pp->ops->get_msi_addr(pp); | ||
131 | else | ||
132 | msi_target = (u64)pp->msi_data; | ||
133 | 130 | ||
134 | msg->address_lo = lower_32_bits(msi_target); | 131 | msg->address_lo = lower_32_bits(msi_target); |
135 | msg->address_hi = upper_32_bits(msi_target); | 132 | msg->address_hi = upper_32_bits(msi_target); |
136 | 133 | ||
137 | if (pp->ops->get_msi_data) | 134 | msg->data = d->hwirq; |
138 | msg->data = pp->ops->get_msi_data(pp, d->hwirq); | ||
139 | else | ||
140 | msg->data = d->hwirq; | ||
141 | 135 | ||
142 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", | 136 | dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n", |
143 | (int)d->hwirq, msg->address_hi, msg->address_lo); | 137 | (int)d->hwirq, msg->address_hi, msg->address_lo); |
@@ -157,17 +151,13 @@ static void dw_pci_bottom_mask(struct irq_data *d) | |||
157 | 151 | ||
158 | raw_spin_lock_irqsave(&pp->lock, flags); | 152 | raw_spin_lock_irqsave(&pp->lock, flags); |
159 | 153 | ||
160 | if (pp->ops->msi_clear_irq) { | 154 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
161 | pp->ops->msi_clear_irq(pp, d->hwirq); | 155 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
162 | } else { | 156 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
163 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
164 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
165 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
166 | 157 | ||
167 | pp->irq_mask[ctrl] |= BIT(bit); | 158 | pp->irq_mask[ctrl] |= BIT(bit); |
168 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 159 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
169 | pp->irq_mask[ctrl]); | 160 | pp->irq_mask[ctrl]); |
170 | } | ||
171 | 161 | ||
172 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 162 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
173 | } | 163 | } |
@@ -180,17 +170,13 @@ static void dw_pci_bottom_unmask(struct irq_data *d) | |||
180 | 170 | ||
181 | raw_spin_lock_irqsave(&pp->lock, flags); | 171 | raw_spin_lock_irqsave(&pp->lock, flags); |
182 | 172 | ||
183 | if (pp->ops->msi_set_irq) { | 173 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
184 | pp->ops->msi_set_irq(pp, d->hwirq); | 174 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
185 | } else { | 175 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
186 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | ||
187 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | ||
188 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | ||
189 | 176 | ||
190 | pp->irq_mask[ctrl] &= ~BIT(bit); | 177 | pp->irq_mask[ctrl] &= ~BIT(bit); |
191 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, | 178 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4, |
192 | pp->irq_mask[ctrl]); | 179 | pp->irq_mask[ctrl]); |
193 | } | ||
194 | 180 | ||
195 | raw_spin_unlock_irqrestore(&pp->lock, flags); | 181 | raw_spin_unlock_irqrestore(&pp->lock, flags); |
196 | } | 182 | } |
@@ -199,20 +185,12 @@ static void dw_pci_bottom_ack(struct irq_data *d) | |||
199 | { | 185 | { |
200 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); | 186 | struct pcie_port *pp = irq_data_get_irq_chip_data(d); |
201 | unsigned int res, bit, ctrl; | 187 | unsigned int res, bit, ctrl; |
202 | unsigned long flags; | ||
203 | 188 | ||
204 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; | 189 | ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL; |
205 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; | 190 | res = ctrl * MSI_REG_CTRL_BLOCK_SIZE; |
206 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; | 191 | bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL; |
207 | 192 | ||
208 | raw_spin_lock_irqsave(&pp->lock, flags); | ||
209 | |||
210 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); | 193 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit)); |
211 | |||
212 | if (pp->ops->msi_irq_ack) | ||
213 | pp->ops->msi_irq_ack(d->hwirq, pp); | ||
214 | |||
215 | raw_spin_unlock_irqrestore(&pp->lock, flags); | ||
216 | } | 194 | } |
217 | 195 | ||
218 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { | 196 | static struct irq_chip dw_pci_msi_bottom_irq_chip = { |
@@ -245,7 +223,7 @@ static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, | |||
245 | 223 | ||
246 | for (i = 0; i < nr_irqs; i++) | 224 | for (i = 0; i < nr_irqs; i++) |
247 | irq_domain_set_info(domain, virq + i, bit + i, | 225 | irq_domain_set_info(domain, virq + i, bit + i, |
248 | &dw_pci_msi_bottom_irq_chip, | 226 | pp->msi_irq_chip, |
249 | pp, handle_edge_irq, | 227 | pp, handle_edge_irq, |
250 | NULL, NULL); | 228 | NULL, NULL); |
251 | 229 | ||
@@ -462,6 +440,8 @@ int dw_pcie_host_init(struct pcie_port *pp) | |||
462 | } | 440 | } |
463 | 441 | ||
464 | if (!pp->ops->msi_host_init) { | 442 | if (!pp->ops->msi_host_init) { |
443 | pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip; | ||
444 | |||
465 | ret = dw_pcie_allocate_domains(pp); | 445 | ret = dw_pcie_allocate_domains(pp); |
466 | if (ret) | 446 | if (ret) |
467 | return ret; | 447 | return ret; |
@@ -632,17 +612,6 @@ static struct pci_ops dw_pcie_ops = { | |||
632 | .write = dw_pcie_wr_conf, | 612 | .write = dw_pcie_wr_conf, |
633 | }; | 613 | }; |
634 | 614 | ||
635 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
636 | { | ||
637 | u32 val; | ||
638 | |||
639 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
640 | if (val == 0xffffffff) | ||
641 | return 1; | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | void dw_pcie_setup_rc(struct pcie_port *pp) | 615 | void dw_pcie_setup_rc(struct pcie_port *pp) |
647 | { | 616 | { |
648 | u32 val, ctrl, num_ctrls; | 617 | u32 val, ctrl, num_ctrls; |
@@ -650,17 +619,19 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
650 | 619 | ||
651 | dw_pcie_setup(pci); | 620 | dw_pcie_setup(pci); |
652 | 621 | ||
653 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | 622 | if (!pp->ops->msi_host_init) { |
654 | 623 | num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL; | |
655 | /* Initialize IRQ Status array */ | 624 | |
656 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { | 625 | /* Initialize IRQ Status array */ |
657 | pp->irq_mask[ctrl] = ~0; | 626 | for (ctrl = 0; ctrl < num_ctrls; ctrl++) { |
658 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + | 627 | pp->irq_mask[ctrl] = ~0; |
659 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 628 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + |
660 | 4, pp->irq_mask[ctrl]); | 629 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
661 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + | 630 | 4, pp->irq_mask[ctrl]); |
662 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), | 631 | dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + |
663 | 4, ~0); | 632 | (ctrl * MSI_REG_CTRL_BLOCK_SIZE), |
633 | 4, ~0); | ||
634 | } | ||
664 | } | 635 | } |
665 | 636 | ||
666 | /* Setup RC BARs */ | 637 | /* Setup RC BARs */ |
@@ -694,14 +665,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp) | |||
694 | * we should not program the ATU here. | 665 | * we should not program the ATU here. |
695 | */ | 666 | */ |
696 | if (!pp->ops->rd_other_conf) { | 667 | if (!pp->ops->rd_other_conf) { |
697 | /* Get iATU unroll support */ | ||
698 | pci->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pci); | ||
699 | dev_dbg(pci->dev, "iATU unroll: %s\n", | ||
700 | pci->iatu_unroll_enabled ? "enabled" : "disabled"); | ||
701 | |||
702 | if (pci->iatu_unroll_enabled && !pci->atu_base) | ||
703 | pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; | ||
704 | |||
705 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, | 668 | dw_pcie_prog_outbound_atu(pci, PCIE_ATU_REGION_INDEX0, |
706 | PCIE_ATU_TYPE_MEM, pp->mem_base, | 669 | PCIE_ATU_TYPE_MEM, pp->mem_base, |
707 | pp->mem_bus_addr, pp->mem_size); | 670 | pp->mem_bus_addr, pp->mem_size); |
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 932dbd0b34b6..b58fdcbc664b 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c | |||
@@ -106,7 +106,7 @@ dw_plat_pcie_get_features(struct dw_pcie_ep *ep) | |||
106 | return &dw_plat_pcie_epc_features; | 106 | return &dw_plat_pcie_epc_features; |
107 | } | 107 | } |
108 | 108 | ||
109 | static struct dw_pcie_ep_ops pcie_ep_ops = { | 109 | static const struct dw_pcie_ep_ops pcie_ep_ops = { |
110 | .ep_init = dw_plat_pcie_ep_init, | 110 | .ep_init = dw_plat_pcie_ep_init, |
111 | .raise_irq = dw_plat_pcie_ep_raise_irq, | 111 | .raise_irq = dw_plat_pcie_ep_raise_irq, |
112 | .get_features = dw_plat_pcie_get_features, | 112 | .get_features = dw_plat_pcie_get_features, |
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index 086e87a40316..9d7c51c32b3b 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c | |||
@@ -83,6 +83,37 @@ void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | |||
83 | dev_err(pci->dev, "Write DBI address failed\n"); | 83 | dev_err(pci->dev, "Write DBI address failed\n"); |
84 | } | 84 | } |
85 | 85 | ||
86 | u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
87 | size_t size) | ||
88 | { | ||
89 | int ret; | ||
90 | u32 val; | ||
91 | |||
92 | if (pci->ops->read_dbi2) | ||
93 | return pci->ops->read_dbi2(pci, base, reg, size); | ||
94 | |||
95 | ret = dw_pcie_read(base + reg, size, &val); | ||
96 | if (ret) | ||
97 | dev_err(pci->dev, "read DBI address failed\n"); | ||
98 | |||
99 | return val; | ||
100 | } | ||
101 | |||
102 | void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
103 | size_t size, u32 val) | ||
104 | { | ||
105 | int ret; | ||
106 | |||
107 | if (pci->ops->write_dbi2) { | ||
108 | pci->ops->write_dbi2(pci, base, reg, size, val); | ||
109 | return; | ||
110 | } | ||
111 | |||
112 | ret = dw_pcie_write(base + reg, size, val); | ||
113 | if (ret) | ||
114 | dev_err(pci->dev, "write DBI address failed\n"); | ||
115 | } | ||
116 | |||
86 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) | 117 | static u32 dw_pcie_readl_ob_unroll(struct dw_pcie *pci, u32 index, u32 reg) |
87 | { | 118 | { |
88 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); | 119 | u32 offset = PCIE_GET_ATU_OUTB_UNR_REG_OFFSET(index); |
@@ -333,6 +364,17 @@ int dw_pcie_link_up(struct dw_pcie *pci) | |||
333 | (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); | 364 | (!(val & PCIE_PORT_DEBUG1_LINK_IN_TRAINING))); |
334 | } | 365 | } |
335 | 366 | ||
367 | static u8 dw_pcie_iatu_unroll_enabled(struct dw_pcie *pci) | ||
368 | { | ||
369 | u32 val; | ||
370 | |||
371 | val = dw_pcie_readl_dbi(pci, PCIE_ATU_VIEWPORT); | ||
372 | if (val == 0xffffffff) | ||
373 | return 1; | ||
374 | |||
375 | return 0; | ||
376 | } | ||
377 | |||
336 | void dw_pcie_setup(struct dw_pcie *pci) | 378 | void dw_pcie_setup(struct dw_pcie *pci) |
337 | { | 379 | { |
338 | int ret; | 380 | int ret; |
@@ -341,6 +383,16 @@ void dw_pcie_setup(struct dw_pcie *pci) | |||
341 | struct device *dev = pci->dev; | 383 | struct device *dev = pci->dev; |
342 | struct device_node *np = dev->of_node; | 384 | struct device_node *np = dev->of_node; |
343 | 385 | ||
386 | if (pci->version >= 0x480A || (!pci->version && | ||
387 | dw_pcie_iatu_unroll_enabled(pci))) { | ||
388 | pci->iatu_unroll_enabled = true; | ||
389 | if (!pci->atu_base) | ||
390 | pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET; | ||
391 | } | ||
392 | dev_dbg(pci->dev, "iATU unroll: %s\n", pci->iatu_unroll_enabled ? | ||
393 | "enabled" : "disabled"); | ||
394 | |||
395 | |||
344 | ret = of_property_read_u32(np, "num-lanes", &lanes); | 396 | ret = of_property_read_u32(np, "num-lanes", &lanes); |
345 | if (ret) | 397 | if (ret) |
346 | lanes = 0; | 398 | lanes = 0; |
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 351636d6d9b5..b8993f2b78df 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h | |||
@@ -148,14 +148,9 @@ struct dw_pcie_host_ops { | |||
148 | int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, | 148 | int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus, |
149 | unsigned int devfn, int where, int size, u32 val); | 149 | unsigned int devfn, int where, int size, u32 val); |
150 | int (*host_init)(struct pcie_port *pp); | 150 | int (*host_init)(struct pcie_port *pp); |
151 | void (*msi_set_irq)(struct pcie_port *pp, int irq); | ||
152 | void (*msi_clear_irq)(struct pcie_port *pp, int irq); | ||
153 | phys_addr_t (*get_msi_addr)(struct pcie_port *pp); | ||
154 | u32 (*get_msi_data)(struct pcie_port *pp, int pos); | ||
155 | void (*scan_bus)(struct pcie_port *pp); | 151 | void (*scan_bus)(struct pcie_port *pp); |
156 | void (*set_num_vectors)(struct pcie_port *pp); | 152 | void (*set_num_vectors)(struct pcie_port *pp); |
157 | int (*msi_host_init)(struct pcie_port *pp); | 153 | int (*msi_host_init)(struct pcie_port *pp); |
158 | void (*msi_irq_ack)(int irq, struct pcie_port *pp); | ||
159 | }; | 154 | }; |
160 | 155 | ||
161 | struct pcie_port { | 156 | struct pcie_port { |
@@ -183,6 +178,7 @@ struct pcie_port { | |||
183 | struct irq_domain *msi_domain; | 178 | struct irq_domain *msi_domain; |
184 | dma_addr_t msi_data; | 179 | dma_addr_t msi_data; |
185 | struct page *msi_page; | 180 | struct page *msi_page; |
181 | struct irq_chip *msi_irq_chip; | ||
186 | u32 num_vectors; | 182 | u32 num_vectors; |
187 | u32 irq_mask[MAX_MSI_CTRLS]; | 183 | u32 irq_mask[MAX_MSI_CTRLS]; |
188 | struct pci_bus *root_bus; | 184 | struct pci_bus *root_bus; |
@@ -205,7 +201,7 @@ struct dw_pcie_ep_ops { | |||
205 | 201 | ||
206 | struct dw_pcie_ep { | 202 | struct dw_pcie_ep { |
207 | struct pci_epc *epc; | 203 | struct pci_epc *epc; |
208 | struct dw_pcie_ep_ops *ops; | 204 | const struct dw_pcie_ep_ops *ops; |
209 | phys_addr_t phys_base; | 205 | phys_addr_t phys_base; |
210 | size_t addr_size; | 206 | size_t addr_size; |
211 | size_t page_size; | 207 | size_t page_size; |
@@ -227,6 +223,10 @@ struct dw_pcie_ops { | |||
227 | size_t size); | 223 | size_t size); |
228 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | 224 | void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg, |
229 | size_t size, u32 val); | 225 | size_t size, u32 val); |
226 | u32 (*read_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
227 | size_t size); | ||
228 | void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg, | ||
229 | size_t size, u32 val); | ||
230 | int (*link_up)(struct dw_pcie *pcie); | 230 | int (*link_up)(struct dw_pcie *pcie); |
231 | int (*start_link)(struct dw_pcie *pcie); | 231 | int (*start_link)(struct dw_pcie *pcie); |
232 | void (*stop_link)(struct dw_pcie *pcie); | 232 | void (*stop_link)(struct dw_pcie *pcie); |
@@ -243,6 +243,7 @@ struct dw_pcie { | |||
243 | struct pcie_port pp; | 243 | struct pcie_port pp; |
244 | struct dw_pcie_ep ep; | 244 | struct dw_pcie_ep ep; |
245 | const struct dw_pcie_ops *ops; | 245 | const struct dw_pcie_ops *ops; |
246 | unsigned int version; | ||
246 | }; | 247 | }; |
247 | 248 | ||
248 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) | 249 | #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp) |
@@ -257,6 +258,10 @@ u32 __dw_pcie_read_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | |||
257 | size_t size); | 258 | size_t size); |
258 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, | 259 | void __dw_pcie_write_dbi(struct dw_pcie *pci, void __iomem *base, u32 reg, |
259 | size_t size, u32 val); | 260 | size_t size, u32 val); |
261 | u32 __dw_pcie_read_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
262 | size_t size); | ||
263 | void __dw_pcie_write_dbi2(struct dw_pcie *pci, void __iomem *base, u32 reg, | ||
264 | size_t size, u32 val); | ||
260 | int dw_pcie_link_up(struct dw_pcie *pci); | 265 | int dw_pcie_link_up(struct dw_pcie *pci); |
261 | int dw_pcie_wait_for_link(struct dw_pcie *pci); | 266 | int dw_pcie_wait_for_link(struct dw_pcie *pci); |
262 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, | 267 | void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, |
@@ -300,12 +305,12 @@ static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg) | |||
300 | 305 | ||
301 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) | 306 | static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val) |
302 | { | 307 | { |
303 | __dw_pcie_write_dbi(pci, pci->dbi_base2, reg, 0x4, val); | 308 | __dw_pcie_write_dbi2(pci, pci->dbi_base2, reg, 0x4, val); |
304 | } | 309 | } |
305 | 310 | ||
306 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) | 311 | static inline u32 dw_pcie_readl_dbi2(struct dw_pcie *pci, u32 reg) |
307 | { | 312 | { |
308 | return __dw_pcie_read_dbi(pci, pci->dbi_base2, reg, 0x4); | 313 | return __dw_pcie_read_dbi2(pci, pci->dbi_base2, reg, 0x4); |
309 | } | 314 | } |
310 | 315 | ||
311 | static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) | 316 | static inline void dw_pcie_writel_atu(struct dw_pcie *pci, u32 reg, u32 val) |
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index d0b91da49bf4..c0786ca74312 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c | |||
@@ -438,7 +438,7 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) | |||
438 | epc_features = epf_test->epc_features; | 438 | epc_features = epf_test->epc_features; |
439 | 439 | ||
440 | base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), | 440 | base = pci_epf_alloc_space(epf, sizeof(struct pci_epf_test_reg), |
441 | test_reg_bar); | 441 | test_reg_bar, epc_features->align); |
442 | if (!base) { | 442 | if (!base) { |
443 | dev_err(dev, "Failed to allocated register space\n"); | 443 | dev_err(dev, "Failed to allocated register space\n"); |
444 | return -ENOMEM; | 444 | return -ENOMEM; |
@@ -453,7 +453,8 @@ static int pci_epf_test_alloc_space(struct pci_epf *epf) | |||
453 | if (!!(epc_features->reserved_bar & (1 << bar))) | 453 | if (!!(epc_features->reserved_bar & (1 << bar))) |
454 | continue; | 454 | continue; |
455 | 455 | ||
456 | base = pci_epf_alloc_space(epf, bar_size[bar], bar); | 456 | base = pci_epf_alloc_space(epf, bar_size[bar], bar, |
457 | epc_features->align); | ||
457 | if (!base) | 458 | if (!base) |
458 | dev_err(dev, "Failed to allocate space for BAR%d\n", | 459 | dev_err(dev, "Failed to allocate space for BAR%d\n", |
459 | bar); | 460 | bar); |
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 8bfdcd291196..fb1306de8f40 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c | |||
@@ -109,10 +109,12 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space); | |||
109 | * pci_epf_alloc_space() - allocate memory for the PCI EPF register space | 109 | * pci_epf_alloc_space() - allocate memory for the PCI EPF register space |
110 | * @size: the size of the memory that has to be allocated | 110 | * @size: the size of the memory that has to be allocated |
111 | * @bar: the BAR number corresponding to the allocated register space | 111 | * @bar: the BAR number corresponding to the allocated register space |
112 | * @align: alignment size for the allocation region | ||
112 | * | 113 | * |
113 | * Invoke to allocate memory for the PCI EPF register space. | 114 | * Invoke to allocate memory for the PCI EPF register space. |
114 | */ | 115 | */ |
115 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | 116 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, |
117 | size_t align) | ||
116 | { | 118 | { |
117 | void *space; | 119 | void *space; |
118 | struct device *dev = epf->epc->dev.parent; | 120 | struct device *dev = epf->epc->dev.parent; |
@@ -120,7 +122,11 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar) | |||
120 | 122 | ||
121 | if (size < 128) | 123 | if (size < 128) |
122 | size = 128; | 124 | size = 128; |
123 | size = roundup_pow_of_two(size); | 125 | |
126 | if (align) | ||
127 | size = ALIGN(size, align); | ||
128 | else | ||
129 | size = roundup_pow_of_two(size); | ||
124 | 130 | ||
125 | space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); | 131 | space = dma_alloc_coherent(dev, size, &phys_addr, GFP_KERNEL); |
126 | if (!space) { | 132 | if (!space) { |
diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 67376cf45880..73d5adec0a28 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/of_pci.h> | 15 | #include <linux/of_pci.h> |
16 | #include "pci.h" | 16 | #include "pci.h" |
17 | 17 | ||
18 | #ifdef CONFIG_PCI | ||
18 | void pci_set_of_node(struct pci_dev *dev) | 19 | void pci_set_of_node(struct pci_dev *dev) |
19 | { | 20 | { |
20 | if (!dev->bus->dev.of_node) | 21 | if (!dev->bus->dev.of_node) |
@@ -203,27 +204,6 @@ int of_get_pci_domain_nr(struct device_node *node) | |||
203 | EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); | 204 | EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); |
204 | 205 | ||
205 | /** | 206 | /** |
206 | * This function will try to find the limitation of link speed by finding | ||
207 | * a property called "max-link-speed" of the given device node. | ||
208 | * | ||
209 | * @node: device tree node with the max link speed information | ||
210 | * | ||
211 | * Returns the associated max link speed from DT, or a negative value if the | ||
212 | * required property is not found or is invalid. | ||
213 | */ | ||
214 | int of_pci_get_max_link_speed(struct device_node *node) | ||
215 | { | ||
216 | u32 max_link_speed; | ||
217 | |||
218 | if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || | ||
219 | max_link_speed > 4) | ||
220 | return -EINVAL; | ||
221 | |||
222 | return max_link_speed; | ||
223 | } | ||
224 | EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); | ||
225 | |||
226 | /** | ||
227 | * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only | 207 | * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only |
228 | * is present and valid | 208 | * is present and valid |
229 | */ | 209 | */ |
@@ -543,3 +523,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev, | |||
543 | return err; | 523 | return err; |
544 | } | 524 | } |
545 | 525 | ||
526 | #endif /* CONFIG_PCI */ | ||
527 | |||
528 | /** | ||
529 | * This function will try to find the limitation of link speed by finding | ||
530 | * a property called "max-link-speed" of the given device node. | ||
531 | * | ||
532 | * @node: device tree node with the max link speed information | ||
533 | * | ||
534 | * Returns the associated max link speed from DT, or a negative value if the | ||
535 | * required property is not found or is invalid. | ||
536 | */ | ||
537 | int of_pci_get_max_link_speed(struct device_node *node) | ||
538 | { | ||
539 | u32 max_link_speed; | ||
540 | |||
541 | if (of_property_read_u32(node, "max-link-speed", &max_link_speed) || | ||
542 | max_link_speed > 4) | ||
543 | return -EINVAL; | ||
544 | |||
545 | return max_link_speed; | ||
546 | } | ||
547 | EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed); | ||
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index c3ffa3917f88..f641badc2c61 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h | |||
@@ -109,6 +109,7 @@ struct pci_epc { | |||
109 | * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver | 109 | * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver |
110 | * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs | 110 | * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs |
111 | * @bar_fixed_size: Array specifying the size supported by each BAR | 111 | * @bar_fixed_size: Array specifying the size supported by each BAR |
112 | * @align: alignment size required for BAR buffer allocation | ||
112 | */ | 113 | */ |
113 | struct pci_epc_features { | 114 | struct pci_epc_features { |
114 | unsigned int linkup_notifier : 1; | 115 | unsigned int linkup_notifier : 1; |
@@ -117,6 +118,7 @@ struct pci_epc_features { | |||
117 | u8 reserved_bar; | 118 | u8 reserved_bar; |
118 | u8 bar_fixed_64bit; | 119 | u8 bar_fixed_64bit; |
119 | u64 bar_fixed_size[BAR_5 + 1]; | 120 | u64 bar_fixed_size[BAR_5 + 1]; |
121 | size_t align; | ||
120 | }; | 122 | }; |
121 | 123 | ||
122 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) | 124 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) |
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index ec02f58758c8..2d6f07556682 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h | |||
@@ -149,7 +149,8 @@ void pci_epf_destroy(struct pci_epf *epf); | |||
149 | int __pci_epf_register_driver(struct pci_epf_driver *driver, | 149 | int __pci_epf_register_driver(struct pci_epf_driver *driver, |
150 | struct module *owner); | 150 | struct module *owner); |
151 | void pci_epf_unregister_driver(struct pci_epf_driver *driver); | 151 | void pci_epf_unregister_driver(struct pci_epf_driver *driver); |
152 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar); | 152 | void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, |
153 | size_t align); | ||
153 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); | 154 | void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar); |
154 | int pci_epf_bind(struct pci_epf *epf); | 155 | int pci_epf_bind(struct pci_epf *epf); |
155 | void pci_epf_unbind(struct pci_epf *epf); | 156 | void pci_epf_unbind(struct pci_epf *epf); |