-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scmi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/power/qcom,rpmpd.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/reset/fsl,imx7-src.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt | 10
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt | 81
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt | 6
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/mach-omap2/omap_hwmod.c | 39
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c | 60
-rw-r--r--  drivers/bus/brcmstb_gisb.c | 4
-rw-r--r--  drivers/bus/fsl-mc/dprc.c | 30
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-bus.c | 15
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-private.h | 17
-rw-r--r--  drivers/bus/ti-sysc.c | 454
-rw-r--r--  drivers/firmware/arm_scmi/clock.c | 2
-rw-r--r--  drivers/firmware/arm_scmi/sensors.c | 10
-rw-r--r--  drivers/firmware/psci/psci_checker.c | 10
-rw-r--r--  drivers/firmware/tegra/bpmp.c | 4
-rw-r--r--  drivers/firmware/ti_sci.c | 859
-rw-r--r--  drivers/firmware/ti_sci.h | 810
-rw-r--r--  drivers/hwmon/scmi-hwmon.c | 48
-rw-r--r--  drivers/memory/Kconfig | 8
-rw-r--r--  drivers/memory/Makefile | 1
-rw-r--r--  drivers/memory/brcmstb_dpfe.c | 317
-rw-r--r--  drivers/memory/emif.c | 3
-rw-r--r--  drivers/memory/jedec_ddr.h (renamed from include/memory/jedec_ddr.h) | 6
-rw-r--r--  drivers/memory/jedec_ddr_data.c (renamed from lib/jedec_ddr_data.c) | 5
-rw-r--r--  drivers/memory/of_memory.c | 3
-rw-r--r--  drivers/memory/tegra/tegra124.c | 44
-rw-r--r--  drivers/reset/Kconfig | 3
-rw-r--r--  drivers/reset/core.c | 3
-rw-r--r--  drivers/reset/reset-simple.c | 2
-rw-r--r--  drivers/soc/amlogic/meson-canvas.c | 14
-rw-r--r--  drivers/soc/aspeed/aspeed-lpc-ctrl.c | 61
-rw-r--r--  drivers/soc/fsl/Kconfig | 10
-rw-r--r--  drivers/soc/fsl/Makefile | 1
-rw-r--r--  drivers/soc/fsl/dpaa2-console.c | 329
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c | 23
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c | 148
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.h | 9
-rw-r--r--  drivers/soc/fsl/guts.c | 6
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c | 20
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c | 2
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c | 21
-rw-r--r--  drivers/soc/fsl/qbman/qman_priv.h | 9
-rw-r--r--  drivers/soc/imx/Kconfig | 9
-rw-r--r--  drivers/soc/imx/Makefile | 1
-rw-r--r--  drivers/soc/imx/soc-imx-scu.c | 144
-rw-r--r--  drivers/soc/imx/soc-imx8.c | 63
-rw-r--r--  drivers/soc/qcom/Kconfig | 12
-rw-r--r--  drivers/soc/qcom/Makefile | 1
-rw-r--r--  drivers/soc/qcom/apr.c | 76
-rw-r--r--  drivers/soc/qcom/qcom_aoss.c | 480
-rw-r--r--  drivers/soc/qcom/rpmpd.c | 134
-rw-r--r--  drivers/soc/rockchip/pm_domains.c | 230
-rw-r--r--  drivers/soc/tegra/Kconfig | 1
-rw-r--r--  drivers/soc/tegra/fuse/fuse-tegra.c | 6
-rw-r--r--  drivers/soc/tegra/pmc.c | 18
-rw-r--r--  include/dt-bindings/power/qcom-aoss-qmp.h | 14
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h | 34
-rw-r--r--  include/dt-bindings/reset/bitmain,bm1880-reset.h | 51
-rw-r--r--  include/linux/platform_data/ti-sysc.h | 12
-rw-r--r--  include/linux/scmi_protocol.h | 1
-rw-r--r--  include/linux/soc/ti/ti_sci_protocol.h | 246
-rw-r--r--  include/soc/fsl/bman.h | 8
-rw-r--r--  include/soc/fsl/qman.h | 9
-rw-r--r--  lib/Kconfig | 8
-rw-r--r--  lib/Makefile | 2
70 files changed, 4618 insertions(+), 493 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index 5f3719ab7075..317a2fc3667a 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -6,7 +6,7 @@ that are provided by the hardware platform it is running on, including power
 and performance functions.
 
 This binding is intended to define the interface the firmware implementing
-the SCMI as described in ARM document number ARM DUI 0922B ("ARM System Control
+the SCMI as described in ARM document number ARM DEN 0056A ("ARM System Control
 and Management Interface Platform Design Document")[0] provide for OSPM in
 the device tree.
 
diff --git a/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
new file mode 100644
index 000000000000..1442ba5d2d98
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
@@ -0,0 +1,11 @@
+DPAA2 console support
+
+Required properties:
+
+    - compatible
+        Value type: <string>
+        Definition: Must be "fsl,dpaa2-console".
+    - reg
+        Value type: <prop-encoded-array>
+        Definition: A standard property. Specifies the region where the MCFBA
+                    (MC firmware base address) register can be found.
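
As a rough illustration of how a driver could consume this binding, the sketch below maps the "reg" region and reads the MC firmware base address. The function name, the register offset 0 and the 32-bit access width are assumptions for illustration only, not details taken from the dpaa2-console driver in this series:

	static int dpaa2_console_probe_sketch(struct platform_device *pdev)
	{
		void __iomem *regs;
		u32 mcfba;

		/* map the region described by the "reg" property above */
		regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		mcfba = readl(regs);	/* assumed: MCFBA word at offset 0 */
		dev_info(&pdev->dev, "MC firmware base: %#x\n", mcfba);

		return 0;
	}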
diff --git a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
index 980e5413d18f..eb35b22f9e23 100644
--- a/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
+++ b/Documentation/devicetree/bindings/power/qcom,rpmpd.txt
@@ -6,6 +6,8 @@ which then translates it into a corresponding voltage on a rail
 Required Properties:
  - compatible: Should be one of the following
 	* qcom,msm8996-rpmpd: RPM Power domain for the msm8996 family of SoC
+	* qcom,msm8998-rpmpd: RPM Power domain for the msm8998 family of SoC
+	* qcom,qcs404-rpmpd: RPM Power domain for the qcs404 family of SoC
 	* qcom,sdm845-rpmhpd: RPMh Power domain for the sdm845 family of SoC
  - #power-domain-cells: number of cells in Power domain specifier
 	must be 1.
diff --git a/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt b/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt
new file mode 100644
index 000000000000..a6f8455ae6c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/bitmain,bm1880-reset.txt
@@ -0,0 +1,18 @@
+Bitmain BM1880 SoC Reset Controller
+===================================
+
+Please also refer to reset.txt in this directory for common reset
+controller binding usage.
+
+Required properties:
+- compatible: Should be "bitmain,bm1880-reset"
+- reg: Offset and length of reset controller space in SCTRL.
+- #reset-cells: Must be 1.
+
+Example:
+
+	rst: reset-controller@c00 {
+		compatible = "bitmain,bm1880-reset";
+		reg = <0xc00 0x8>;
+		#reset-cells = <1>;
+	};
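
For reference, a consumer of this reset controller typically follows the standard reset_control API; the sketch below is illustrative (the function name and the settle delay are made up, only the API calls are standard), assuming a peripheral node carrying "resets = <&rst N>;":

	static int bm1880_reset_cycle_sketch(struct device *dev)
	{
		struct reset_control *rst;

		/* picks up the "resets" phandle from the consumer node */
		rst = devm_reset_control_get_exclusive(dev, NULL);
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		reset_control_assert(rst);
		usleep_range(10, 20);	/* assumed settle time */
		return reset_control_deassert(rst);
	}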
diff --git a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
index 2ecf33815d18..13e095182db4 100644
--- a/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
+++ b/Documentation/devicetree/bindings/reset/fsl,imx7-src.txt
@@ -45,6 +45,6 @@ Example:
 	};
 
 
-For list of all valid reset indicies see
+For list of all valid reset indices see
 <dt-bindings/reset/imx7-reset.h> for i.MX7 and
 <dt-bindings/reset/imx8mq-reset.h> for i.MX8MQ
diff --git a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt
index 436d2106e80d..e876f3ce54f6 100644
--- a/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt
+++ b/Documentation/devicetree/bindings/soc/amlogic/amlogic,canvas.txt
@@ -2,8 +2,8 @@ Amlogic Canvas
 ================================
 
 A canvas is a collection of metadata that describes a pixel buffer.
-Those metadata include: width, height, phyaddr, wrapping, block mode
-and endianness.
+Those metadata include: width, height, phyaddr, wrapping and block mode.
+Starting with GXBB the endianness can also be described.
 
 Many IPs within Amlogic SoCs rely on canvas indexes to read/write pixel data
 rather than use the phy addresses directly. For instance, this is the case for
@@ -18,7 +18,11 @@ Video Lookup Table
 --------------------------
 
 Required properties:
-- compatible: "amlogic,canvas"
+- compatible: has to be one of:
+	- "amlogic,meson8-canvas", "amlogic,canvas" on Meson8
+	- "amlogic,meson8b-canvas", "amlogic,canvas" on Meson8b
+	- "amlogic,meson8m2-canvas", "amlogic,canvas" on Meson8m2
+	- "amlogic,canvas" on GXBB and newer
 - reg: Base physical address and size of the canvas registers.
 
 Example:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
new file mode 100644
index 000000000000..954ffee0a9c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,aoss-qmp.txt
@@ -0,0 +1,81 @@
+Qualcomm Always-On Subsystem side channel binding
+
+This binding describes the hardware component responsible for side channel
+requests to the always-on subsystem (AOSS), used for certain power management
+requests that are not handled by the standard RPMh interface. Each client in
+the SoC has its own block of message RAM and IRQ for communication with the
+AOSS. The protocol used to communicate in the message RAM is known as the
+Qualcomm Messaging Protocol (QMP).
+
+The AOSS side channel exposes control over a set of resources, used to control
+a set of debug related clocks and to affect the low power state of resources
+related to the secondary subsystems. These resources are exposed as a set of
+power-domains.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,sdm845-aoss-qmp"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the base address and size of the message RAM for this
+		    client's communication with the AOSS
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the AOSS message IRQ for this client
+
+- mboxes:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: reference to the mailbox representing the outgoing doorbell
+		    in APCS for this client, as described in mailbox/mailbox.txt
+
+- #clock-cells:
+	Usage: optional
+	Value type: <u32>
+	Definition: must be 0
+		    The single clock represents the QDSS clock.
+
+- #power-domain-cells:
+	Usage: optional
+	Value type: <u32>
+	Definition: must be 1
+		    The provided power-domains are:
+		    CDSP state (0), LPASS state (1), modem state (2), SLPI
+		    state (3), SPSS state (4) and Venus state (5).
+
+= SUBNODES
+The AOSS side channel also provides the controls for three cooling devices;
+these are expressed as subnodes of the QMP node. The name of the node is used
+to identify the resource and must therefore be "cx", "mx" or "ebi".
+
+- #cooling-cells:
+	Usage: optional
+	Value type: <u32>
+	Definition: must be 2
+
+= EXAMPLE
+
+The following example represents the AOSS side-channel message RAM and the
+mechanism exposing the power-domains, as found in SDM845.
+
+	aoss_qmp: qmp@c300000 {
+		compatible = "qcom,sdm845-aoss-qmp";
+		reg = <0x0c300000 0x100000>;
+		interrupts = <GIC_SPI 389 IRQ_TYPE_EDGE_RISING>;
+		mboxes = <&apss_shared 0>;
+
+		#power-domain-cells = <1>;
+
+		cx_cdev: cx {
+			#cooling-cells = <2>;
+		};
+
+		mx_cdev: mx {
+			#cooling-cells = <2>;
+		};
+	};
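
On the consumer side, a device that references one of these power-domains votes on the subsystem low-power state through the genpd/runtime-PM machinery. The sketch below is an assumption-laden illustration (function name invented, index 0 assumed as the relevant "power-domains" entry), not code from this series:

	static int aoss_qmp_pd_vote_sketch(struct device *dev)
	{
		struct device *pd;
		int error;

		/* attach the device's first "power-domains" entry */
		pd = dev_pm_domain_attach_by_id(dev, 0);
		if (IS_ERR(pd))
			return PTR_ERR(pd);

		/* hold a vote while the subsystem resource is needed */
		error = pm_runtime_get_sync(pd);
		if (error < 0) {
			pm_runtime_put_noidle(pd);
			return error;
		}

		return 0;
	}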
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
index bcc612cc7423..db501269f47b 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,apr.txt
@@ -9,7 +9,7 @@ used for audio/voice services on the QDSP.
 	Value type: <stringlist>
 	Definition: must be "qcom,apr-v<VERSION-NUMBER>", example "qcom,apr-v2"
 
-- reg
+- qcom,apr-domain
 	Usage: required
 	Value type: <u32>
 	Definition: Destination processor ID.
@@ -49,9 +49,9 @@ by the individual bindings for the specific service
 The following example represents a QDSP based sound card on a MSM8996 device
 which uses apr as communication between Apps and QDSP.
 
-	apr@4 {
+	apr {
 		compatible = "qcom,apr-v2";
-		reg = <APR_DOMAIN_ADSP>;
+		qcom,apr-domain = <APR_DOMAIN_ADSP>;
 
 		q6core@3 {
 			compatible = "qcom,q6core";
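
The matching driver-side change is a one-line property lookup; a hedged sketch (the helper name is hypothetical, not necessarily the exact code in drivers/soc/qcom/apr.c):

	static int apr_get_domain_sketch(struct device *dev, u32 *domain_id)
	{
		/* replaces reading the "reg" property used before this change */
		return of_property_read_u32(dev->of_node, "qcom,apr-domain",
					    domain_id);
	}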
diff --git a/MAINTAINERS b/MAINTAINERS
index 9bd4c3b154e8..bd3fe4fe13c4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2091,7 +2091,6 @@ S:	Maintained
 
 ARM/QUALCOMM SUPPORT
 M:	Andy Gross <agross@kernel.org>
-M:	David Brown <david.brown@linaro.org>
 L:	linux-arm-msm@vger.kernel.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/soc/qcom/
@@ -2113,7 +2112,7 @@ F:	drivers/i2c/busses/i2c-qup.c
 F:	drivers/i2c/busses/i2c-qcom-geni.c
 F:	drivers/mfd/ssbi.c
 F:	drivers/mmc/host/mmci_qcom*
-F:	drivers/mmc/host/sdhci_msm.c
+F:	drivers/mmc/host/sdhci-msm.c
 F:	drivers/pci/controller/dwc/pcie-qcom.c
 F:	drivers/phy/qualcomm/
 F:	drivers/power/*/msm*
@@ -6527,6 +6526,7 @@ M:	Li Yang <leoyang.li@nxp.com>
 L:	linuxppc-dev@lists.ozlabs.org
 L:	linux-arm-kernel@lists.infradead.org
 S:	Maintained
+F:	Documentation/devicetree/bindings/misc/fsl,dpaa2-console.txt
 F:	Documentation/devicetree/bindings/soc/fsl/
 F:	drivers/soc/fsl/
 F:	include/linux/fsl/
@@ -11907,11 +11907,13 @@ F:	include/linux/mtd/onenand*.h
 
 OP-TEE DRIVER
 M:	Jens Wiklander <jens.wiklander@linaro.org>
+L:	tee-dev@lists.linaro.org
 S:	Maintained
 F:	drivers/tee/optee/
 
 OP-TEE RANDOM NUMBER GENERATOR (RNG) DRIVER
 M:	Sumit Garg <sumit.garg@linaro.org>
+L:	tee-dev@lists.linaro.org
 S:	Maintained
 F:	drivers/char/hw_random/optee-rng.c
 
@@ -13295,7 +13297,7 @@ M:	Niklas Cassel <niklas.cassel@linaro.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
-F:	Documentation/devicetree/bindings/net/qcom,dwmac.txt
+F:	Documentation/devicetree/bindings/net/qcom,ethqos.txt
 
 QUALCOMM GENERIC INTERFACE I2C DRIVER
 M:	Alok Chauhan <alokc@codeaurora.org>
@@ -15745,6 +15747,7 @@ F:	include/media/i2c/tw9910.h
 
 TEE SUBSYSTEM
 M:	Jens Wiklander <jens.wiklander@linaro.org>
+L:	tee-dev@lists.linaro.org
 S:	Maintained
 F:	include/linux/tee_drv.h
 F:	include/uapi/linux/tee.h
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e0350476feaa..203664c40d3d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3442,6 +3442,7 @@ static int omap_hwmod_check_module(struct device *dev,
  * @dev: struct device
  * @oh: module
  * @sysc_fields: sysc register bits
+ * @clkdm: clockdomain
  * @rev_offs: revision register offset
  * @sysc_offs: sysconfig register offset
  * @syss_offs: sysstatus register offset
@@ -3453,6 +3454,7 @@ static int omap_hwmod_check_module(struct device *dev,
 static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
 				      const struct ti_sysc_module_data *data,
 				      struct sysc_regbits *sysc_fields,
+				      struct clockdomain *clkdm,
 				      s32 rev_offs, s32 sysc_offs,
 				      s32 syss_offs, u32 sysc_flags,
 				      u32 idlemodes)
@@ -3460,8 +3462,6 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
 	struct omap_hwmod_class_sysconfig *sysc;
 	struct omap_hwmod_class *class = NULL;
 	struct omap_hwmod_ocp_if *oi = NULL;
-	struct clockdomain *clkdm = NULL;
-	struct clk *clk = NULL;
 	void __iomem *regs = NULL;
 	unsigned long flags;
 
@@ -3508,36 +3508,6 @@ static int omap_hwmod_allocate_module(struct device *dev, struct omap_hwmod *oh,
 		oi->user = OCP_USER_MPU | OCP_USER_SDMA;
 	}
 
-	if (!oh->_clk) {
-		struct clk_hw_omap *hwclk;
-
-		clk = of_clk_get_by_name(dev->of_node, "fck");
-		if (!IS_ERR(clk))
-			clk_prepare(clk);
-		else
-			clk = NULL;
-
-		/*
-		 * Populate clockdomain based on dts clock. It is needed for
-		 * clkdm_deny_idle() and clkdm_allow_idle() until we have have
-		 * interconnect driver and reset driver capable of blocking
-		 * clockdomain idle during reset, enable and idle.
-		 */
-		if (clk) {
-			hwclk = to_clk_hw_omap(__clk_get_hw(clk));
-			if (hwclk && hwclk->clkdm_name)
-				clkdm = clkdm_lookup(hwclk->clkdm_name);
-		}
-
-		/*
-		 * Note that we assume interconnect driver manages the clocks
-		 * and do not need to populate oh->_clk for dynamically
-		 * allocated modules.
-		 */
-		clk_unprepare(clk);
-		clk_put(clk);
-	}
-
 	spin_lock_irqsave(&oh->_lock, flags);
 	if (regs)
 		oh->_mpu_rt_va = regs;
@@ -3623,7 +3593,7 @@ int omap_hwmod_init_module(struct device *dev,
 	u32 sysc_flags, idlemodes;
 	int error;
 
-	if (!dev || !data)
+	if (!dev || !data || !data->name || !cookie)
 		return -EINVAL;
 
 	oh = _lookup(data->name);
@@ -3694,7 +3664,8 @@ int omap_hwmod_init_module(struct device *dev,
 		return error;
 
 	return omap_hwmod_allocate_module(dev, oh, data, sysc_fields,
-					  rev_offs, sysc_offs, syss_offs,
+					  cookie->clkdm, rev_offs,
+					  sysc_offs, syss_offs,
 					  sysc_flags, idlemodes);
 }
 
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index b0f8c9a70c68..6c6f8fce854e 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -26,6 +26,7 @@
 #include <linux/platform_data/wkup_m3.h>
 #include <linux/platform_data/asoc-ti-mcbsp.h>
 
+#include "clockdomain.h"
 #include "common.h"
 #include "common-board-devices.h"
 #include "control.h"
@@ -460,6 +461,62 @@ static void __init dra7x_evm_mmc_quirk(void)
 }
 #endif
 
+static struct clockdomain *ti_sysc_find_one_clockdomain(struct clk *clk)
+{
+	struct clockdomain *clkdm = NULL;
+	struct clk_hw_omap *hwclk;
+
+	hwclk = to_clk_hw_omap(__clk_get_hw(clk));
+	if (hwclk && hwclk->clkdm_name)
+		clkdm = clkdm_lookup(hwclk->clkdm_name);
+
+	return clkdm;
+}
+
+/**
+ * ti_sysc_clkdm_init - find the clockdomain based on a clock
+ * @dev: struct device
+ * @fck: device functional clock
+ * @ick: device interface clock
+ * @cookie: interconnect target module cookie to populate
+ *
+ * Populate the clockdomain based on a clock. It is needed so that
+ * clkdm_deny_idle() and clkdm_allow_idle() can block clockdomain
+ * idle during reset, enable and idle.
+ *
+ * Note that we assume the interconnect driver manages the clocks
+ * and do not need to populate oh->_clk for dynamically
+ * allocated modules.
+ */
+static int ti_sysc_clkdm_init(struct device *dev,
+			      struct clk *fck, struct clk *ick,
+			      struct ti_sysc_cookie *cookie)
+{
+	if (fck)
+		cookie->clkdm = ti_sysc_find_one_clockdomain(fck);
+	if (cookie->clkdm)
+		return 0;
+	if (ick)
+		cookie->clkdm = ti_sysc_find_one_clockdomain(ick);
+	if (cookie->clkdm)
+		return 0;
+
+	return -ENODEV;
+}
+
+static void ti_sysc_clkdm_deny_idle(struct device *dev,
+				    const struct ti_sysc_cookie *cookie)
+{
+	if (cookie->clkdm)
+		clkdm_deny_idle(cookie->clkdm);
+}
+
+static void ti_sysc_clkdm_allow_idle(struct device *dev,
+				     const struct ti_sysc_cookie *cookie)
+{
+	if (cookie->clkdm)
+		clkdm_allow_idle(cookie->clkdm);
+}
+
 static int ti_sysc_enable_module(struct device *dev,
 				 const struct ti_sysc_cookie *cookie)
 {
@@ -491,6 +548,9 @@ static struct of_dev_auxdata omap_auxdata_lookup[];
 
 static struct ti_sysc_platform_data ti_sysc_pdata = {
 	.auxdata = omap_auxdata_lookup,
+	.init_clockdomain = ti_sysc_clkdm_init,
+	.clkdm_deny_idle = ti_sysc_clkdm_deny_idle,
+	.clkdm_allow_idle = ti_sysc_clkdm_allow_idle,
 	.init_module = omap_hwmod_init_module,
 	.enable_module = ti_sysc_enable_module,
 	.idle_module = ti_sysc_idle_module,
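
Taken together, these callbacks give the ti-sysc driver roughly the call flow sketched below. This is a condensed illustration of the ti-sysc changes later in this diff (the wrapper function name is invented), not verbatim code:

	static int ti_sysc_clkdm_flow_sketch(struct device *dev,
					     struct ti_sysc_platform_data *pdata,
					     struct clk *fck, struct clk *ick,
					     struct ti_sysc_cookie *cookie)
	{
		int error;

		/* look up the clockdomain once at init time */
		error = pdata->init_clockdomain(dev, fck, ick, cookie);
		if (error && error != -ENODEV)
			return error;

		/* block clockdomain autoidle around reset, enable and idle */
		pdata->clkdm_deny_idle(dev, cookie);
		/* ... reset and enable the interconnect target module ... */
		pdata->clkdm_allow_idle(dev, cookie);

		return 0;
	}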
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 972854ca1d9a..ec1004c858b8 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -399,8 +399,8 @@ static int __init brcmstb_gisb_arb_probe(struct platform_device *pdev)
 					       &gisb_panic_notifier);
 	}
 
-	dev_info(&pdev->dev, "registered mem: %p, irqs: %d, %d\n",
-		gdev->base, timeout_irq, tea_irq);
+	dev_info(&pdev->dev, "registered irqs: %d, %d\n",
+		timeout_irq, tea_irq);
 
 	return 0;
 }
diff --git a/drivers/bus/fsl-mc/dprc.c b/drivers/bus/fsl-mc/dprc.c
index 1c3f62182266..0fe3f52ae0de 100644
--- a/drivers/bus/fsl-mc/dprc.c
+++ b/drivers/bus/fsl-mc/dprc.c
@@ -443,11 +443,31 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
 	struct fsl_mc_command cmd = { 0 };
 	struct dprc_cmd_get_obj_region *cmd_params;
 	struct dprc_rsp_get_obj_region *rsp_params;
+	u16 major_ver, minor_ver;
 	int err;
 
 	/* prepare command */
-	cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
-					  cmd_flags, token);
+	err = dprc_get_api_version(mc_io, 0,
+				   &major_ver,
+				   &minor_ver);
+	if (err)
+		return err;
+
+	/*
+	 * MC API version 6.3 introduced a new field to the region
+	 * descriptor: base_address. If the older API is in use then the base
+	 * address is set to zero to indicate it needs to be obtained elsewhere
+	 * (typically the device tree).
+	 */
+	if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+		cmd.header =
+			mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG_V2,
+					     cmd_flags, token);
+	else
+		cmd.header =
+			mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG,
+					     cmd_flags, token);
+
 	cmd_params = (struct dprc_cmd_get_obj_region *)cmd.params;
 	cmd_params->obj_id = cpu_to_le32(obj_id);
 	cmd_params->region_index = region_index;
@@ -461,8 +481,12 @@ int dprc_get_obj_region(struct fsl_mc_io *mc_io,
 
 	/* retrieve response parameters */
 	rsp_params = (struct dprc_rsp_get_obj_region *)cmd.params;
-	region_desc->base_offset = le64_to_cpu(rsp_params->base_addr);
+	region_desc->base_offset = le64_to_cpu(rsp_params->base_offset);
 	region_desc->size = le32_to_cpu(rsp_params->size);
+	if (major_ver > 6 || (major_ver == 6 && minor_ver >= 3))
+		region_desc->base_address = le64_to_cpu(rsp_params->base_addr);
+	else
+		region_desc->base_address = 0;
 
 	return 0;
 }
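
The 6.3 version gate above appears twice in dprc_get_obj_region(); factored out for readability it would read as below. The helper name is hypothetical and not part of this patch:

	static bool dprc_api_has_base_address(u16 major_ver, u16 minor_ver)
	{
		/* the base_address field was added in MC API 6.3 */
		return major_ver > 6 || (major_ver == 6 && minor_ver >= 3);
	}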
diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c
index f0404c6d1ff4..5c9bf2e06552 100644
--- a/drivers/bus/fsl-mc/fsl-mc-bus.c
+++ b/drivers/bus/fsl-mc/fsl-mc-bus.c
@@ -487,10 +487,19 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
 				"dprc_get_obj_region() failed: %d\n", error);
 			goto error_cleanup_regions;
 		}
-
-		error = translate_mc_addr(mc_dev, mc_region_type,
+		/*
+		 * Older MC firmware returned only a region offset and no base
+		 * address. If the base address is present in region_desc, use
+		 * it; otherwise fall back to the old translation mechanism.
+		 */
+		if (region_desc.base_address)
+			regions[i].start = region_desc.base_address +
+						region_desc.base_offset;
+		else
+			error = translate_mc_addr(mc_dev, mc_region_type,
 					  region_desc.base_offset,
 					  &regions[i].start);
+
 		if (error < 0) {
 			dev_err(parent_dev,
 				"Invalid MC offset: %#x (for %s.%d's region %d)\n",
@@ -504,6 +513,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
 		regions[i].flags = IORESOURCE_IO;
 		if (region_desc.flags & DPRC_REGION_CACHEABLE)
 			regions[i].flags |= IORESOURCE_CACHEABLE;
+		if (region_desc.flags & DPRC_REGION_SHAREABLE)
+			regions[i].flags |= IORESOURCE_MEM;
 	}
 
 	mc_dev->regions = regions;
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index ea11b4fe59f7..020fcc04ec8b 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -79,9 +79,11 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
 
 /* DPRC command versioning */
 #define DPRC_CMD_BASE_VERSION			1
+#define DPRC_CMD_2ND_VERSION			2
 #define DPRC_CMD_ID_OFFSET			4
 
 #define DPRC_CMD(id)	(((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_BASE_VERSION)
+#define DPRC_CMD_V2(id)	(((id) << DPRC_CMD_ID_OFFSET) | DPRC_CMD_2ND_VERSION)
 
 /* DPRC command IDs */
 #define DPRC_CMDID_CLOSE                        DPRC_CMD(0x800)
@@ -100,6 +102,7 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
 #define DPRC_CMDID_GET_OBJ_COUNT                DPRC_CMD(0x159)
 #define DPRC_CMDID_GET_OBJ                      DPRC_CMD(0x15A)
 #define DPRC_CMDID_GET_OBJ_REG                  DPRC_CMD(0x15E)
+#define DPRC_CMDID_GET_OBJ_REG_V2               DPRC_CMD_V2(0x15E)
 #define DPRC_CMDID_SET_OBJ_IRQ                  DPRC_CMD(0x15F)
 
 struct dprc_cmd_open {
@@ -199,9 +202,16 @@ struct dprc_rsp_get_obj_region {
 	/* response word 0 */
 	__le64 pad;
 	/* response word 1 */
-	__le64 base_addr;
+	__le64 base_offset;
 	/* response word 2 */
 	__le32 size;
+	__le32 pad2;
+	/* response word 3 */
+	__le32 flags;
+	__le32 pad3;
+	/* response word 4 */
+	/* base_addr may be zero if older MC firmware is used */
+	__le64 base_addr;
 };
 
 struct dprc_cmd_set_obj_irq {
@@ -334,6 +344,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
 /* Region flags */
 /* Cacheable - Indicates that region should be mapped as cacheable */
 #define DPRC_REGION_CACHEABLE	0x00000001
+#define DPRC_REGION_SHAREABLE	0x00000002
 
 /**
  * enum dprc_region_type - Region type
@@ -342,7 +353,8 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
  */
 enum dprc_region_type {
 	DPRC_REGION_TYPE_MC_PORTAL,
-	DPRC_REGION_TYPE_QBMAN_PORTAL
+	DPRC_REGION_TYPE_QBMAN_PORTAL,
+	DPRC_REGION_TYPE_QBMAN_MEM_BACKED_PORTAL
 };
 
 /**
@@ -360,6 +372,7 @@ struct dprc_region_desc {
 	u32 size;
 	u32 flags;
 	enum dprc_region_type type;
+	u64 base_address;
 };
 
 int dprc_get_obj_region(struct fsl_mc_io *mc_io,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index b72741668c92..e6deabd8305d 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -71,6 +71,9 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
71 * @name: name if available 71 * @name: name if available
72 * @revision: interconnect target module revision 72 * @revision: interconnect target module revision
73 * @needs_resume: runtime resume needed on resume from suspend 73 * @needs_resume: runtime resume needed on resume from suspend
74 * @clk_enable_quirk: module specific clock enable quirk
75 * @clk_disable_quirk: module specific clock disable quirk
76 * @reset_done_quirk: module specific reset done quirk
74 */ 77 */
75struct sysc { 78struct sysc {
76 struct device *dev; 79 struct device *dev;
@@ -89,10 +92,14 @@ struct sysc {
89 struct ti_sysc_cookie cookie; 92 struct ti_sysc_cookie cookie;
90 const char *name; 93 const char *name;
91 u32 revision; 94 u32 revision;
92 bool enabled; 95 unsigned int enabled:1;
93 bool needs_resume; 96 unsigned int needs_resume:1;
94 bool child_needs_resume; 97 unsigned int child_needs_resume:1;
98 unsigned int disable_on_idle:1;
95 struct delayed_work idle_work; 99 struct delayed_work idle_work;
100 void (*clk_enable_quirk)(struct sysc *sysc);
101 void (*clk_disable_quirk)(struct sysc *sysc);
102 void (*reset_done_quirk)(struct sysc *sysc);
96}; 103};
97 104
98static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np, 105static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -100,6 +107,20 @@ static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
100 107
101static void sysc_write(struct sysc *ddata, int offset, u32 value) 108static void sysc_write(struct sysc *ddata, int offset, u32 value)
102{ 109{
110 if (ddata->cfg.quirks & SYSC_QUIRK_16BIT) {
111 writew_relaxed(value & 0xffff, ddata->module_va + offset);
112
113 /* Only i2c revision has LO and HI register with stride of 4 */
114 if (ddata->offsets[SYSC_REVISION] >= 0 &&
115 offset == ddata->offsets[SYSC_REVISION]) {
116 u16 hi = value >> 16;
117
118 writew_relaxed(hi, ddata->module_va + offset + 4);
119 }
120
121 return;
122 }
123
103 writel_relaxed(value, ddata->module_va + offset); 124 writel_relaxed(value, ddata->module_va + offset);
104} 125}
105 126
@@ -109,7 +130,14 @@ static u32 sysc_read(struct sysc *ddata, int offset)
109 u32 val; 130 u32 val;
110 131
111 val = readw_relaxed(ddata->module_va + offset); 132 val = readw_relaxed(ddata->module_va + offset);
112 val |= (readw_relaxed(ddata->module_va + offset + 4) << 16); 133
134 /* Only i2c revision has LO and HI register with stride of 4 */
135 if (ddata->offsets[SYSC_REVISION] >= 0 &&
136 offset == ddata->offsets[SYSC_REVISION]) {
137 u16 tmp = readw_relaxed(ddata->module_va + offset + 4);
138
139 val |= tmp << 16;
140 }
113 141
114 return val; 142 return val;
115 } 143 }
@@ -132,6 +160,26 @@ static u32 sysc_read_revision(struct sysc *ddata)
132 return sysc_read(ddata, offset); 160 return sysc_read(ddata, offset);
133} 161}
134 162
163static u32 sysc_read_sysconfig(struct sysc *ddata)
164{
165 int offset = ddata->offsets[SYSC_SYSCONFIG];
166
167 if (offset < 0)
168 return 0;
169
170 return sysc_read(ddata, offset);
171}
172
173static u32 sysc_read_sysstatus(struct sysc *ddata)
174{
175 int offset = ddata->offsets[SYSC_SYSSTATUS];
176
177 if (offset < 0)
178 return 0;
179
180 return sysc_read(ddata, offset);
181}
182
135static int sysc_add_named_clock_from_child(struct sysc *ddata, 183static int sysc_add_named_clock_from_child(struct sysc *ddata,
136 const char *name, 184 const char *name,
137 const char *optfck_name) 185 const char *optfck_name)
@@ -422,6 +470,30 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
422 } 470 }
423} 471}
424 472
473static void sysc_clkdm_deny_idle(struct sysc *ddata)
474{
475 struct ti_sysc_platform_data *pdata;
476
477 if (ddata->legacy_mode)
478 return;
479
480 pdata = dev_get_platdata(ddata->dev);
481 if (pdata && pdata->clkdm_deny_idle)
482 pdata->clkdm_deny_idle(ddata->dev, &ddata->cookie);
483}
484
485static void sysc_clkdm_allow_idle(struct sysc *ddata)
486{
487 struct ti_sysc_platform_data *pdata;
488
489 if (ddata->legacy_mode)
490 return;
491
492 pdata = dev_get_platdata(ddata->dev);
493 if (pdata && pdata->clkdm_allow_idle)
494 pdata->clkdm_allow_idle(ddata->dev, &ddata->cookie);
495}
496
425/** 497/**
426 * sysc_init_resets - init rstctrl reset line if configured 498 * sysc_init_resets - init rstctrl reset line if configured
427 * @ddata: device driver data 499 * @ddata: device driver data
@@ -431,7 +503,7 @@ static void sysc_disable_opt_clocks(struct sysc *ddata)
431static int sysc_init_resets(struct sysc *ddata) 503static int sysc_init_resets(struct sysc *ddata)
432{ 504{
433 ddata->rsts = 505 ddata->rsts =
434 devm_reset_control_array_get_optional_exclusive(ddata->dev); 506 devm_reset_control_get_optional(ddata->dev, "rstctrl");
435 if (IS_ERR(ddata->rsts)) 507 if (IS_ERR(ddata->rsts))
436 return PTR_ERR(ddata->rsts); 508 return PTR_ERR(ddata->rsts);
437 509
@@ -694,8 +766,11 @@ static int sysc_ioremap(struct sysc *ddata)
694 ddata->offsets[SYSC_SYSCONFIG], 766 ddata->offsets[SYSC_SYSCONFIG],
695 ddata->offsets[SYSC_SYSSTATUS]); 767 ddata->offsets[SYSC_SYSSTATUS]);
696 768
769 if (size < SZ_1K)
770 size = SZ_1K;
771
697 if ((size + sizeof(u32)) > ddata->module_size) 772 if ((size + sizeof(u32)) > ddata->module_size)
698 return -EINVAL; 773 size = ddata->module_size;
699 } 774 }
700 775
701 ddata->module_va = devm_ioremap(ddata->dev, 776 ddata->module_va = devm_ioremap(ddata->dev,
@@ -794,7 +869,9 @@ static void sysc_show_registers(struct sysc *ddata)
794} 869}
795 870
796#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1) 871#define SYSC_IDLE_MASK (SYSC_NR_IDLEMODES - 1)
872#define SYSC_CLOCACT_ICK 2
797 873
874/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
798static int sysc_enable_module(struct device *dev) 875static int sysc_enable_module(struct device *dev)
799{ 876{
800 struct sysc *ddata; 877 struct sysc *ddata;
@@ -805,23 +882,34 @@ static int sysc_enable_module(struct device *dev)
805 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 882 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
806 return 0; 883 return 0;
807 884
808 /*
809 * TODO: Need to prevent clockdomain autoidle?
810 * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
811 */
812
813 regbits = ddata->cap->regbits; 885 regbits = ddata->cap->regbits;
814 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 886 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
815 887
888 /* Set CLOCKACTIVITY, we only use it for ick */
889 if (regbits->clkact_shift >= 0 &&
890 (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
891 ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
892 reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
893
816 /* Set SIDLE mode */ 894 /* Set SIDLE mode */
817 idlemodes = ddata->cfg.sidlemodes; 895 idlemodes = ddata->cfg.sidlemodes;
818 if (!idlemodes || regbits->sidle_shift < 0) 896 if (!idlemodes || regbits->sidle_shift < 0)
819 goto set_midle; 897 goto set_midle;
820 898
821 best_mode = fls(ddata->cfg.sidlemodes) - 1; 899 if (ddata->cfg.quirks & (SYSC_QUIRK_SWSUP_SIDLE |
822 if (best_mode > SYSC_IDLE_MASK) { 900 SYSC_QUIRK_SWSUP_SIDLE_ACT)) {
823 dev_err(dev, "%s: invalid sidlemode\n", __func__); 901 best_mode = SYSC_IDLE_NO;
824 return -EINVAL; 902 } else {
903 best_mode = fls(ddata->cfg.sidlemodes) - 1;
904 if (best_mode > SYSC_IDLE_MASK) {
905 dev_err(dev, "%s: invalid sidlemode\n", __func__);
906 return -EINVAL;
907 }
908
909 /* Set WAKEUP */
910 if (regbits->enwkup_shift >= 0 &&
911 ddata->cfg.sysc_val & BIT(regbits->enwkup_shift))
912 reg |= BIT(regbits->enwkup_shift);
825 } 913 }
826 914
827 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 915 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
@@ -832,7 +920,7 @@ set_midle:
832 /* Set MIDLE mode */ 920 /* Set MIDLE mode */
833 idlemodes = ddata->cfg.midlemodes; 921 idlemodes = ddata->cfg.midlemodes;
834 if (!idlemodes || regbits->midle_shift < 0) 922 if (!idlemodes || regbits->midle_shift < 0)
835 return 0; 923 goto set_autoidle;
836 924
837 best_mode = fls(ddata->cfg.midlemodes) - 1; 925 best_mode = fls(ddata->cfg.midlemodes) - 1;
838 if (best_mode > SYSC_IDLE_MASK) { 926 if (best_mode > SYSC_IDLE_MASK) {
@@ -844,6 +932,14 @@ set_midle:
844 reg |= best_mode << regbits->midle_shift; 932 reg |= best_mode << regbits->midle_shift;
845 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 933 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
846 934
935set_autoidle:
936 /* Autoidle bit must enabled separately if available */
937 if (regbits->autoidle_shift >= 0 &&
938 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift)) {
939 reg |= 1 << regbits->autoidle_shift;
940 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
941 }
942
847 return 0; 943 return 0;
848} 944}
849 945
@@ -861,6 +957,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
861 return 0; 957 return 0;
862} 958}
863 959
960/* Caller needs to manage sysc_clkdm_deny_idle() and sysc_clkdm_allow_idle() */
864static int sysc_disable_module(struct device *dev) 961static int sysc_disable_module(struct device *dev)
865{ 962{
866 struct sysc *ddata; 963 struct sysc *ddata;
@@ -872,11 +969,6 @@ static int sysc_disable_module(struct device *dev)
872 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV) 969 if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
873 return 0; 970 return 0;
874 971
875 /*
876 * TODO: Need to prevent clockdomain autoidle?
877 * See clkdm_deny_idle() in arch/mach-omap2/omap_hwmod.c
878 */
879
880 regbits = ddata->cap->regbits; 972 regbits = ddata->cap->regbits;
881 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]); 973 reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
882 974
@@ -901,14 +993,21 @@ set_sidle:
901 if (!idlemodes || regbits->sidle_shift < 0) 993 if (!idlemodes || regbits->sidle_shift < 0)
902 return 0; 994 return 0;
903 995
904 ret = sysc_best_idle_mode(idlemodes, &best_mode); 996 if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
905 if (ret) { 997 best_mode = SYSC_IDLE_FORCE;
906 dev_err(dev, "%s: invalid sidlemode\n", __func__); 998 } else {
907 return ret; 999 ret = sysc_best_idle_mode(idlemodes, &best_mode);
1000 if (ret) {
1001 dev_err(dev, "%s: invalid sidlemode\n", __func__);
1002 return ret;
1003 }
908 } 1004 }
909 1005
910 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift); 1006 reg &= ~(SYSC_IDLE_MASK << regbits->sidle_shift);
911 reg |= best_mode << regbits->sidle_shift; 1007 reg |= best_mode << regbits->sidle_shift;
1008 if (regbits->autoidle_shift >= 0 &&
1009 ddata->cfg.sysc_val & BIT(regbits->autoidle_shift))
1010 reg |= 1 << regbits->autoidle_shift;
912 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg); 1011 sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
913 1012
914 return 0; 1013 return 0;
@@ -932,6 +1031,9 @@ static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
932 dev_err(dev, "%s: could not idle: %i\n", 1031 dev_err(dev, "%s: could not idle: %i\n",
933 __func__, error); 1032 __func__, error);
934 1033
1034 if (ddata->disable_on_idle)
1035 reset_control_assert(ddata->rsts);
1036
935 return 0; 1037 return 0;
936} 1038}
937 1039
@@ -941,6 +1043,9 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
941 struct ti_sysc_platform_data *pdata; 1043 struct ti_sysc_platform_data *pdata;
942 int error; 1044 int error;
943 1045
1046 if (ddata->disable_on_idle)
1047 reset_control_deassert(ddata->rsts);
1048
944 pdata = dev_get_platdata(ddata->dev); 1049 pdata = dev_get_platdata(ddata->dev);
945 if (!pdata) 1050 if (!pdata)
946 return 0; 1051 return 0;
@@ -966,14 +1071,16 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
966 if (!ddata->enabled) 1071 if (!ddata->enabled)
967 return 0; 1072 return 0;
968 1073
1074 sysc_clkdm_deny_idle(ddata);
1075
969 if (ddata->legacy_mode) { 1076 if (ddata->legacy_mode) {
970 error = sysc_runtime_suspend_legacy(dev, ddata); 1077 error = sysc_runtime_suspend_legacy(dev, ddata);
971 if (error) 1078 if (error)
972 return error; 1079 goto err_allow_idle;
973 } else { 1080 } else {
974 error = sysc_disable_module(dev); 1081 error = sysc_disable_module(dev);
975 if (error) 1082 if (error)
976 return error; 1083 goto err_allow_idle;
977 } 1084 }
978 1085
979 sysc_disable_main_clocks(ddata); 1086 sysc_disable_main_clocks(ddata);
@@ -983,6 +1090,12 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
983 1090
984 ddata->enabled = false; 1091 ddata->enabled = false;
985 1092
1093err_allow_idle:
1094 sysc_clkdm_allow_idle(ddata);
1095
1096 if (ddata->disable_on_idle)
1097 reset_control_assert(ddata->rsts);
1098
986 return error; 1099 return error;
987} 1100}
988 1101
@@ -996,10 +1109,15 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
996 if (ddata->enabled) 1109 if (ddata->enabled)
997 return 0; 1110 return 0;
998 1111
1112 if (ddata->disable_on_idle)
1113 reset_control_deassert(ddata->rsts);
1114
1115 sysc_clkdm_deny_idle(ddata);
1116
999 if (sysc_opt_clks_needed(ddata)) { 1117 if (sysc_opt_clks_needed(ddata)) {
1000 error = sysc_enable_opt_clocks(ddata); 1118 error = sysc_enable_opt_clocks(ddata);
1001 if (error) 1119 if (error)
1002 return error; 1120 goto err_allow_idle;
1003 } 1121 }
1004 1122
1005 error = sysc_enable_main_clocks(ddata); 1123 error = sysc_enable_main_clocks(ddata);
@@ -1018,6 +1136,8 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
1018 1136
1019 ddata->enabled = true; 1137 ddata->enabled = true;
1020 1138
1139 sysc_clkdm_allow_idle(ddata);
1140
1021 return 0; 1141 return 0;
1022 1142
1023err_main_clocks: 1143err_main_clocks:
@@ -1025,6 +1145,8 @@ err_main_clocks:
1025err_opt_clocks: 1145err_opt_clocks:
1026 if (sysc_opt_clks_needed(ddata)) 1146 if (sysc_opt_clks_needed(ddata))
1027 sysc_disable_opt_clocks(ddata); 1147 sysc_disable_opt_clocks(ddata);
1148err_allow_idle:
1149 sysc_clkdm_allow_idle(ddata);
1028 1150
1029 return error; 1151 return error;
1030} 1152}
@@ -1106,8 +1228,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1106 0), 1228 0),
1107 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, 1229 SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff,
1108 0), 1230 0),
1231 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
1232 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1109 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, 1233 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
1110 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1234 SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
1111 /* Uarts on omap4 and later */ 1235 /* Uarts on omap4 and later */
1112 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff, 1236 SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
1113 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE), 1237 SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
@@ -1119,6 +1243,22 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1119 SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT | 1243 SYSC_QUIRK_EXT_OPT_CLOCK | SYSC_QUIRK_NO_RESET_ON_INIT |
1120 SYSC_QUIRK_SWSUP_SIDLE), 1244 SYSC_QUIRK_SWSUP_SIDLE),
1121 1245
1246 /* Quirks that need to be set based on detected module */
1247 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
1248 SYSC_MODULE_QUIRK_HDQ1W),
1249 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff,
1250 SYSC_MODULE_QUIRK_HDQ1W),
1251 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000036, 0x000000ff,
1252 SYSC_MODULE_QUIRK_I2C),
1253 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x0000003c, 0x000000ff,
1254 SYSC_MODULE_QUIRK_I2C),
1255 SYSC_QUIRK("i2c", 0, 0, 0x20, 0x10, 0x00000040, 0x000000ff,
1256 SYSC_MODULE_QUIRK_I2C),
1257 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
1258 SYSC_MODULE_QUIRK_I2C),
1259 SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
1260 SYSC_MODULE_QUIRK_WDT),
1261
1122#ifdef DEBUG 1262#ifdef DEBUG
1123 SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0), 1263 SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
1124 SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0), 1264 SYSC_QUIRK("atl", 0, 0, -1, -1, 0x0a070100, 0xffffffff, 0),
@@ -1132,11 +1272,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1132 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), 1272 SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
1133 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), 1273 SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
1134 SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0), 1274 SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
1135 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff, 0),
1136 SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x0000000a, 0xffffffff, 0),
1137 SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0), 1275 SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
1138 SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0), 1276 SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
1139 SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0, 0),
1140 SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0), 1277 SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
1141 SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0), 1278 SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44306302, 0xffffffff, 0),
1142 SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0), 1279 SYSC_QUIRK("mcasp", 0, 0, 0x4, -1, 0x44307b02, 0xffffffff, 0),
@@ -1172,7 +1309,6 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
1172 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0), 1309 SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -1, 0x50700101, 0xffffffff, 0),
1173 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050, 1310 SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
1174 0xffffffff, 0), 1311 0xffffffff, 0),
1175 SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0, 0),
1176 SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0), 1312 SYSC_QUIRK("vfpe", 0, 0, 0x104, -1, 0x4d001200, 0xffffffff, 0),
1177#endif 1313#endif
1178}; 1314};
@@ -1245,6 +1381,121 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
1245 } 1381 }
1246} 1382}
1247 1383
1384/* 1-wire needs module's internal clocks enabled for reset */
1385static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
1386{
1387 int offset = 0x0c; /* HDQ_CTRL_STATUS */
1388 u16 val;
1389
1390 val = sysc_read(ddata, offset);
1391 val |= BIT(5);
1392 sysc_write(ddata, offset, val);
1393}
1394
1395/* I2C needs extra enable bit toggling for reset */
1396static void sysc_clk_quirk_i2c(struct sysc *ddata, bool enable)
1397{
1398 int offset;
1399 u16 val;
1400
1401 /* I2C_CON, omap2/3 is different from omap4 and later */
1402 if ((ddata->revision & 0xffffff00) == 0x001f0000)
1403 offset = 0x24;
1404 else
1405 offset = 0xa4;
1406
1407 /* I2C_EN */
1408 val = sysc_read(ddata, offset);
1409 if (enable)
1410 val |= BIT(15);
1411 else
1412 val &= ~BIT(15);
1413 sysc_write(ddata, offset, val);
1414}
1415
1416static void sysc_clk_enable_quirk_i2c(struct sysc *ddata)
1417{
1418 sysc_clk_quirk_i2c(ddata, true);
1419}
1420
1421static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
1422{
1423 sysc_clk_quirk_i2c(ddata, false);
1424}
1425
1426/* Watchdog timer needs a disable sequence after reset */
1427static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
1428{
1429 int wps, spr, error;
1430 u32 val;
1431
1432 wps = 0x34;
1433 spr = 0x48;
1434
1435 sysc_write(ddata, spr, 0xaaaa);
1436 error = readl_poll_timeout(ddata->module_va + wps, val,
1437 !(val & 0x10), 100,
1438 MAX_MODULE_SOFTRESET_WAIT);
1439 if (error)
1440 dev_warn(ddata->dev, "wdt disable spr failed\n");
1441
1442 sysc_write(ddata, wps, 0x5555);
1443 error = readl_poll_timeout(ddata->module_va + wps, val,
1444 !(val & 0x10), 100,
1445 MAX_MODULE_SOFTRESET_WAIT);
1446 if (error)
1447 dev_warn(ddata->dev, "wdt disable wps failed\n");
1448}
1449
1450static void sysc_init_module_quirks(struct sysc *ddata)
1451{
1452 if (ddata->legacy_mode || !ddata->name)
1453 return;
1454
1455 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
1456 ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
1457
1458 return;
1459 }
1460
1461 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
1462 ddata->clk_enable_quirk = sysc_clk_enable_quirk_i2c;
1463 ddata->clk_disable_quirk = sysc_clk_disable_quirk_i2c;
1464
1465 return;
1466 }
1467
1468 if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
1469 ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
1470}
1471
1472static int sysc_clockdomain_init(struct sysc *ddata)
1473{
1474 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
1475 struct clk *fck = NULL, *ick = NULL;
1476 int error;
1477
1478 if (!pdata || !pdata->init_clockdomain)
1479 return 0;
1480
1481 switch (ddata->nr_clocks) {
1482 case 2:
1483 ick = ddata->clocks[SYSC_ICK];
1484 /* fallthrough */
1485 case 1:
1486 fck = ddata->clocks[SYSC_FCK];
1487 break;
1488 case 0:
1489 return 0;
1490 }
1491
1492 error = pdata->init_clockdomain(ddata->dev, fck, ick, &ddata->cookie);
1493 if (!error || error == -ENODEV)
1494 return 0;
1495
1496 return error;
1497}
1498
1248/* 1499/*
1249 * Note that pdata->init_module() typically does a reset first. After 1500 * Note that pdata->init_module() typically does a reset first. After
1250 * pdata->init_module() is done, PM runtime can be used for the interconnect 1501 * pdata->init_module() is done, PM runtime can be used for the interconnect
@@ -1255,7 +1506,7 @@ static int sysc_legacy_init(struct sysc *ddata)
1255 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 1506 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
1256 int error; 1507 int error;
1257 1508
1258 if (!ddata->legacy_mode || !pdata || !pdata->init_module) 1509 if (!pdata || !pdata->init_module)
1259 return 0; 1510 return 0;
1260 1511
1261 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie); 1512 error = pdata->init_module(ddata->dev, ddata->mdata, &ddata->cookie);
@@ -1280,7 +1531,7 @@ static int sysc_legacy_init(struct sysc *ddata)
1280 */ 1531 */
1281static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset) 1532static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
1282{ 1533{
1283 int error; 1534 int error, val;
1284 1535
1285 if (!ddata->rsts) 1536 if (!ddata->rsts)
1286 return 0; 1537 return 0;
@@ -1291,37 +1542,68 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
1291 return error; 1542 return error;
1292 } 1543 }
1293 1544
1294 return reset_control_deassert(ddata->rsts); 1545 error = reset_control_deassert(ddata->rsts);
1546 if (error == -EEXIST)
1547 return 0;
1548
1549 error = readx_poll_timeout(reset_control_status, ddata->rsts, val,
1550 val == 0, 100, MAX_MODULE_SOFTRESET_WAIT);
1551
1552 return error;
1295} 1553}
1296 1554
1555/*
1556 * Note that the caller must ensure the interconnect target module is enabled
1557 * before calling reset. Otherwise reset will not complete.
1558 */
1297static int sysc_reset(struct sysc *ddata) 1559static int sysc_reset(struct sysc *ddata)
1298{ 1560{
1299 int offset = ddata->offsets[SYSC_SYSCONFIG]; 1561 int sysc_offset, syss_offset, sysc_val, rstval, quirks, error = 0;
1300 int val; 1562 u32 sysc_mask, syss_done;
1563
1564 sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
1565 syss_offset = ddata->offsets[SYSC_SYSSTATUS];
1566 quirks = ddata->cfg.quirks;
1301 1567
1302 if (ddata->legacy_mode || offset < 0 || 1568 if (ddata->legacy_mode || sysc_offset < 0 ||
1569 ddata->cap->regbits->srst_shift < 0 ||
1303 ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) 1570 ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
1304 return 0; 1571 return 0;
1305 1572
1306 /* 1573 sysc_mask = BIT(ddata->cap->regbits->srst_shift);
1307 * Currently only support reset status in sysstatus.
1308 * Warn and return error in all other cases
1309 */
1310 if (!ddata->cfg.syss_mask) {
1311 dev_err(ddata->dev, "No ti,syss-mask. Reset failed\n");
1312 return -EINVAL;
1313 }
1314 1574
1315 val = sysc_read(ddata, offset); 1575 if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
1316 val |= (0x1 << ddata->cap->regbits->srst_shift); 1576 syss_done = 0;
1317 sysc_write(ddata, offset, val); 1577 else
1578 syss_done = ddata->cfg.syss_mask;
1579
1580 if (ddata->clk_disable_quirk)
1581 ddata->clk_disable_quirk(ddata);
1582
1583 sysc_val = sysc_read_sysconfig(ddata);
1584 sysc_val |= sysc_mask;
1585 sysc_write(ddata, sysc_offset, sysc_val);
1586
1587 if (ddata->clk_enable_quirk)
1588 ddata->clk_enable_quirk(ddata);
1318 1589
1319 /* Poll on reset status */ 1590 /* Poll on reset status */
1320 offset = ddata->offsets[SYSC_SYSSTATUS]; 1591 if (syss_offset >= 0) {
1592 error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
1593 (rstval & ddata->cfg.syss_mask) ==
1594 syss_done,
1595 100, MAX_MODULE_SOFTRESET_WAIT);
1596
1597 } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
1598 error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
1599 !(rstval & sysc_mask),
1600 100, MAX_MODULE_SOFTRESET_WAIT);
1601 }
1602
1603 if (ddata->reset_done_quirk)
1604 ddata->reset_done_quirk(ddata);
1321 1605
1322 return readl_poll_timeout(ddata->module_va + offset, val, 1606 return error;
1323 (val & ddata->cfg.syss_mask) == 0x0,
1324 100, MAX_MODULE_SOFTRESET_WAIT);
1325} 1607}
1326 1608
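Editor's note: the poll loops above use the readx_poll_timeout() pattern: sample a getter, test a done condition, sleep, and give up with -ETIMEDOUT once the budget is spent. A minimal userspace sketch of those semantics follows; the mock status getter and the decrementing counter are illustrative assumptions, not driver code.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

static int pending = 3;	/* pretend reset needs a few samples to clear */

static unsigned int mock_read_sysstatus(void)
{
	return pending ? pending-- : 0;	/* nonzero while reset is busy */
}

static int poll_timeout(unsigned int (*get)(void), unsigned int mask,
			unsigned long sleep_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	for (;;) {
		if (!(get() & mask))	/* reset done: status bits clear */
			return 0;
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
	}
}

int main(void)
{
	/* 100 us sample period, MAX_MODULE_SOFTRESET_WAIT-style budget */
	printf("poll result: %d\n",
	       poll_timeout(mock_read_sysstatus, 0xff, 100, 10000));
	return 0;
}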
1327/* 1609/*
@@ -1334,12 +1616,8 @@ static int sysc_init_module(struct sysc *ddata)
1334{ 1616{
1335 int error = 0; 1617 int error = 0;
1336 bool manage_clocks = true; 1618 bool manage_clocks = true;
1337 bool reset = true;
1338
1339 if (ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)
1340 reset = false;
1341 1619
1342 error = sysc_rstctrl_reset_deassert(ddata, reset); 1620 error = sysc_rstctrl_reset_deassert(ddata, false);
1343 if (error) 1621 if (error)
1344 return error; 1622 return error;
1345 1623
@@ -1347,7 +1625,13 @@ static int sysc_init_module(struct sysc *ddata)
1347 (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT)) 1625 (SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))
1348 manage_clocks = false; 1626 manage_clocks = false;
1349 1627
1628 error = sysc_clockdomain_init(ddata);
1629 if (error)
1630 return error;
1631
1350 if (manage_clocks) { 1632 if (manage_clocks) {
1633 sysc_clkdm_deny_idle(ddata);
1634
1351 error = sysc_enable_opt_clocks(ddata); 1635 error = sysc_enable_opt_clocks(ddata);
1352 if (error) 1636 if (error)
1353 return error; 1637 return error;
@@ -1357,23 +1641,43 @@ static int sysc_init_module(struct sysc *ddata)
1357 goto err_opt_clocks; 1641 goto err_opt_clocks;
1358 } 1642 }
1359 1643
1644 if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
1645 error = sysc_rstctrl_reset_deassert(ddata, true);
1646 if (error)
1647 goto err_main_clocks;
1648 }
1649
1360 ddata->revision = sysc_read_revision(ddata); 1650 ddata->revision = sysc_read_revision(ddata);
1361 sysc_init_revision_quirks(ddata); 1651 sysc_init_revision_quirks(ddata);
1652 sysc_init_module_quirks(ddata);
1362 1653
1363 error = sysc_legacy_init(ddata); 1654 if (ddata->legacy_mode) {
1364 if (error) 1655 error = sysc_legacy_init(ddata);
1365 goto err_main_clocks; 1656 if (error)
1657 goto err_main_clocks;
1658 }
1659
1660 if (!ddata->legacy_mode && manage_clocks) {
1661 error = sysc_enable_module(ddata->dev);
1662 if (error)
1663 goto err_main_clocks;
1664 }
1366 1665
1367 error = sysc_reset(ddata); 1666 error = sysc_reset(ddata);
1368 if (error) 1667 if (error)
1369 dev_err(ddata->dev, "Reset failed with %d\n", error); 1668 dev_err(ddata->dev, "Reset failed with %d\n", error);
1370 1669
1670 if (!ddata->legacy_mode && manage_clocks)
1671 sysc_disable_module(ddata->dev);
1672
1371err_main_clocks: 1673err_main_clocks:
1372 if (manage_clocks) 1674 if (manage_clocks)
1373 sysc_disable_main_clocks(ddata); 1675 sysc_disable_main_clocks(ddata);
1374err_opt_clocks: 1676err_opt_clocks:
1375 if (manage_clocks) 1677 if (manage_clocks) {
1376 sysc_disable_opt_clocks(ddata); 1678 sysc_disable_opt_clocks(ddata);
1679 sysc_clkdm_allow_idle(ddata);
1680 }
1377 1681
1378 return error; 1682 return error;
1379} 1683}
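Editor's note: the err_main_clocks/err_opt_clocks labels above follow the standard kernel goto-unwind pattern: resources are released in reverse acquisition order, and each failure jumps to the label that undoes only what already succeeded. A standalone sketch of that pattern; the clock stubs are assumptions for illustration.

#include <stdio.h>

static int enable_opt(void)    { puts("opt clocks on");     return 0; }
static int enable_main(void)   { puts("main clocks: fail"); return -1; }
static void disable_opt(void)  { puts("opt clocks off"); }
static void disable_main(void) { puts("main clocks off"); }

static int init_module_sketch(void)
{
	int error;

	error = enable_opt();
	if (error)
		return error;

	error = enable_main();
	if (error)
		goto err_opt;	/* undo only the opt clocks */

	/* ... reset and revision probing would happen here ... */

	disable_main();
err_opt:
	disable_opt();
	return error;
}

int main(void)
{
	printf("init: %d\n", init_module_sketch());
	return 0;
}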
@@ -1663,9 +1967,6 @@ static struct dev_pm_domain sysc_child_pm_domain = {
1663 */ 1967 */
1664static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child) 1968static void sysc_legacy_idle_quirk(struct sysc *ddata, struct device *child)
1665{ 1969{
1666 if (!ddata->legacy_mode)
1667 return;
1668
1669 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE) 1970 if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
1670 dev_pm_domain_set(child, &sysc_child_pm_domain); 1971 dev_pm_domain_set(child, &sysc_child_pm_domain);
1671} 1972}
@@ -2005,6 +2306,7 @@ static const struct sysc_capabilities sysc_dra7_mcan = {
2005 .type = TI_SYSC_DRA7_MCAN, 2306 .type = TI_SYSC_DRA7_MCAN,
2006 .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET, 2307 .sysc_mask = SYSC_DRA7_MCAN_ENAWAKEUP | SYSC_OMAP4_SOFTRESET,
2007 .regbits = &sysc_regbits_dra7_mcan, 2308 .regbits = &sysc_regbits_dra7_mcan,
2309 .mod_quirks = SYSS_QUIRK_RESETDONE_INVERTED,
2008}; 2310};
2009 2311
2010static int sysc_init_pdata(struct sysc *ddata) 2312static int sysc_init_pdata(struct sysc *ddata)
@@ -2012,20 +2314,22 @@ static int sysc_init_pdata(struct sysc *ddata)
2012 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev); 2314 struct ti_sysc_platform_data *pdata = dev_get_platdata(ddata->dev);
2013 struct ti_sysc_module_data *mdata; 2315 struct ti_sysc_module_data *mdata;
2014 2316
2015 if (!pdata || !ddata->legacy_mode) 2317 if (!pdata)
2016 return 0; 2318 return 0;
2017 2319
2018 mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL); 2320 mdata = devm_kzalloc(ddata->dev, sizeof(*mdata), GFP_KERNEL);
2019 if (!mdata) 2321 if (!mdata)
2020 return -ENOMEM; 2322 return -ENOMEM;
2021 2323
2022 mdata->name = ddata->legacy_mode; 2324 if (ddata->legacy_mode) {
2023 mdata->module_pa = ddata->module_pa; 2325 mdata->name = ddata->legacy_mode;
2024 mdata->module_size = ddata->module_size; 2326 mdata->module_pa = ddata->module_pa;
2025 mdata->offsets = ddata->offsets; 2327 mdata->module_size = ddata->module_size;
2026 mdata->nr_offsets = SYSC_MAX_REGS; 2328 mdata->offsets = ddata->offsets;
2027 mdata->cap = ddata->cap; 2329 mdata->nr_offsets = SYSC_MAX_REGS;
2028 mdata->cfg = &ddata->cfg; 2330 mdata->cap = ddata->cap;
2331 mdata->cfg = &ddata->cfg;
2332 }
2029 2333
2030 ddata->mdata = mdata; 2334 ddata->mdata = mdata;
2031 2335
@@ -2145,7 +2449,7 @@ static int sysc_probe(struct platform_device *pdev)
2145 } 2449 }
2146 2450
2147 if (!of_get_available_child_count(ddata->dev->of_node)) 2451 if (!of_get_available_child_count(ddata->dev->of_node))
2148 reset_control_assert(ddata->rsts); 2452 ddata->disable_on_idle = true;
2149 2453
2150 return 0; 2454 return 0;
2151 2455
diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
index 30fc04e28431..0a194af92438 100644
--- a/drivers/firmware/arm_scmi/clock.c
+++ b/drivers/firmware/arm_scmi/clock.c
@@ -185,6 +185,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
185 if (rate_discrete) 185 if (rate_discrete)
186 clk->list.num_rates = tot_rate_cnt; 186 clk->list.num_rates = tot_rate_cnt;
187 187
188 clk->rate_discrete = rate_discrete;
189
188err: 190err:
189 scmi_xfer_put(handle, t); 191 scmi_xfer_put(handle, t);
190 return ret; 192 return ret;
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index b53d5cc9c9f6..0e94ab56f679 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -30,10 +30,12 @@ struct scmi_msg_resp_sensor_description {
30 __le32 id; 30 __le32 id;
31 __le32 attributes_low; 31 __le32 attributes_low;
32#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31)) 32#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
33#define NUM_TRIP_POINTS(x) (((x) >> 4) & 0xff) 33#define NUM_TRIP_POINTS(x) ((x) & 0xff)
34 __le32 attributes_high; 34 __le32 attributes_high;
35#define SENSOR_TYPE(x) ((x) & 0xff) 35#define SENSOR_TYPE(x) ((x) & 0xff)
36#define SENSOR_SCALE(x) (((x) >> 11) & 0x3f) 36#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
37#define SENSOR_SCALE_SIGN BIT(4)
38#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
37#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f) 39#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
38#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f) 40#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
39 u8 name[SCMI_MAX_STR_SIZE]; 41 u8 name[SCMI_MAX_STR_SIZE];
@@ -140,6 +142,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
140 s = &si->sensors[desc_index + cnt]; 142 s = &si->sensors[desc_index + cnt];
141 s->id = le32_to_cpu(buf->desc[cnt].id); 143 s->id = le32_to_cpu(buf->desc[cnt].id);
142 s->type = SENSOR_TYPE(attrh); 144 s->type = SENSOR_TYPE(attrh);
145 s->scale = SENSOR_SCALE(attrh);
146 /* Sign extend to a full s8 */
147 if (s->scale & SENSOR_SCALE_SIGN)
148 s->scale |= SENSOR_SCALE_EXTEND;
143 strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE); 149 strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
144 } 150 }
145 151
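Editor's note: SENSOR_SCALE is now decoded as a 5-bit two's-complement power-of-ten exponent: bit 4 is the sign, and bits 7..5 must be filled in to widen it to a proper s8. A standalone check of that sign extension, with the masks copied from the defines above (GENMASK(7, 5) expanded by hand):

#include <stdint.h>
#include <stdio.h>

#define SENSOR_SCALE(x)		(((x) >> 11) & 0x1f)
#define SENSOR_SCALE_SIGN	(1u << 4)
#define SENSOR_SCALE_EXTEND	(0x7u << 5)	/* GENMASK(7, 5) */

static int8_t decode_scale(uint32_t attributes_high)
{
	uint8_t scale = SENSOR_SCALE(attributes_high);

	if (scale & SENSOR_SCALE_SIGN)
		scale |= SENSOR_SCALE_EXTEND;	/* sign extend to s8 */
	return (int8_t)scale;
}

int main(void)
{
	/* 0x1d in 5 bits is -3, e.g. a sensor reporting milli-units */
	printf("scale = %d\n", decode_scale(0x1du << 11));
	return 0;
}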
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 08c85099d4d0..f3659443f8c2 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -359,16 +359,16 @@ static int suspend_test_thread(void *arg)
359 for (;;) { 359 for (;;) {
360 /* Needs to be set first to avoid missing a wakeup. */ 360 /* Needs to be set first to avoid missing a wakeup. */
361 set_current_state(TASK_INTERRUPTIBLE); 361 set_current_state(TASK_INTERRUPTIBLE);
362 if (kthread_should_stop()) { 362 if (kthread_should_park())
363 __set_current_state(TASK_RUNNING);
364 break; 363 break;
365 }
366 schedule(); 364 schedule();
367 } 365 }
368 366
369 pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", 367 pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
370 cpu, nb_suspend, nb_shallow_sleep, nb_err); 368 cpu, nb_suspend, nb_shallow_sleep, nb_err);
371 369
370 kthread_parkme();
371
372 return nb_err; 372 return nb_err;
373} 373}
374 374
@@ -433,8 +433,10 @@ static int suspend_tests(void)
433 433
434 434
435 /* Stop and destroy all threads, get return status. */ 435 /* Stop and destroy all threads, get return status. */
436 for (i = 0; i < nb_threads; ++i) 436 for (i = 0; i < nb_threads; ++i) {
437 err += kthread_park(threads[i]);
437 err += kthread_stop(threads[i]); 438 err += kthread_stop(threads[i]);
439 }
438 out: 440 out:
439 cpuidle_resume_and_unlock(); 441 cpuidle_resume_and_unlock();
440 kfree(threads); 442 kfree(threads);
diff --git a/drivers/firmware/tegra/bpmp.c b/drivers/firmware/tegra/bpmp.c
index 2418abfe1fb6..19c56133234b 100644
--- a/drivers/firmware/tegra/bpmp.c
+++ b/drivers/firmware/tegra/bpmp.c
@@ -803,7 +803,9 @@ static int __maybe_unused tegra_bpmp_resume(struct device *dev)
803 return 0; 803 return 0;
804} 804}
805 805
806static SIMPLE_DEV_PM_OPS(tegra_bpmp_pm_ops, NULL, tegra_bpmp_resume); 806static const struct dev_pm_ops tegra_bpmp_pm_ops = {
807 .resume_early = tegra_bpmp_resume,
808};
807 809
808#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \ 810#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC) || \
809 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC) 811 IS_ENABLED(CONFIG_ARCH_TEGRA_194_SOC)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 7696c692ad5a..cdee0b45943d 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -466,9 +466,9 @@ static int ti_sci_cmd_get_revision(struct ti_sci_info *info)
466 struct ti_sci_xfer *xfer; 466 struct ti_sci_xfer *xfer;
467 int ret; 467 int ret;
468 468
469 /* No need to setup flags since it is expected to respond */
470 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION, 469 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_VERSION,
471 0x0, sizeof(struct ti_sci_msg_hdr), 470 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
471 sizeof(struct ti_sci_msg_hdr),
472 sizeof(*rev_info)); 472 sizeof(*rev_info));
473 if (IS_ERR(xfer)) { 473 if (IS_ERR(xfer)) {
474 ret = PTR_ERR(xfer); 474 ret = PTR_ERR(xfer);
@@ -596,9 +596,9 @@ static int ti_sci_get_device_state(const struct ti_sci_handle *handle,
596 info = handle_to_ti_sci_info(handle); 596 info = handle_to_ti_sci_info(handle);
597 dev = info->dev; 597 dev = info->dev;
598 598
599 /* Response is expected, so need of any flags */
600 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE, 599 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_DEVICE_STATE,
601 0, sizeof(*req), sizeof(*resp)); 600 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
601 sizeof(*req), sizeof(*resp));
602 if (IS_ERR(xfer)) { 602 if (IS_ERR(xfer)) {
603 ret = PTR_ERR(xfer); 603 ret = PTR_ERR(xfer);
604 dev_err(dev, "Message alloc failed(%d)\n", ret); 604 dev_err(dev, "Message alloc failed(%d)\n", ret);
@@ -2057,6 +2057,823 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
2057 ia_id, vint, global_event, vint_status_bit, 0); 2057 ia_id, vint, global_event, vint_status_bit, 0);
2058} 2058}
2059 2059
2060/**
2061 * ti_sci_cmd_ring_config() - configure RA ring
2062 * @handle: Pointer to TI SCI handle.
2063 * @valid_params: Bitfield defining validity of ring configuration
2064 * parameters
2065 * @nav_id: Device ID of Navigator Subsystem from which the ring is
2066 * allocated
2067 * @index: Ring index
2068 * @addr_lo: The ring base address lo 32 bits
2069 * @addr_hi: The ring base address hi 32 bits
2070 * @count: Number of ring elements
2071 * @mode: The mode of the ring
2072 * @size: The ring element size.
2073 * @order_id: Specifies the ring's bus order ID
2074 *
2075 * Return: 0 if all went well, else returns appropriate error value.
2076 *
2077 * See @ti_sci_msg_rm_ring_cfg_req for more info.
2078 */
2079static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
2080 u32 valid_params, u16 nav_id, u16 index,
2081 u32 addr_lo, u32 addr_hi, u32 count,
2082 u8 mode, u8 size, u8 order_id)
2083{
2084 struct ti_sci_msg_rm_ring_cfg_req *req;
2085 struct ti_sci_msg_hdr *resp;
2086 struct ti_sci_xfer *xfer;
2087 struct ti_sci_info *info;
2088 struct device *dev;
2089 int ret = 0;
2090
2091 if (IS_ERR_OR_NULL(handle))
2092 return -EINVAL;
2093
2094 info = handle_to_ti_sci_info(handle);
2095 dev = info->dev;
2096
2097 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_CFG,
2098 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2099 sizeof(*req), sizeof(*resp));
2100 if (IS_ERR(xfer)) {
2101 ret = PTR_ERR(xfer);
2102 dev_err(dev, "RM_RA:Message config failed(%d)\n", ret);
2103 return ret;
2104 }
2105 req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
2106 req->valid_params = valid_params;
2107 req->nav_id = nav_id;
2108 req->index = index;
2109 req->addr_lo = addr_lo;
2110 req->addr_hi = addr_hi;
2111 req->count = count;
2112 req->mode = mode;
2113 req->size = size;
2114 req->order_id = order_id;
2115
2116 ret = ti_sci_do_xfer(info, xfer);
2117 if (ret) {
2118 dev_err(dev, "RM_RA:Mbox config send fail %d\n", ret);
2119 goto fail;
2120 }
2121
2122 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2123 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2124
2125fail:
2126 ti_sci_put_one_xfer(&info->minfo, xfer);
2127 dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
2128 return ret;
2129}
2130
2131/**
2132 * ti_sci_cmd_ring_get_config() - get RA ring configuration
2133 * @handle: Pointer to TI SCI handle.
2134 * @nav_id: Device ID of Navigator Subsystem from which the ring is
2135 * allocated
2136 * @index: Ring index
2137 * @addr_lo: Returns ring's base address lo 32 bits
2138 * @addr_hi: Returns ring's base address hi 32 bits
2139 * @count: Returns number of ring elements
2140 * @mode: Returns mode of the ring
2141 * @size: Returns ring element size
2142 * @order_id: Returns ring's bus order ID
2143 *
2144 * Return: 0 if all went well, else returns appropriate error value.
2145 *
2146 * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
2147 */
2148static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
2149 u32 nav_id, u32 index, u8 *mode,
2150 u32 *addr_lo, u32 *addr_hi,
2151 u32 *count, u8 *size, u8 *order_id)
2152{
2153 struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
2154 struct ti_sci_msg_rm_ring_get_cfg_req *req;
2155 struct ti_sci_xfer *xfer;
2156 struct ti_sci_info *info;
2157 struct device *dev;
2158 int ret = 0;
2159
2160 if (IS_ERR_OR_NULL(handle))
2161 return -EINVAL;
2162
2163 info = handle_to_ti_sci_info(handle);
2164 dev = info->dev;
2165
2166 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
2167 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2168 sizeof(*req), sizeof(*resp));
2169 if (IS_ERR(xfer)) {
2170 ret = PTR_ERR(xfer);
2171 dev_err(dev,
2172 "RM_RA:Message get config failed(%d)\n", ret);
2173 return ret;
2174 }
2175 req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
2176 req->nav_id = nav_id;
2177 req->index = index;
2178
2179 ret = ti_sci_do_xfer(info, xfer);
2180 if (ret) {
2181 dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
2182 goto fail;
2183 }
2184
2185 resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
2186
2187 if (!ti_sci_is_response_ack(resp)) {
2188 ret = -ENODEV;
2189 } else {
2190 if (mode)
2191 *mode = resp->mode;
2192 if (addr_lo)
2193 *addr_lo = resp->addr_lo;
2194 if (addr_hi)
2195 *addr_hi = resp->addr_hi;
2196 if (count)
2197 *count = resp->count;
2198 if (size)
2199 *size = resp->size;
2200 if (order_id)
2201 *order_id = resp->order_id;
 2202 }
2203
2204fail:
2205 ti_sci_put_one_xfer(&info->minfo, xfer);
2206 dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
2207 return ret;
2208}
2209
2210/**
2211 * ti_sci_cmd_rm_psil_pair() - Pair PSI-L source to destination thread
2212 * @handle: Pointer to TI SCI handle.
2213 * @nav_id: Device ID of Navigator Subsystem which should be used for
2214 * pairing
2215 * @src_thread: Source PSI-L thread ID
2216 * @dst_thread: Destination PSI-L thread ID
2217 *
2218 * Return: 0 if all went well, else returns appropriate error value.
2219 */
2220static int ti_sci_cmd_rm_psil_pair(const struct ti_sci_handle *handle,
2221 u32 nav_id, u32 src_thread, u32 dst_thread)
2222{
2223 struct ti_sci_msg_psil_pair *req;
2224 struct ti_sci_msg_hdr *resp;
2225 struct ti_sci_xfer *xfer;
2226 struct ti_sci_info *info;
2227 struct device *dev;
2228 int ret = 0;
2229
2230 if (IS_ERR(handle))
2231 return PTR_ERR(handle);
2232 if (!handle)
2233 return -EINVAL;
2234
2235 info = handle_to_ti_sci_info(handle);
2236 dev = info->dev;
2237
2238 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_PAIR,
2239 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2240 sizeof(*req), sizeof(*resp));
2241 if (IS_ERR(xfer)) {
2242 ret = PTR_ERR(xfer);
2243 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2244 return ret;
2245 }
2246 req = (struct ti_sci_msg_psil_pair *)xfer->xfer_buf;
2247 req->nav_id = nav_id;
2248 req->src_thread = src_thread;
2249 req->dst_thread = dst_thread;
2250
2251 ret = ti_sci_do_xfer(info, xfer);
2252 if (ret) {
2253 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2254 goto fail;
2255 }
2256
2257 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2258 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2259
2260fail:
2261 ti_sci_put_one_xfer(&info->minfo, xfer);
2262
2263 return ret;
2264}
2265
2266/**
2267 * ti_sci_cmd_rm_psil_unpair() - Unpair PSI-L source from destination thread
2268 * @handle: Pointer to TI SCI handle.
2269 * @nav_id: Device ID of Navigator Subsystem which should be used for
2270 * unpairing
2271 * @src_thread: Source PSI-L thread ID
2272 * @dst_thread: Destination PSI-L thread ID
2273 *
2274 * Return: 0 if all went well, else returns appropriate error value.
2275 */
2276static int ti_sci_cmd_rm_psil_unpair(const struct ti_sci_handle *handle,
2277 u32 nav_id, u32 src_thread, u32 dst_thread)
2278{
2279 struct ti_sci_msg_psil_unpair *req;
2280 struct ti_sci_msg_hdr *resp;
2281 struct ti_sci_xfer *xfer;
2282 struct ti_sci_info *info;
2283 struct device *dev;
2284 int ret = 0;
2285
2286 if (IS_ERR(handle))
2287 return PTR_ERR(handle);
2288 if (!handle)
2289 return -EINVAL;
2290
2291 info = handle_to_ti_sci_info(handle);
2292 dev = info->dev;
2293
2294 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_PSIL_UNPAIR,
2295 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2296 sizeof(*req), sizeof(*resp));
2297 if (IS_ERR(xfer)) {
2298 ret = PTR_ERR(xfer);
2299 dev_err(dev, "RM_PSIL:Message reconfig failed(%d)\n", ret);
2300 return ret;
2301 }
2302 req = (struct ti_sci_msg_psil_unpair *)xfer->xfer_buf;
2303 req->nav_id = nav_id;
2304 req->src_thread = src_thread;
2305 req->dst_thread = dst_thread;
2306
2307 ret = ti_sci_do_xfer(info, xfer);
2308 if (ret) {
2309 dev_err(dev, "RM_PSIL:Mbox send fail %d\n", ret);
2310 goto fail;
2311 }
2312
2313 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2314 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2315
2316fail:
2317 ti_sci_put_one_xfer(&info->minfo, xfer);
2318
2319 return ret;
2320}
2321
2322/**
2323 * ti_sci_cmd_rm_udmap_tx_ch_cfg() - Configure a UDMAP TX channel
2324 * @handle: Pointer to TI SCI handle.
2325 * @params: Pointer to ti_sci_msg_rm_udmap_tx_ch_cfg TX channel config
2326 * structure
2327 *
2328 * Return: 0 if all went well, else returns appropriate error value.
2329 *
2330 * See @ti_sci_msg_rm_udmap_tx_ch_cfg and @ti_sci_msg_rm_udmap_tx_ch_cfg_req for
2331 * more info.
2332 */
2333static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
2334 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params)
2335{
2336 struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req;
2337 struct ti_sci_msg_hdr *resp;
2338 struct ti_sci_xfer *xfer;
2339 struct ti_sci_info *info;
2340 struct device *dev;
2341 int ret = 0;
2342
2343 if (IS_ERR_OR_NULL(handle))
2344 return -EINVAL;
2345
2346 info = handle_to_ti_sci_info(handle);
2347 dev = info->dev;
2348
2349 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_TX_CH_CFG,
2350 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2351 sizeof(*req), sizeof(*resp));
2352 if (IS_ERR(xfer)) {
2353 ret = PTR_ERR(xfer);
2354 dev_err(dev, "Message TX_CH_CFG alloc failed(%d)\n", ret);
2355 return ret;
2356 }
2357 req = (struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *)xfer->xfer_buf;
2358 req->valid_params = params->valid_params;
2359 req->nav_id = params->nav_id;
2360 req->index = params->index;
2361 req->tx_pause_on_err = params->tx_pause_on_err;
2362 req->tx_filt_einfo = params->tx_filt_einfo;
2363 req->tx_filt_pswords = params->tx_filt_pswords;
2364 req->tx_atype = params->tx_atype;
2365 req->tx_chan_type = params->tx_chan_type;
2366 req->tx_supr_tdpkt = params->tx_supr_tdpkt;
2367 req->tx_fetch_size = params->tx_fetch_size;
2368 req->tx_credit_count = params->tx_credit_count;
2369 req->txcq_qnum = params->txcq_qnum;
2370 req->tx_priority = params->tx_priority;
2371 req->tx_qos = params->tx_qos;
2372 req->tx_orderid = params->tx_orderid;
2373 req->fdepth = params->fdepth;
2374 req->tx_sched_priority = params->tx_sched_priority;
2375 req->tx_burst_size = params->tx_burst_size;
2376
2377 ret = ti_sci_do_xfer(info, xfer);
2378 if (ret) {
2379 dev_err(dev, "Mbox send TX_CH_CFG fail %d\n", ret);
2380 goto fail;
2381 }
2382
2383 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2384 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2385
2386fail:
2387 ti_sci_put_one_xfer(&info->minfo, xfer);
2388 dev_dbg(dev, "TX_CH_CFG: chn %u ret:%u\n", params->index, ret);
2389 return ret;
2390}
2391
2392/**
2393 * ti_sci_cmd_rm_udmap_rx_ch_cfg() - Configure a UDMAP RX channel
2394 * @handle: Pointer to TI SCI handle.
2395 * @params: Pointer to ti_sci_msg_rm_udmap_rx_ch_cfg RX channel config
2396 * structure
2397 *
2398 * Return: 0 if all went well, else returns appropriate error value.
2399 *
2400 * See @ti_sci_msg_rm_udmap_rx_ch_cfg and @ti_sci_msg_rm_udmap_rx_ch_cfg_req for
2401 * more info.
2402 */
2403static int ti_sci_cmd_rm_udmap_rx_ch_cfg(const struct ti_sci_handle *handle,
2404 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params)
2405{
2406 struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req;
2407 struct ti_sci_msg_hdr *resp;
2408 struct ti_sci_xfer *xfer;
2409 struct ti_sci_info *info;
2410 struct device *dev;
2411 int ret = 0;
2412
2413 if (IS_ERR_OR_NULL(handle))
2414 return -EINVAL;
2415
2416 info = handle_to_ti_sci_info(handle);
2417 dev = info->dev;
2418
2419 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_RX_CH_CFG,
2420 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2421 sizeof(*req), sizeof(*resp));
2422 if (IS_ERR(xfer)) {
2423 ret = PTR_ERR(xfer);
2424 dev_err(dev, "Message RX_CH_CFG alloc failed(%d)\n", ret);
2425 return ret;
2426 }
2427 req = (struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *)xfer->xfer_buf;
2428 req->valid_params = params->valid_params;
2429 req->nav_id = params->nav_id;
2430 req->index = params->index;
2431 req->rx_fetch_size = params->rx_fetch_size;
2432 req->rxcq_qnum = params->rxcq_qnum;
2433 req->rx_priority = params->rx_priority;
2434 req->rx_qos = params->rx_qos;
2435 req->rx_orderid = params->rx_orderid;
2436 req->rx_sched_priority = params->rx_sched_priority;
2437 req->flowid_start = params->flowid_start;
2438 req->flowid_cnt = params->flowid_cnt;
2439 req->rx_pause_on_err = params->rx_pause_on_err;
2440 req->rx_atype = params->rx_atype;
2441 req->rx_chan_type = params->rx_chan_type;
2442 req->rx_ignore_short = params->rx_ignore_short;
2443 req->rx_ignore_long = params->rx_ignore_long;
2444 req->rx_burst_size = params->rx_burst_size;
2445
2446 ret = ti_sci_do_xfer(info, xfer);
2447 if (ret) {
2448 dev_err(dev, "Mbox send RX_CH_CFG fail %d\n", ret);
2449 goto fail;
2450 }
2451
2452 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2453 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2454
2455fail:
2456 ti_sci_put_one_xfer(&info->minfo, xfer);
2457 dev_dbg(dev, "RX_CH_CFG: chn %u ret:%d\n", params->index, ret);
2458 return ret;
2459}
2460
2461/**
2462 * ti_sci_cmd_rm_udmap_rx_flow_cfg() - Configure UDMAP RX FLOW
2463 * @handle: Pointer to TI SCI handle.
2464 * @params: Pointer to ti_sci_msg_rm_udmap_flow_cfg RX FLOW config
2465 * structure
2466 *
2467 * Return: 0 if all went well, else returns appropriate error value.
2468 *
2469 * See @ti_sci_msg_rm_udmap_flow_cfg and @ti_sci_msg_rm_udmap_flow_cfg_req for
2470 * more info.
2471 */
2472static int ti_sci_cmd_rm_udmap_rx_flow_cfg(const struct ti_sci_handle *handle,
2473 const struct ti_sci_msg_rm_udmap_flow_cfg *params)
2474{
2475 struct ti_sci_msg_rm_udmap_flow_cfg_req *req;
2476 struct ti_sci_msg_hdr *resp;
2477 struct ti_sci_xfer *xfer;
2478 struct ti_sci_info *info;
2479 struct device *dev;
2480 int ret = 0;
2481
2482 if (IS_ERR_OR_NULL(handle))
2483 return -EINVAL;
2484
2485 info = handle_to_ti_sci_info(handle);
2486 dev = info->dev;
2487
2488 xfer = ti_sci_get_one_xfer(info, TISCI_MSG_RM_UDMAP_FLOW_CFG,
2489 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2490 sizeof(*req), sizeof(*resp));
2491 if (IS_ERR(xfer)) {
2492 ret = PTR_ERR(xfer);
2493 dev_err(dev, "RX_FL_CFG: Message alloc failed(%d)\n", ret);
2494 return ret;
2495 }
2496 req = (struct ti_sci_msg_rm_udmap_flow_cfg_req *)xfer->xfer_buf;
2497 req->valid_params = params->valid_params;
2498 req->nav_id = params->nav_id;
2499 req->flow_index = params->flow_index;
2500 req->rx_einfo_present = params->rx_einfo_present;
2501 req->rx_psinfo_present = params->rx_psinfo_present;
2502 req->rx_error_handling = params->rx_error_handling;
2503 req->rx_desc_type = params->rx_desc_type;
2504 req->rx_sop_offset = params->rx_sop_offset;
2505 req->rx_dest_qnum = params->rx_dest_qnum;
2506 req->rx_src_tag_hi = params->rx_src_tag_hi;
2507 req->rx_src_tag_lo = params->rx_src_tag_lo;
2508 req->rx_dest_tag_hi = params->rx_dest_tag_hi;
2509 req->rx_dest_tag_lo = params->rx_dest_tag_lo;
2510 req->rx_src_tag_hi_sel = params->rx_src_tag_hi_sel;
2511 req->rx_src_tag_lo_sel = params->rx_src_tag_lo_sel;
2512 req->rx_dest_tag_hi_sel = params->rx_dest_tag_hi_sel;
2513 req->rx_dest_tag_lo_sel = params->rx_dest_tag_lo_sel;
2514 req->rx_fdq0_sz0_qnum = params->rx_fdq0_sz0_qnum;
2515 req->rx_fdq1_qnum = params->rx_fdq1_qnum;
2516 req->rx_fdq2_qnum = params->rx_fdq2_qnum;
2517 req->rx_fdq3_qnum = params->rx_fdq3_qnum;
2518 req->rx_ps_location = params->rx_ps_location;
2519
2520 ret = ti_sci_do_xfer(info, xfer);
2521 if (ret) {
2522 dev_err(dev, "RX_FL_CFG: Mbox send fail %d\n", ret);
2523 goto fail;
2524 }
2525
2526 resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
2527 ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
2528
2529fail:
2530 ti_sci_put_one_xfer(&info->minfo, xfer);
2531 dev_dbg(info->dev, "RX_FL_CFG: %u ret:%d\n", params->flow_index, ret);
2532 return ret;
2533}
2534
2535/**
 2536 * ti_sci_cmd_proc_request() - Command to request control of a physical processor
2537 * @handle: Pointer to TI SCI handle
2538 * @proc_id: Processor ID this request is for
2539 *
2540 * Return: 0 if all went well, else returns appropriate error value.
2541 */
2542static int ti_sci_cmd_proc_request(const struct ti_sci_handle *handle,
2543 u8 proc_id)
2544{
2545 struct ti_sci_msg_req_proc_request *req;
2546 struct ti_sci_msg_hdr *resp;
2547 struct ti_sci_info *info;
2548 struct ti_sci_xfer *xfer;
2549 struct device *dev;
2550 int ret = 0;
2551
2552 if (!handle)
2553 return -EINVAL;
2554 if (IS_ERR(handle))
2555 return PTR_ERR(handle);
2556
2557 info = handle_to_ti_sci_info(handle);
2558 dev = info->dev;
2559
2560 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_REQUEST,
2561 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2562 sizeof(*req), sizeof(*resp));
2563 if (IS_ERR(xfer)) {
2564 ret = PTR_ERR(xfer);
2565 dev_err(dev, "Message alloc failed(%d)\n", ret);
2566 return ret;
2567 }
2568 req = (struct ti_sci_msg_req_proc_request *)xfer->xfer_buf;
2569 req->processor_id = proc_id;
2570
2571 ret = ti_sci_do_xfer(info, xfer);
2572 if (ret) {
2573 dev_err(dev, "Mbox send fail %d\n", ret);
2574 goto fail;
2575 }
2576
2577 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2578
2579 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2580
2581fail:
2582 ti_sci_put_one_xfer(&info->minfo, xfer);
2583
2584 return ret;
2585}
2586
2587/**
 2588 * ti_sci_cmd_proc_release() - Command to release control of a physical processor
2589 * @handle: Pointer to TI SCI handle
2590 * @proc_id: Processor ID this request is for
2591 *
2592 * Return: 0 if all went well, else returns appropriate error value.
2593 */
2594static int ti_sci_cmd_proc_release(const struct ti_sci_handle *handle,
2595 u8 proc_id)
2596{
2597 struct ti_sci_msg_req_proc_release *req;
2598 struct ti_sci_msg_hdr *resp;
2599 struct ti_sci_info *info;
2600 struct ti_sci_xfer *xfer;
2601 struct device *dev;
2602 int ret = 0;
2603
2604 if (!handle)
2605 return -EINVAL;
2606 if (IS_ERR(handle))
2607 return PTR_ERR(handle);
2608
2609 info = handle_to_ti_sci_info(handle);
2610 dev = info->dev;
2611
2612 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_RELEASE,
2613 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2614 sizeof(*req), sizeof(*resp));
2615 if (IS_ERR(xfer)) {
2616 ret = PTR_ERR(xfer);
2617 dev_err(dev, "Message alloc failed(%d)\n", ret);
2618 return ret;
2619 }
2620 req = (struct ti_sci_msg_req_proc_release *)xfer->xfer_buf;
2621 req->processor_id = proc_id;
2622
2623 ret = ti_sci_do_xfer(info, xfer);
2624 if (ret) {
2625 dev_err(dev, "Mbox send fail %d\n", ret);
2626 goto fail;
2627 }
2628
2629 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2630
2631 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2632
2633fail:
2634 ti_sci_put_one_xfer(&info->minfo, xfer);
2635
2636 return ret;
2637}
2638
2639/**
 2640 * ti_sci_cmd_proc_handover() - Command to hand over control of a physical
 2641 * processor to a host in the processor's
 2642 * access control list.
2643 * @handle: Pointer to TI SCI handle
2644 * @proc_id: Processor ID this request is for
2645 * @host_id: Host ID to get the control of the processor
2646 *
2647 * Return: 0 if all went well, else returns appropriate error value.
2648 */
2649static int ti_sci_cmd_proc_handover(const struct ti_sci_handle *handle,
2650 u8 proc_id, u8 host_id)
2651{
2652 struct ti_sci_msg_req_proc_handover *req;
2653 struct ti_sci_msg_hdr *resp;
2654 struct ti_sci_info *info;
2655 struct ti_sci_xfer *xfer;
2656 struct device *dev;
2657 int ret = 0;
2658
2659 if (!handle)
2660 return -EINVAL;
2661 if (IS_ERR(handle))
2662 return PTR_ERR(handle);
2663
2664 info = handle_to_ti_sci_info(handle);
2665 dev = info->dev;
2666
2667 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_PROC_HANDOVER,
2668 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2669 sizeof(*req), sizeof(*resp));
2670 if (IS_ERR(xfer)) {
2671 ret = PTR_ERR(xfer);
2672 dev_err(dev, "Message alloc failed(%d)\n", ret);
2673 return ret;
2674 }
2675 req = (struct ti_sci_msg_req_proc_handover *)xfer->xfer_buf;
2676 req->processor_id = proc_id;
2677 req->host_id = host_id;
2678
2679 ret = ti_sci_do_xfer(info, xfer);
2680 if (ret) {
2681 dev_err(dev, "Mbox send fail %d\n", ret);
2682 goto fail;
2683 }
2684
2685 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2686
2687 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2688
2689fail:
2690 ti_sci_put_one_xfer(&info->minfo, xfer);
2691
2692 return ret;
2693}
2694
2695/**
2696 * ti_sci_cmd_proc_set_config() - Command to set the processor boot
2697 * configuration flags
2698 * @handle: Pointer to TI SCI handle
2699 * @proc_id: Processor ID this request is for
2700 * @config_flags_set: Configuration flags to be set
2701 * @config_flags_clear: Configuration flags to be cleared.
2702 *
2703 * Return: 0 if all went well, else returns appropriate error value.
2704 */
2705static int ti_sci_cmd_proc_set_config(const struct ti_sci_handle *handle,
2706 u8 proc_id, u64 bootvector,
2707 u32 config_flags_set,
2708 u32 config_flags_clear)
2709{
2710 struct ti_sci_msg_req_set_config *req;
2711 struct ti_sci_msg_hdr *resp;
2712 struct ti_sci_info *info;
2713 struct ti_sci_xfer *xfer;
2714 struct device *dev;
2715 int ret = 0;
2716
2717 if (!handle)
2718 return -EINVAL;
2719 if (IS_ERR(handle))
2720 return PTR_ERR(handle);
2721
2722 info = handle_to_ti_sci_info(handle);
2723 dev = info->dev;
2724
2725 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CONFIG,
2726 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2727 sizeof(*req), sizeof(*resp));
2728 if (IS_ERR(xfer)) {
2729 ret = PTR_ERR(xfer);
2730 dev_err(dev, "Message alloc failed(%d)\n", ret);
2731 return ret;
2732 }
2733 req = (struct ti_sci_msg_req_set_config *)xfer->xfer_buf;
2734 req->processor_id = proc_id;
2735 req->bootvector_low = bootvector & TI_SCI_ADDR_LOW_MASK;
2736 req->bootvector_high = (bootvector & TI_SCI_ADDR_HIGH_MASK) >>
2737 TI_SCI_ADDR_HIGH_SHIFT;
2738 req->config_flags_set = config_flags_set;
2739 req->config_flags_clear = config_flags_clear;
2740
2741 ret = ti_sci_do_xfer(info, xfer);
2742 if (ret) {
2743 dev_err(dev, "Mbox send fail %d\n", ret);
2744 goto fail;
2745 }
2746
2747 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2748
2749 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2750
2751fail:
2752 ti_sci_put_one_xfer(&info->minfo, xfer);
2753
2754 return ret;
2755}
2756
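Editor's note: the 64-bit boot vector is carried as two 32-bit message fields. A standalone sketch of the split and reassembly; the mask and shift values are assumptions matching a plain 32/32 split, since the TI_SCI_ADDR_* definitions are outside this hunk.

#include <stdint.h>
#include <stdio.h>

#define ADDR_LOW_MASK	0x00000000ffffffffull	/* assumed GENMASK_ULL(31, 0) */
#define ADDR_HIGH_MASK	0xffffffff00000000ull	/* assumed GENMASK_ULL(63, 32) */
#define ADDR_HIGH_SHIFT	32

int main(void)
{
	uint64_t bootvector = 0x000000abcd001000ull;
	uint32_t low  = bootvector & ADDR_LOW_MASK;
	uint32_t high = (bootvector & ADDR_HIGH_MASK) >> ADDR_HIGH_SHIFT;
	uint64_t back = ((uint64_t)high << ADDR_HIGH_SHIFT) | low;

	printf("low=0x%08x high=0x%08x roundtrip=%d\n",
	       (unsigned int)low, (unsigned int)high, back == bootvector);
	return 0;
}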
2757/**
2758 * ti_sci_cmd_proc_set_control() - Command to set the processor boot
2759 * control flags
2760 * @handle: Pointer to TI SCI handle
2761 * @proc_id: Processor ID this request is for
2762 * @control_flags_set: Control flags to be set
2763 * @control_flags_clear: Control flags to be cleared
2764 *
2765 * Return: 0 if all went well, else returns appropriate error value.
2766 */
2767static int ti_sci_cmd_proc_set_control(const struct ti_sci_handle *handle,
2768 u8 proc_id, u32 control_flags_set,
2769 u32 control_flags_clear)
2770{
2771 struct ti_sci_msg_req_set_ctrl *req;
2772 struct ti_sci_msg_hdr *resp;
2773 struct ti_sci_info *info;
2774 struct ti_sci_xfer *xfer;
2775 struct device *dev;
2776 int ret = 0;
2777
2778 if (!handle)
2779 return -EINVAL;
2780 if (IS_ERR(handle))
2781 return PTR_ERR(handle);
2782
2783 info = handle_to_ti_sci_info(handle);
2784 dev = info->dev;
2785
2786 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_SET_CTRL,
2787 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2788 sizeof(*req), sizeof(*resp));
2789 if (IS_ERR(xfer)) {
2790 ret = PTR_ERR(xfer);
2791 dev_err(dev, "Message alloc failed(%d)\n", ret);
2792 return ret;
2793 }
2794 req = (struct ti_sci_msg_req_set_ctrl *)xfer->xfer_buf;
2795 req->processor_id = proc_id;
2796 req->control_flags_set = control_flags_set;
2797 req->control_flags_clear = control_flags_clear;
2798
2799 ret = ti_sci_do_xfer(info, xfer);
2800 if (ret) {
2801 dev_err(dev, "Mbox send fail %d\n", ret);
2802 goto fail;
2803 }
2804
2805 resp = (struct ti_sci_msg_hdr *)xfer->tx_message.buf;
2806
2807 ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
2808
2809fail:
2810 ti_sci_put_one_xfer(&info->minfo, xfer);
2811
2812 return ret;
2813}
2814
2815/**
 2816 * ti_sci_cmd_proc_get_status() - Command to get the processor boot status
2817 * @handle: Pointer to TI SCI handle
2818 * @proc_id: Processor ID this request is for
2819 *
2820 * Return: 0 if all went well, else returns appropriate error value.
2821 */
2822static int ti_sci_cmd_proc_get_status(const struct ti_sci_handle *handle,
2823 u8 proc_id, u64 *bv, u32 *cfg_flags,
2824 u32 *ctrl_flags, u32 *sts_flags)
2825{
2826 struct ti_sci_msg_resp_get_status *resp;
2827 struct ti_sci_msg_req_get_status *req;
2828 struct ti_sci_info *info;
2829 struct ti_sci_xfer *xfer;
2830 struct device *dev;
2831 int ret = 0;
2832
2833 if (!handle)
2834 return -EINVAL;
2835 if (IS_ERR(handle))
2836 return PTR_ERR(handle);
2837
2838 info = handle_to_ti_sci_info(handle);
2839 dev = info->dev;
2840
2841 xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_GET_STATUS,
2842 TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
2843 sizeof(*req), sizeof(*resp));
2844 if (IS_ERR(xfer)) {
2845 ret = PTR_ERR(xfer);
2846 dev_err(dev, "Message alloc failed(%d)\n", ret);
2847 return ret;
2848 }
2849 req = (struct ti_sci_msg_req_get_status *)xfer->xfer_buf;
2850 req->processor_id = proc_id;
2851
2852 ret = ti_sci_do_xfer(info, xfer);
2853 if (ret) {
2854 dev_err(dev, "Mbox send fail %d\n", ret);
2855 goto fail;
2856 }
2857
2858 resp = (struct ti_sci_msg_resp_get_status *)xfer->tx_message.buf;
2859
2860 if (!ti_sci_is_response_ack(resp)) {
2861 ret = -ENODEV;
2862 } else {
2863 *bv = (resp->bootvector_low & TI_SCI_ADDR_LOW_MASK) |
2864 (((u64)resp->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) &
2865 TI_SCI_ADDR_HIGH_MASK);
2866 *cfg_flags = resp->config_flags;
2867 *ctrl_flags = resp->control_flags;
2868 *sts_flags = resp->status_flags;
2869 }
2870
2871fail:
2872 ti_sci_put_one_xfer(&info->minfo, xfer);
2873
2874 return ret;
2875}
2876
2060/* 2877/*
2061 * ti_sci_setup_ops() - Setup the operations structures 2878 * ti_sci_setup_ops() - Setup the operations structures
2062 * @info: pointer to TISCI pointer 2879 * @info: pointer to TISCI pointer
@@ -2069,6 +2886,10 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
2069 struct ti_sci_clk_ops *cops = &ops->clk_ops; 2886 struct ti_sci_clk_ops *cops = &ops->clk_ops;
2070 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops; 2887 struct ti_sci_rm_core_ops *rm_core_ops = &ops->rm_core_ops;
2071 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops; 2888 struct ti_sci_rm_irq_ops *iops = &ops->rm_irq_ops;
2889 struct ti_sci_rm_ringacc_ops *rops = &ops->rm_ring_ops;
2890 struct ti_sci_rm_psil_ops *psilops = &ops->rm_psil_ops;
2891 struct ti_sci_rm_udmap_ops *udmap_ops = &ops->rm_udmap_ops;
2892 struct ti_sci_proc_ops *pops = &ops->proc_ops;
2072 2893
2073 core_ops->reboot_device = ti_sci_cmd_core_reboot; 2894 core_ops->reboot_device = ti_sci_cmd_core_reboot;
2074 2895
@@ -2108,6 +2929,23 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
2108 iops->set_event_map = ti_sci_cmd_set_event_map; 2929 iops->set_event_map = ti_sci_cmd_set_event_map;
2109 iops->free_irq = ti_sci_cmd_free_irq; 2930 iops->free_irq = ti_sci_cmd_free_irq;
2110 iops->free_event_map = ti_sci_cmd_free_event_map; 2931 iops->free_event_map = ti_sci_cmd_free_event_map;
2932
2933 rops->config = ti_sci_cmd_ring_config;
2934 rops->get_config = ti_sci_cmd_ring_get_config;
2935
2936 psilops->pair = ti_sci_cmd_rm_psil_pair;
2937 psilops->unpair = ti_sci_cmd_rm_psil_unpair;
2938
2939 udmap_ops->tx_ch_cfg = ti_sci_cmd_rm_udmap_tx_ch_cfg;
2940 udmap_ops->rx_ch_cfg = ti_sci_cmd_rm_udmap_rx_ch_cfg;
2941 udmap_ops->rx_flow_cfg = ti_sci_cmd_rm_udmap_rx_flow_cfg;
2942
2943 pops->request = ti_sci_cmd_proc_request;
2944 pops->release = ti_sci_cmd_proc_release;
2945 pops->handover = ti_sci_cmd_proc_handover;
2946 pops->set_config = ti_sci_cmd_proc_set_config;
2947 pops->set_control = ti_sci_cmd_proc_set_control;
2948 pops->get_status = ti_sci_cmd_proc_get_status;
2111} 2949}
2112 2950
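Editor's note: ti_sci_setup_ops() publishes the command helpers through per-domain ops tables, so consumers program against function pointers rather than calling the helpers directly. A minimal standalone sketch of that wiring; the names are illustrative, not the TISCI API.

#include <stdio.h>

struct ring_ops {
	int (*config)(unsigned int index);
	int (*get_config)(unsigned int index);
};

static int cmd_ring_config(unsigned int index)
{
	printf("configure ring %u\n", index);
	return 0;
}

static int cmd_ring_get_config(unsigned int index)
{
	printf("query ring %u\n", index);
	return 0;
}

static void setup_ops(struct ring_ops *rops)
{
	rops->config = cmd_ring_config;
	rops->get_config = cmd_ring_get_config;
}

int main(void)
{
	struct ring_ops rops;

	setup_ops(&rops);
	return rops.config(3) | rops.get_config(3);
}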
2113/** 2951/**
@@ -2395,6 +3233,7 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2395 struct device *dev, u32 dev_id, char *of_prop) 3233 struct device *dev, u32 dev_id, char *of_prop)
2396{ 3234{
2397 struct ti_sci_resource *res; 3235 struct ti_sci_resource *res;
3236 bool valid_set = false;
2398 u32 resource_subtype; 3237 u32 resource_subtype;
2399 int i, ret; 3238 int i, ret;
2400 3239
@@ -2426,15 +3265,18 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2426 &res->desc[i].start, 3265 &res->desc[i].start,
2427 &res->desc[i].num); 3266 &res->desc[i].num);
2428 if (ret) { 3267 if (ret) {
2429 dev_err(dev, "dev = %d subtype %d not allocated for this host\n", 3268 dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
2430 dev_id, resource_subtype); 3269 dev_id, resource_subtype);
2431 return ERR_PTR(ret); 3270 res->desc[i].start = 0;
3271 res->desc[i].num = 0;
3272 continue;
2432 } 3273 }
2433 3274
2434 dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n", 3275 dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
2435 dev_id, resource_subtype, res->desc[i].start, 3276 dev_id, resource_subtype, res->desc[i].start,
2436 res->desc[i].num); 3277 res->desc[i].num);
2437 3278
3279 valid_set = true;
2438 res->desc[i].res_map = 3280 res->desc[i].res_map =
2439 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) * 3281 devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
2440 sizeof(*res->desc[i].res_map), GFP_KERNEL); 3282 sizeof(*res->desc[i].res_map), GFP_KERNEL);
@@ -2443,7 +3285,10 @@ devm_ti_sci_get_of_resource(const struct ti_sci_handle *handle,
2443 } 3285 }
2444 raw_spin_lock_init(&res->lock); 3286 raw_spin_lock_init(&res->lock);
2445 3287
2446 return res; 3288 if (valid_set)
3289 return res;
3290
3291 return ERR_PTR(-EINVAL);
2447} 3292}
2448 3293
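Editor's note: the valid_set change above turns a per-subtype lookup failure into a soft skip: a range not allocated to this host is zeroed and ignored, and only an entirely empty result is an error. A standalone sketch of that accumulation pattern; the lookup stub is an assumption for illustration.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int lookup(int subtype, int *start, int *num)
{
	if (subtype != 1)	/* pretend only subtype 1 is ours */
		return -ENODEV;
	*start = 100;
	*num = 8;
	return 0;
}

static int get_resource_sketch(const int *subtypes, int count)
{
	bool valid_set = false;
	int start, num, i;

	for (i = 0; i < count; i++) {
		if (lookup(subtypes[i], &start, &num))
			continue;	/* not ours: skip, do not fail */
		printf("subtype %d: start=%d num=%d\n",
		       subtypes[i], start, num);
		valid_set = true;
	}
	return valid_set ? 0 : -EINVAL;
}

int main(void)
{
	const int subtypes[] = { 0, 1, 2 };

	printf("result: %d\n", get_resource_sketch(subtypes, 3));
	return 0;
}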
2449static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode, 3294static int tisci_reboot_handler(struct notifier_block *nb, unsigned long mode,
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 414e0ced5409..f0d068c03944 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -42,6 +42,43 @@
42#define TI_SCI_MSG_SET_IRQ 0x1000 42#define TI_SCI_MSG_SET_IRQ 0x1000
43#define TI_SCI_MSG_FREE_IRQ 0x1001 43#define TI_SCI_MSG_FREE_IRQ 0x1001
44 44
45/* NAVSS resource management */
46/* Ringacc requests */
47#define TI_SCI_MSG_RM_RING_ALLOCATE 0x1100
48#define TI_SCI_MSG_RM_RING_FREE 0x1101
49#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
50#define TI_SCI_MSG_RM_RING_RESET 0x1103
51#define TI_SCI_MSG_RM_RING_CFG 0x1110
52#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
53
54/* PSI-L requests */
55#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
56#define TI_SCI_MSG_RM_PSIL_UNPAIR 0x1281
57
58#define TI_SCI_MSG_RM_UDMAP_TX_ALLOC 0x1200
59#define TI_SCI_MSG_RM_UDMAP_TX_FREE 0x1201
60#define TI_SCI_MSG_RM_UDMAP_RX_ALLOC 0x1210
61#define TI_SCI_MSG_RM_UDMAP_RX_FREE 0x1211
62#define TI_SCI_MSG_RM_UDMAP_FLOW_CFG 0x1220
63#define TI_SCI_MSG_RM_UDMAP_OPT_FLOW_CFG 0x1221
64
65#define TISCI_MSG_RM_UDMAP_TX_CH_CFG 0x1205
66#define TISCI_MSG_RM_UDMAP_TX_CH_GET_CFG 0x1206
67#define TISCI_MSG_RM_UDMAP_RX_CH_CFG 0x1215
68#define TISCI_MSG_RM_UDMAP_RX_CH_GET_CFG 0x1216
69#define TISCI_MSG_RM_UDMAP_FLOW_CFG 0x1230
70#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_CFG 0x1231
71#define TISCI_MSG_RM_UDMAP_FLOW_GET_CFG 0x1232
72#define TISCI_MSG_RM_UDMAP_FLOW_SIZE_THRESH_GET_CFG 0x1233
73
74/* Processor Control requests */
75#define TI_SCI_MSG_PROC_REQUEST 0xc000
76#define TI_SCI_MSG_PROC_RELEASE 0xc001
77#define TI_SCI_MSG_PROC_HANDOVER 0xc005
78#define TI_SCI_MSG_SET_CONFIG 0xc100
79#define TI_SCI_MSG_SET_CTRL 0xc101
80#define TI_SCI_MSG_GET_STATUS 0xc400
81
45/** 82/**
46 * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses 83 * struct ti_sci_msg_hdr - Generic Message Header for All messages and responses
47 * @type: Type of messages: One of TI_SCI_MSG* values 84 * @type: Type of messages: One of TI_SCI_MSG* values
@@ -604,4 +641,777 @@ struct ti_sci_msg_req_manage_irq {
604 u8 secondary_host; 641 u8 secondary_host;
605} __packed; 642} __packed;
606 643
644/**
645 * struct ti_sci_msg_rm_ring_cfg_req - Configure a Navigator Subsystem ring
646 *
647 * Configures the non-real-time registers of a Navigator Subsystem ring.
648 * @hdr: Generic Header
649 * @valid_params: Bitfield defining validity of ring configuration parameters.
650 * The ring configuration fields are not valid, and will not be used for
651 * ring configuration, if their corresponding valid bit is zero.
652 * Valid bit usage:
653 * 0 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_lo
654 * 1 - Valid bit for @tisci_msg_rm_ring_cfg_req addr_hi
655 * 2 - Valid bit for @tisci_msg_rm_ring_cfg_req count
656 * 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
657 * 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
658 * 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
659 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
660 * @index: ring index to be configured.
661 * @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
662 * RING_BA_LO register
663 * @addr_hi: 16 MSBs of ring base address to be programmed into the ring's
664 * RING_BA_HI register.
 665 * @count: Number of ring elements. Must be even for CREDENTIALS or QM
 666 * modes.
667 * @mode: Specifies the mode the ring is to be configured.
668 * @size: Specifies encoded ring element size. To calculate the encoded size use
669 * the formula (log2(size_bytes) - 2), where size_bytes cannot be
670 * greater than 256.
671 * @order_id: Specifies the ring's bus order ID.
672 */
673struct ti_sci_msg_rm_ring_cfg_req {
674 struct ti_sci_msg_hdr hdr;
675 u32 valid_params;
676 u16 nav_id;
677 u16 index;
678 u32 addr_lo;
679 u32 addr_hi;
680 u32 count;
681 u8 mode;
682 u8 size;
683 u8 order_id;
684} __packed;
685
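Editor's note: the encoded @size follows the stated formula log2(size_bytes) - 2, with size_bytes at most 256 (and hence at least 4 for a non-negative encoding). A standalone encode/decode pair under exactly those rules:

#include <stdio.h>

static int ring_encode_size(unsigned int size_bytes)
{
	unsigned int enc = 0;

	/* power of two in 4..256, per the formula above */
	if (size_bytes < 4 || size_bytes > 256 ||
	    (size_bytes & (size_bytes - 1)))
		return -1;
	while ((4u << enc) < size_bytes)
		enc++;
	return (int)enc;	/* log2(size_bytes) - 2 */
}

static unsigned int ring_decode_size(unsigned int enc)
{
	return 4u << enc;
}

int main(void)
{
	printf("64-byte elements encode to %d\n", ring_encode_size(64));
	printf("encoding 4 decodes to %u bytes\n", ring_decode_size(4));
	return 0;
}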
686/**
687 * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
688 *
689 * Gets the configuration of the non-real-time register fields of a ring. The
690 * host, or a supervisor of the host, who owns the ring must be the requesting
691 * host. The values of the non-real-time registers are returned in
692 * @ti_sci_msg_rm_ring_get_cfg_resp.
693 *
694 * @hdr: Generic Header
695 * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
696 * @index: ring index.
697 */
698struct ti_sci_msg_rm_ring_get_cfg_req {
699 struct ti_sci_msg_hdr hdr;
700 u16 nav_id;
701 u16 index;
702} __packed;
703
704/**
705 * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
706 *
707 * Response received by host processor after RM has handled
708 * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
709 * non-real-time register values.
710 *
711 * @hdr: Generic Header
712 * @addr_lo: Ring 32 LSBs of base address
713 * @addr_hi: Ring 16 MSBs of base address.
714 * @count: Ring number of elements.
715 * @mode: Ring mode.
716 * @size: encoded Ring element size
 717 * @order_id: Ring order ID.
718 */
719struct ti_sci_msg_rm_ring_get_cfg_resp {
720 struct ti_sci_msg_hdr hdr;
721 u32 addr_lo;
722 u32 addr_hi;
723 u32 count;
724 u8 mode;
725 u8 size;
726 u8 order_id;
727} __packed;
728
729/**
730 * struct ti_sci_msg_psil_pair - Pairs a PSI-L source thread to a destination
731 * thread
732 * @hdr: Generic Header
733 * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
734 * used to pair the source and destination threads.
735 * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
736 *
737 * UDMAP transmit channels mapped to source threads will have their
738 * TCHAN_THRD_ID register programmed with the destination thread if the pairing
739 * is successful.
 740 *
741 * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
742 * PSI-L destination threads start at index 0x8000. The request is NACK'd if
743 * the destination thread is not greater than or equal to 0x8000.
744 *
745 * UDMAP receive channels mapped to destination threads will have their
746 * RCHAN_THRD_ID register programmed with the source thread if the pairing
747 * is successful.
748 *
749 * Request type is TI_SCI_MSG_RM_PSIL_PAIR, response is a generic ACK or NACK
750 * message.
751 */
752struct ti_sci_msg_psil_pair {
753 struct ti_sci_msg_hdr hdr;
754 u32 nav_id;
755 u32 src_thread;
756 u32 dst_thread;
757} __packed;
758
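Editor's note: per the comment above, destination threads live at offset 0x8000 in the PSI-L thread map, and firmware NACKs any pairing whose destination is below that. A tiny local validity check mirroring the rule; the helper name is an illustration, not a driver API.

#include <stdbool.h>
#include <stdio.h>

#define PSIL_DST_THREAD_OFFSET	0x8000u

static bool psil_dst_thread_valid(unsigned int dst_thread)
{
	return dst_thread >= PSIL_DST_THREAD_OFFSET;
}

int main(void)
{
	printf("0x8001 valid: %d\n", psil_dst_thread_valid(0x8001));
	printf("0x0042 valid: %d\n", psil_dst_thread_valid(0x0042));
	return 0;
}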
759/**
760 * struct ti_sci_msg_psil_unpair - Unpairs a PSI-L source thread from a
761 * destination thread
762 * @hdr: Generic Header
763 * @nav_id: SoC Navigator Subsystem device ID whose PSI-L config proxy is
764 * used to unpair the source and destination threads.
765 * @src_thread: PSI-L source thread ID within the PSI-L System thread map.
766 *
767 * UDMAP transmit channels mapped to source threads will have their
768 * TCHAN_THRD_ID register cleared if the unpairing is successful.
769 *
770 * @dst_thread: PSI-L destination thread ID within the PSI-L System thread map.
771 * PSI-L destination threads start at index 0x8000. The request is NACK'd if
772 * the destination thread is not greater than or equal to 0x8000.
773 *
774 * UDMAP receive channels mapped to destination threads will have their
775 * RCHAN_THRD_ID register cleared if the unpairing is successful.
776 *
777 * Request type is TI_SCI_MSG_RM_PSIL_UNPAIR, response is a generic ACK or NACK
778 * message.
779 */
780struct ti_sci_msg_psil_unpair {
781 struct ti_sci_msg_hdr hdr;
782 u32 nav_id;
783 u32 src_thread;
784 u32 dst_thread;
785} __packed;
786
787/**
788 * struct ti_sci_msg_udmap_rx_flow_cfg - UDMAP receive flow configuration
789 * message
790 * @hdr: Generic Header
791 * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
792 * allocated
793 * @flow_index: UDMAP receive flow index for non-optional configuration.
794 * @rx_ch_index: Specifies the index of the receive channel using the flow_index
795 * @rx_einfo_present: UDMAP receive flow extended packet info present.
796 * @rx_psinfo_present: UDMAP receive flow PS words present.
797 * @rx_error_handling: UDMAP receive flow error handling configuration. Valid
798 * values are TI_SCI_RM_UDMAP_RX_FLOW_ERR_DROP/RETRY.
799 * @rx_desc_type: UDMAP receive flow descriptor type. It can be one of
800 * TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST/MONO.
801 * @rx_sop_offset: UDMAP receive flow start of packet offset.
802 * @rx_dest_qnum: UDMAP receive flow destination queue number.
803 * @rx_ps_location: UDMAP receive flow PS words location.
804 * 0 - end of packet descriptor
805 * 1 - Beginning of the data buffer
806 * @rx_src_tag_hi: UDMAP receive flow source tag high byte constant
807 * @rx_src_tag_lo: UDMAP receive flow source tag low byte constant
808 * @rx_dest_tag_hi: UDMAP receive flow destination tag high byte constant
809 * @rx_dest_tag_lo: UDMAP receive flow destination tag low byte constant
810 * @rx_src_tag_hi_sel: UDMAP receive flow source tag high byte selector
811 * @rx_src_tag_lo_sel: UDMAP receive flow source tag low byte selector
812 * @rx_dest_tag_hi_sel: UDMAP receive flow destination tag high byte selector
813 * @rx_dest_tag_lo_sel: UDMAP receive flow destination tag low byte selector
814 * @rx_size_thresh_en: UDMAP receive flow packet size based free buffer queue
 815 * enable. If enabled, the ti_sci_rm_udmap_rx_flow_opt_cfg also needs to be
816 * configured and sent.
817 * @rx_fdq0_sz0_qnum: UDMAP receive flow free descriptor queue 0.
818 * @rx_fdq1_qnum: UDMAP receive flow free descriptor queue 1.
819 * @rx_fdq2_qnum: UDMAP receive flow free descriptor queue 2.
820 * @rx_fdq3_qnum: UDMAP receive flow free descriptor queue 3.
821 *
822 * For detailed information on the settings, see the UDMAP section of the TRM.
823 */
824struct ti_sci_msg_udmap_rx_flow_cfg {
825 struct ti_sci_msg_hdr hdr;
826 u32 nav_id;
827 u32 flow_index;
828 u32 rx_ch_index;
829 u8 rx_einfo_present;
830 u8 rx_psinfo_present;
831 u8 rx_error_handling;
832 u8 rx_desc_type;
833 u16 rx_sop_offset;
834 u16 rx_dest_qnum;
835 u8 rx_ps_location;
836 u8 rx_src_tag_hi;
837 u8 rx_src_tag_lo;
838 u8 rx_dest_tag_hi;
839 u8 rx_dest_tag_lo;
840 u8 rx_src_tag_hi_sel;
841 u8 rx_src_tag_lo_sel;
842 u8 rx_dest_tag_hi_sel;
843 u8 rx_dest_tag_lo_sel;
844 u8 rx_size_thresh_en;
845 u16 rx_fdq0_sz0_qnum;
846 u16 rx_fdq1_qnum;
847 u16 rx_fdq2_qnum;
848 u16 rx_fdq3_qnum;
849} __packed;
850
851/**
852 * struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg - parameters for UDMAP receive
853 * flow optional configuration
854 * @hdr: Generic Header
855 * @nav_id: SoC Navigator Subsystem device ID from which the receive flow is
856 * allocated
857 * @flow_index: UDMAP receive flow index for optional configuration.
858 * @rx_ch_index: Specifies the index of the receive channel using the flow_index
859 * @rx_size_thresh0: UDMAP receive flow packet size threshold 0.
860 * @rx_size_thresh1: UDMAP receive flow packet size threshold 1.
861 * @rx_size_thresh2: UDMAP receive flow packet size threshold 2.
862 * @rx_fdq0_sz1_qnum: UDMAP receive flow free descriptor queue for size
863 * threshold 1.
864 * @rx_fdq0_sz2_qnum: UDMAP receive flow free descriptor queue for size
865 * threshold 2.
866 * @rx_fdq0_sz3_qnum: UDMAP receive flow free descriptor queue for size
867 * threshold 3.
868 *
869 * For detailed information on the settings, see the UDMAP section of the TRM.
870 */
871struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
872 struct ti_sci_msg_hdr hdr;
873 u32 nav_id;
874 u32 flow_index;
875 u32 rx_ch_index;
876 u16 rx_size_thresh0;
877 u16 rx_size_thresh1;
878 u16 rx_size_thresh2;
879 u16 rx_fdq0_sz1_qnum;
880 u16 rx_fdq0_sz2_qnum;
881 u16 rx_fdq0_sz3_qnum;
882} __packed;
883
884/**
 885 * struct ti_sci_msg_rm_udmap_tx_ch_cfg_req - Configure a UDMAP TX channel
886 *
887 * Configures the non-real-time registers of a Navigator Subsystem UDMAP
888 * transmit channel. The channel index must be assigned to the host defined
889 * in the TISCI header via the RM board configuration resource assignment
890 * range list.
891 *
892 * @hdr: Generic Header
893 *
894 * @valid_params: Bitfield defining validity of tx channel configuration
895 * parameters. The tx channel configuration fields are not valid, and will not
896 * be used for ch configuration, if their corresponding valid bit is zero.
897 * Valid bit usage:
898 * 0 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_pause_on_err
899 * 1 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_atype
900 * 2 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_chan_type
901 * 3 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_fetch_size
902 * 4 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::txcq_qnum
903 * 5 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_priority
904 * 6 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_qos
905 * 7 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_orderid
906 * 8 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_sched_priority
907 * 9 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_einfo
908 * 10 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_filt_pswords
909 * 11 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_supr_tdpkt
910 * 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
911 * 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
912 * 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
913 *
914 * @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
915 *
916 * @index: UDMAP transmit channel index.
917 *
918 * @tx_pause_on_err: UDMAP transmit channel pause on error configuration to
919 * be programmed into the tx_pause_on_err field of the channel's TCHAN_TCFG
920 * register.
921 *
922 * @tx_filt_einfo: UDMAP transmit channel extended packet information passing
923 * configuration to be programmed into the tx_filt_einfo field of the
924 * channel's TCHAN_TCFG register.
925 *
926 * @tx_filt_pswords: UDMAP transmit channel protocol specific word passing
927 * configuration to be programmed into the tx_filt_pswords field of the
928 * channel's TCHAN_TCFG register.
929 *
930 * @tx_atype: UDMAP transmit channel non Ring Accelerator access pointer
931 * interpretation configuration to be programmed into the tx_atype field of
932 * the channel's TCHAN_TCFG register.
933 *
934 * @tx_chan_type: UDMAP transmit channel functional channel type and work
935 * passing mechanism configuration to be programmed into the tx_chan_type
936 * field of the channel's TCHAN_TCFG register.
937 *
938 * @tx_supr_tdpkt: UDMAP transmit channel teardown packet generation suppression
939 * configuration to be programmed into the tx_supr_tdpkt field of the channel's
940 * TCHAN_TCFG register.
941 *
942 * @tx_fetch_size: UDMAP transmit channel number of 32-bit descriptor words to
943 * fetch configuration to be programmed into the tx_fetch_size field of the
944 * channel's TCHAN_TCFG register. The user must make sure to set the maximum
945 * word count that can pass through the channel for any allowed descriptor type.
946 *
947 * @tx_credit_count: UDMAP transmit channel transfer request credit count
948 * configuration to be programmed into the count field of the TCHAN_TCREDIT
949 * register. Specifies how many credits for complete TRs are available.
950 *
951 * @txcq_qnum: UDMAP transmit channel completion queue configuration to be
952 * programmed into the txcq_qnum field of the TCHAN_TCQ register. The specified
953 * completion queue must be assigned to the host, or a subordinate of the host,
954 * requesting configuration of the transmit channel.
955 *
956 * @tx_priority: UDMAP transmit channel transmit priority value to be programmed
957 * into the priority field of the channel's TCHAN_TPRI_CTRL register.
958 *
959 * @tx_qos: UDMAP transmit channel transmit qos value to be programmed into the
960 * qos field of the channel's TCHAN_TPRI_CTRL register.
961 *
962 * @tx_orderid: UDMAP transmit channel bus order id value to be programmed into
963 * the orderid field of the channel's TCHAN_TPRI_CTRL register.
964 *
965 * @fdepth: UDMAP transmit channel FIFO depth configuration to be programmed
966 * into the fdepth field of the TCHAN_TFIFO_DEPTH register. Sets the number of
967 * Tx FIFO bytes which are allowed to be stored for the channel. Check the UDMAP
968 * section of the TRM for restrictions regarding this parameter.
969 *
970 * @tx_sched_priority: UDMAP transmit channel tx scheduling priority
971 * configuration to be programmed into the priority field of the channel's
972 * TCHAN_TST_SCHED register.
973 *
974 * @tx_burst_size: UDMAP transmit channel burst size configuration to be
975 * programmed into the tx_burst_size field of the TCHAN_TCFG register.
976 */
977struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
978 struct ti_sci_msg_hdr hdr;
979 u32 valid_params;
980 u16 nav_id;
981 u16 index;
982 u8 tx_pause_on_err;
983 u8 tx_filt_einfo;
984 u8 tx_filt_pswords;
985 u8 tx_atype;
986 u8 tx_chan_type;
987 u8 tx_supr_tdpkt;
988 u16 tx_fetch_size;
989 u8 tx_credit_count;
990 u16 txcq_qnum;
991 u8 tx_priority;
992 u8 tx_qos;
993 u8 tx_orderid;
994 u16 fdepth;
995 u8 tx_sched_priority;
996 u8 tx_burst_size;
997} __packed;
998
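To make the valid_params pairing concrete, here is a minimal sketch (not part of the patch) that fills a transmit channel request with only pause-on-err and the completion queue marked valid. The EX_VALID_* macro names and the helper are invented for illustration; the remaining fields stay zero because the firmware ignores any field whose valid bit is clear.

/* Illustrative only: bit positions follow the valid_params table above. */
#define EX_VALID_TX_PAUSE_ON_ERR	BIT(0)
#define EX_VALID_TXCQ_QNUM		BIT(4)

static void ex_fill_tx_cfg(struct ti_sci_msg_rm_udmap_tx_ch_cfg_req *req,
			   u16 nav_id, u16 chan, u16 cq)
{
	memset(req, 0, sizeof(*req));	/* cleared fields are ignored: valid bit unset */
	req->valid_params = EX_VALID_TX_PAUSE_ON_ERR | EX_VALID_TXCQ_QNUM;
	req->nav_id = nav_id;		/* Navigator Subsystem device ID */
	req->index = chan;		/* tx channel assigned to this host */
	req->tx_pause_on_err = 1;	/* pause the channel on errors */
	req->txcq_qnum = cq;		/* completion queue owned by this host */
}
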
999/**
 1000 * struct ti_sci_msg_rm_udmap_rx_ch_cfg_req - Configure a Navigator Subsystem UDMAP receive channel
1001 *
1002 * Configures the non-real-time registers of a Navigator Subsystem UDMAP
1003 * receive channel. The channel index must be assigned to the host defined
1004 * in the TISCI header via the RM board configuration resource assignment
1005 * range list.
1006 *
1007 * @hdr: Generic Header
1008 *
1009 * @valid_params: Bitfield defining validity of rx channel configuration
1010 * parameters.
1011 * The rx channel configuration fields are not valid, and will not be used for
1012 * ch configuration, if their corresponding valid bit is zero.
1013 * Valid bit usage:
1014 * 0 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_pause_on_err
1015 * 1 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_atype
1016 * 2 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_chan_type
1017 * 3 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_fetch_size
1018 * 4 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rxcq_qnum
1019 * 5 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_priority
1020 * 6 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_qos
1021 * 7 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_orderid
1022 * 8 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_sched_priority
1023 * 9 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_start
1024 * 10 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::flowid_cnt
1025 * 11 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_short
1026 * 12 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_ignore_long
1027 * 14 - Valid bit for @ti_sci_msg_rm_udmap_rx_ch_cfg_req::rx_burst_size
1028 *
1029 * @nav_id: SoC device ID of Navigator Subsystem where rx channel is located
1030 *
1031 * @index: UDMAP receive channel index.
1032 *
1033 * @rx_fetch_size: UDMAP receive channel number of 32-bit descriptor words to
1034 * fetch configuration to be programmed into the rx_fetch_size field of the
1035 * channel's RCHAN_RCFG register.
1036 *
1037 * @rxcq_qnum: UDMAP receive channel completion queue configuration to be
1038 * programmed into the rxcq_qnum field of the RCHAN_RCQ register.
1039 * The specified completion queue must be assigned to the host, or a subordinate
1040 * of the host, requesting configuration of the receive channel.
1041 *
1042 * @rx_priority: UDMAP receive channel receive priority value to be programmed
1043 * into the priority field of the channel's RCHAN_RPRI_CTRL register.
1044 *
1045 * @rx_qos: UDMAP receive channel receive qos value to be programmed into the
1046 * qos field of the channel's RCHAN_RPRI_CTRL register.
1047 *
1048 * @rx_orderid: UDMAP receive channel bus order id value to be programmed into
1049 * the orderid field of the channel's RCHAN_RPRI_CTRL register.
1050 *
1051 * @rx_sched_priority: UDMAP receive channel rx scheduling priority
1052 * configuration to be programmed into the priority field of the channel's
1053 * RCHAN_RST_SCHED register.
1054 *
1055 * @flowid_start: UDMAP receive channel additional flows starting index
1056 * configuration to program into the flow_start field of the RCHAN_RFLOW_RNG
1057 * register. Specifies the starting index for flow IDs the receive channel is to
1058 * make use of beyond the default flow. flowid_start and @ref flowid_cnt must be
1059 * set as valid and configured together. The starting flow ID set by
 1060 * @ref flowid_start must be a flow index within the Navigator Subsystem's subset
1061 * of flows beyond the default flows statically mapped to receive channels.
1062 * The additional flows must be assigned to the host, or a subordinate of the
1063 * host, requesting configuration of the receive channel.
1064 *
1065 * @flowid_cnt: UDMAP receive channel additional flows count configuration to
1066 * program into the flowid_cnt field of the RCHAN_RFLOW_RNG register.
1067 * This field specifies how many flow IDs are in the additional contiguous range
1068 * of legal flow IDs for the channel. @ref flowid_start and flowid_cnt must be
1069 * set as valid and configured together. Disabling the valid_params field bit
1070 * for flowid_cnt indicates no flow IDs other than the default are to be
1071 * allocated and used by the receive channel. @ref flowid_start plus flowid_cnt
1072 * cannot be greater than the number of receive flows in the receive channel's
1073 * Navigator Subsystem. The additional flows must be assigned to the host, or a
1074 * subordinate of the host, requesting configuration of the receive channel.
1075 *
1076 * @rx_pause_on_err: UDMAP receive channel pause on error configuration to be
1077 * programmed into the rx_pause_on_err field of the channel's RCHAN_RCFG
1078 * register.
1079 *
1080 * @rx_atype: UDMAP receive channel non Ring Accelerator access pointer
1081 * interpretation configuration to be programmed into the rx_atype field of the
1082 * channel's RCHAN_RCFG register.
1083 *
1084 * @rx_chan_type: UDMAP receive channel functional channel type and work passing
1085 * mechanism configuration to be programmed into the rx_chan_type field of the
1086 * channel's RCHAN_RCFG register.
1087 *
1088 * @rx_ignore_short: UDMAP receive channel short packet treatment configuration
1089 * to be programmed into the rx_ignore_short field of the RCHAN_RCFG register.
1090 *
1091 * @rx_ignore_long: UDMAP receive channel long packet treatment configuration to
1092 * be programmed into the rx_ignore_long field of the RCHAN_RCFG register.
1093 *
1094 * @rx_burst_size: UDMAP receive channel burst size configuration to be
1095 * programmed into the rx_burst_size field of the RCHAN_RCFG register.
1096 */
1097struct ti_sci_msg_rm_udmap_rx_ch_cfg_req {
1098 struct ti_sci_msg_hdr hdr;
1099 u32 valid_params;
1100 u16 nav_id;
1101 u16 index;
1102 u16 rx_fetch_size;
1103 u16 rxcq_qnum;
1104 u8 rx_priority;
1105 u8 rx_qos;
1106 u8 rx_orderid;
1107 u8 rx_sched_priority;
1108 u16 flowid_start;
1109 u16 flowid_cnt;
1110 u8 rx_pause_on_err;
1111 u8 rx_atype;
1112 u8 rx_chan_type;
1113 u8 rx_ignore_short;
1114 u8 rx_ignore_long;
1115 u8 rx_burst_size;
1116} __packed;
1117
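Because flowid_start and flowid_cnt must be validated together, a short sketch of that pairing follows. The EX_VALID_* names are ours, mapping to bits 9 and 10 of the table above, and the numeric values are arbitrary.

/* Illustrative only: reserve 8 extra flows starting at flow index 48. */
#define EX_VALID_FLOWID_START	BIT(9)
#define EX_VALID_FLOWID_CNT	BIT(10)

static void ex_add_rx_flows(struct ti_sci_msg_rm_udmap_rx_ch_cfg_req *req)
{
	/* Setting one bit without the other is rejected by the firmware. */
	req->valid_params |= EX_VALID_FLOWID_START | EX_VALID_FLOWID_CNT;
	req->flowid_start = 48;	/* first flow beyond the statically mapped ones */
	req->flowid_cnt = 8;	/* start + cnt must stay within the SoC's flows */
}
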
1118/**
 1119 * struct ti_sci_msg_rm_udmap_flow_cfg_req - Configure a Navigator Subsystem UDMAP receive flow
1120 *
1121 * Configures a Navigator Subsystem UDMAP receive flow's registers.
1122 * Configuration does not include the flow registers which handle size-based
1123 * free descriptor queue routing.
1124 *
1125 * The flow index must be assigned to the host defined in the TISCI header via
1126 * the RM board configuration resource assignment range list.
1127 *
1128 * @hdr: Standard TISCI header
1129 *
 1130 * @valid_params:
1131 * Bitfield defining validity of rx flow configuration parameters. The
1132 * rx flow configuration fields are not valid, and will not be used for flow
1133 * configuration, if their corresponding valid bit is zero. Valid bit usage:
 1134 * 0 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_einfo_present
 1135 * 1 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_psinfo_present
 1136 * 2 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_error_handling
 1137 * 3 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_desc_type
 1138 * 4 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_sop_offset
 1139 * 5 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_dest_qnum
 1140 * 6 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi
 1141 * 7 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo
 1142 * 8 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi
 1143 * 9 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo
 1144 * 10 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_src_tag_hi_sel
 1145 * 11 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_src_tag_lo_sel
 1146 * 12 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_hi_sel
 1147 * 13 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_dest_tag_lo_sel
 1148 * 14 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_fdq0_sz0_qnum
 1149 * 15 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_fdq1_qnum
 1150 * 16 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_fdq2_qnum
 1151 * 17 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_fdq3_qnum
 1152 * 18 - Valid bit for @ti_sci_msg_rm_udmap_flow_cfg_req::rx_ps_location
1153 *
1154 * @nav_id: SoC device ID of Navigator Subsystem from which the receive flow is
1155 * allocated
1156 *
1157 * @flow_index: UDMAP receive flow index for non-optional configuration.
1158 *
1159 * @rx_einfo_present:
1160 * UDMAP receive flow extended packet info present configuration to be
1161 * programmed into the rx_einfo_present field of the flow's RFLOW_RFA register.
1162 *
1163 * @rx_psinfo_present:
1164 * UDMAP receive flow PS words present configuration to be programmed into the
1165 * rx_psinfo_present field of the flow's RFLOW_RFA register.
1166 *
1167 * @rx_error_handling:
1168 * UDMAP receive flow error handling configuration to be programmed into the
1169 * rx_error_handling field of the flow's RFLOW_RFA register.
1170 *
1171 * @rx_desc_type:
1172 * UDMAP receive flow descriptor type configuration to be programmed into the
 1173 * rx_desc_type field of the flow's RFLOW_RFA register.
1174 *
1175 * @rx_sop_offset:
1176 * UDMAP receive flow start of packet offset configuration to be programmed
1177 * into the rx_sop_offset field of the RFLOW_RFA register. See the UDMAP
1178 * section of the TRM for more information on this setting. Valid values for
1179 * this field are 0-255 bytes.
1180 *
1181 * @rx_dest_qnum:
1182 * UDMAP receive flow destination queue configuration to be programmed into the
1183 * rx_dest_qnum field of the flow's RFLOW_RFA register. The specified
1184 * destination queue must be valid within the Navigator Subsystem and must be
1185 * owned by the host, or a subordinate of the host, requesting allocation and
1186 * configuration of the receive flow.
1187 *
1188 * @rx_src_tag_hi:
1189 * UDMAP receive flow source tag high byte constant configuration to be
1190 * programmed into the rx_src_tag_hi field of the flow's RFLOW_RFB register.
1191 * See the UDMAP section of the TRM for more information on this setting.
1192 *
1193 * @rx_src_tag_lo:
1194 * UDMAP receive flow source tag low byte constant configuration to be
1195 * programmed into the rx_src_tag_lo field of the flow's RFLOW_RFB register.
1196 * See the UDMAP section of the TRM for more information on this setting.
1197 *
1198 * @rx_dest_tag_hi:
1199 * UDMAP receive flow destination tag high byte constant configuration to be
1200 * programmed into the rx_dest_tag_hi field of the flow's RFLOW_RFB register.
1201 * See the UDMAP section of the TRM for more information on this setting.
1202 *
1203 * @rx_dest_tag_lo:
1204 * UDMAP receive flow destination tag low byte constant configuration to be
1205 * programmed into the rx_dest_tag_lo field of the flow's RFLOW_RFB register.
1206 * See the UDMAP section of the TRM for more information on this setting.
1207 *
1208 * @rx_src_tag_hi_sel:
1209 * UDMAP receive flow source tag high byte selector configuration to be
1210 * programmed into the rx_src_tag_hi_sel field of the RFLOW_RFC register. See
1211 * the UDMAP section of the TRM for more information on this setting.
1212 *
1213 * @rx_src_tag_lo_sel:
1214 * UDMAP receive flow source tag low byte selector configuration to be
1215 * programmed into the rx_src_tag_lo_sel field of the RFLOW_RFC register. See
1216 * the UDMAP section of the TRM for more information on this setting.
1217 *
1218 * @rx_dest_tag_hi_sel:
1219 * UDMAP receive flow destination tag high byte selector configuration to be
1220 * programmed into the rx_dest_tag_hi_sel field of the RFLOW_RFC register. See
1221 * the UDMAP section of the TRM for more information on this setting.
1222 *
1223 * @rx_dest_tag_lo_sel:
1224 * UDMAP receive flow destination tag low byte selector configuration to be
1225 * programmed into the rx_dest_tag_lo_sel field of the RFLOW_RFC register. See
1226 * the UDMAP section of the TRM for more information on this setting.
1227 *
1228 * @rx_fdq0_sz0_qnum:
1229 * UDMAP receive flow free descriptor queue 0 configuration to be programmed
1230 * into the rx_fdq0_sz0_qnum field of the flow's RFLOW_RFD register. See the
1231 * UDMAP section of the TRM for more information on this setting. The specified
1232 * free queue must be valid within the Navigator Subsystem and must be owned
1233 * by the host, or a subordinate of the host, requesting allocation and
1234 * configuration of the receive flow.
1235 *
1236 * @rx_fdq1_qnum:
1237 * UDMAP receive flow free descriptor queue 1 configuration to be programmed
1238 * into the rx_fdq1_qnum field of the flow's RFLOW_RFD register. See the
1239 * UDMAP section of the TRM for more information on this setting. The specified
1240 * free queue must be valid within the Navigator Subsystem and must be owned
1241 * by the host, or a subordinate of the host, requesting allocation and
1242 * configuration of the receive flow.
1243 *
1244 * @rx_fdq2_qnum:
1245 * UDMAP receive flow free descriptor queue 2 configuration to be programmed
1246 * into the rx_fdq2_qnum field of the flow's RFLOW_RFE register. See the
1247 * UDMAP section of the TRM for more information on this setting. The specified
1248 * free queue must be valid within the Navigator Subsystem and must be owned
1249 * by the host, or a subordinate of the host, requesting allocation and
1250 * configuration of the receive flow.
1251 *
1252 * @rx_fdq3_qnum:
1253 * UDMAP receive flow free descriptor queue 3 configuration to be programmed
1254 * into the rx_fdq3_qnum field of the flow's RFLOW_RFE register. See the
1255 * UDMAP section of the TRM for more information on this setting. The specified
1256 * free queue must be valid within the Navigator Subsystem and must be owned
1257 * by the host, or a subordinate of the host, requesting allocation and
1258 * configuration of the receive flow.
1259 *
1260 * @rx_ps_location:
1261 * UDMAP receive flow PS words location configuration to be programmed into the
1262 * rx_ps_location field of the flow's RFLOW_RFA register.
1263 */
1264struct ti_sci_msg_rm_udmap_flow_cfg_req {
1265 struct ti_sci_msg_hdr hdr;
1266 u32 valid_params;
1267 u16 nav_id;
1268 u16 flow_index;
1269 u8 rx_einfo_present;
1270 u8 rx_psinfo_present;
1271 u8 rx_error_handling;
1272 u8 rx_desc_type;
1273 u16 rx_sop_offset;
1274 u16 rx_dest_qnum;
1275 u8 rx_src_tag_hi;
1276 u8 rx_src_tag_lo;
1277 u8 rx_dest_tag_hi;
1278 u8 rx_dest_tag_lo;
1279 u8 rx_src_tag_hi_sel;
1280 u8 rx_src_tag_lo_sel;
1281 u8 rx_dest_tag_hi_sel;
1282 u8 rx_dest_tag_lo_sel;
1283 u16 rx_fdq0_sz0_qnum;
1284 u16 rx_fdq1_qnum;
1285 u16 rx_fdq2_qnum;
1286 u16 rx_fdq3_qnum;
1287 u8 rx_ps_location;
1288} __packed;
1289
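A common receive flow setup points all four free descriptor queue slots at the same queue, so buffer selection does not depend on packet size. The helper below is a hedged sketch (names invented); bits 14-17 of valid_params cover the four qnum fields per the table above.

/* Illustrative only: route every free-descriptor lookup to one queue. */
static void ex_single_fdq(struct ti_sci_msg_rm_udmap_flow_cfg_req *req, u16 fdq)
{
	req->valid_params |= GENMASK(17, 14);	/* all four fdq valid bits */
	req->rx_fdq0_sz0_qnum = fdq;
	req->rx_fdq1_qnum = fdq;
	req->rx_fdq2_qnum = fdq;
	req->rx_fdq3_qnum = fdq;
}
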
1290/**
1291 * struct ti_sci_msg_req_proc_request - Request a processor
1292 * @hdr: Generic Header
1293 * @processor_id: ID of processor being requested
1294 *
1295 * Request type is TI_SCI_MSG_PROC_REQUEST, response is a generic ACK/NACK
1296 * message.
1297 */
1298struct ti_sci_msg_req_proc_request {
1299 struct ti_sci_msg_hdr hdr;
1300 u8 processor_id;
1301} __packed;
1302
1303/**
1304 * struct ti_sci_msg_req_proc_release - Release a processor
1305 * @hdr: Generic Header
1306 * @processor_id: ID of processor being released
1307 *
1308 * Request type is TI_SCI_MSG_PROC_RELEASE, response is a generic ACK/NACK
1309 * message.
1310 */
1311struct ti_sci_msg_req_proc_release {
1312 struct ti_sci_msg_hdr hdr;
1313 u8 processor_id;
1314} __packed;
1315
1316/**
1317 * struct ti_sci_msg_req_proc_handover - Handover a processor to a host
1318 * @hdr: Generic Header
1319 * @processor_id: ID of processor being handed over
1320 * @host_id: Host ID the control needs to be transferred to
1321 *
1322 * Request type is TI_SCI_MSG_PROC_HANDOVER, response is a generic ACK/NACK
1323 * message.
1324 */
1325struct ti_sci_msg_req_proc_handover {
1326 struct ti_sci_msg_hdr hdr;
1327 u8 processor_id;
1328 u8 host_id;
1329} __packed;
1330
1331/* Boot Vector masks */
1332#define TI_SCI_ADDR_LOW_MASK GENMASK_ULL(31, 0)
1333#define TI_SCI_ADDR_HIGH_MASK GENMASK_ULL(63, 32)
1334#define TI_SCI_ADDR_HIGH_SHIFT 32
1335
1336/**
1337 * struct ti_sci_msg_req_set_config - Set Processor boot configuration
1338 * @hdr: Generic Header
1339 * @processor_id: ID of processor being configured
1340 * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
1341 * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
1342 * @config_flags_set: Optional Processor specific Config Flags to set.
1343 * Setting a bit here implies the corresponding mode
1344 * will be set
1345 * @config_flags_clear: Optional Processor specific Config Flags to clear.
1346 * Setting a bit here implies the corresponding mode
1347 * will be cleared
1348 *
 1349 * Request type is TI_SCI_MSG_SET_CONFIG, response is a generic ACK/NACK
1350 * message.
1351 */
1352struct ti_sci_msg_req_set_config {
1353 struct ti_sci_msg_hdr hdr;
1354 u8 processor_id;
1355 u32 bootvector_low;
1356 u32 bootvector_high;
1357 u32 config_flags_set;
1358 u32 config_flags_clear;
1359} __packed;
1360
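The two bootvector fields carry one 64-bit entry point split with the masks defined above; a minimal sketch of the split, with an invented helper name:

/* Illustrative only: split a 64-bit entry point across the message. */
static void ex_set_bootvector(struct ti_sci_msg_req_set_config *cfg, u64 bv)
{
	cfg->bootvector_low = bv & TI_SCI_ADDR_LOW_MASK;
	cfg->bootvector_high = (bv & TI_SCI_ADDR_HIGH_MASK) >>
			       TI_SCI_ADDR_HIGH_SHIFT;
}
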
1361/**
1362 * struct ti_sci_msg_req_set_ctrl - Set Processor boot control flags
1363 * @hdr: Generic Header
1364 * @processor_id: ID of processor being configured
1365 * @control_flags_set: Optional Processor specific Control Flags to set.
1366 * Setting a bit here implies the corresponding mode
1367 * will be set
 1368 * @control_flags_clear: Optional Processor specific Control Flags to clear.
1369 * Setting a bit here implies the corresponding mode
1370 * will be cleared
1371 *
1372 * Request type is TI_SCI_MSG_SET_CTRL, response is a generic ACK/NACK
1373 * message.
1374 */
1375struct ti_sci_msg_req_set_ctrl {
1376 struct ti_sci_msg_hdr hdr;
1377 u8 processor_id;
1378 u32 control_flags_set;
1379 u32 control_flags_clear;
1380} __packed;
1381
1382/**
1383 * struct ti_sci_msg_req_get_status - Processor boot status request
1384 * @hdr: Generic Header
1385 * @processor_id: ID of processor whose status is being requested
1386 *
 1387 * Request type is TI_SCI_MSG_GET_STATUS, response is an appropriate
 1388 * message, or a NACK if the request cannot be satisfied.
1389 */
1390struct ti_sci_msg_req_get_status {
1391 struct ti_sci_msg_hdr hdr;
1392 u8 processor_id;
1393} __packed;
1394
1395/**
1396 * struct ti_sci_msg_resp_get_status - Processor boot status response
1397 * @hdr: Generic Header
1398 * @processor_id: ID of processor whose status is returned
1399 * @bootvector_low: Lower 32 bit address (Little Endian) of boot vector
1400 * @bootvector_high: Higher 32 bit address (Little Endian) of boot vector
1401 * @config_flags: Optional Processor specific Config Flags set currently
1402 * @control_flags: Optional Processor specific Control Flags set currently
1403 * @status_flags: Optional Processor specific Status Flags set currently
1404 *
1405 * Response structure to a TI_SCI_MSG_GET_STATUS request.
1406 */
1407struct ti_sci_msg_resp_get_status {
1408 struct ti_sci_msg_hdr hdr;
1409 u8 processor_id;
1410 u32 bootvector_low;
1411 u32 bootvector_high;
1412 u32 config_flags;
1413 u32 control_flags;
1414 u32 status_flags;
1415} __packed;
1416
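On the status path the same masks apply in reverse; another illustrative helper of ours, assuming the response struct above:

/* Illustrative only: rebuild the 64-bit boot vector from a response. */
static u64 ex_get_bootvector(const struct ti_sci_msg_resp_get_status *st)
{
	return ((u64)st->bootvector_high << TI_SCI_ADDR_HIGH_SHIFT) |
	       st->bootvector_low;
}
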
607#endif /* __TI_SCI_H */ 1417#endif /* __TI_SCI_H */
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index a80183a488c5..0c93fc5ca762 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -18,6 +18,50 @@ struct scmi_sensors {
18 const struct scmi_sensor_info **info[hwmon_max]; 18 const struct scmi_sensor_info **info[hwmon_max];
19}; 19};
20 20
21static inline u64 __pow10(u8 x)
22{
23 u64 r = 1;
24
25 while (x--)
26 r *= 10;
27
28 return r;
29}
30
31static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value)
32{
33 s8 scale = sensor->scale;
34 u64 f;
35
36 switch (sensor->type) {
37 case TEMPERATURE_C:
38 case VOLTAGE:
39 case CURRENT:
40 scale += 3;
41 break;
42 case POWER:
43 case ENERGY:
44 scale += 6;
45 break;
46 default:
47 break;
48 }
49
50 if (scale == 0)
51 return 0;
52
53 if (abs(scale) > 19)
54 return -E2BIG;
55
56 f = __pow10(abs(scale));
57 if (scale > 0)
58 *value *= f;
59 else
60 *value = div64_u64(*value, f);
61
62 return 0;
63}
64
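To see the scaling rule in action: a TEMPERATURE_C sensor reporting centi-degrees has scale == -2, the +3 millidegree adjustment leaves an effective exponent of +1, and a raw reading of 4250 becomes 42500 (42.5 degrees in hwmon's millidegree convention). A stand-alone sketch of that arithmetic, ours rather than the patch's:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int8_t scale = -2 + 3;	/* sensor exponent plus hwmon's milli shift */
	uint64_t value = 4250;	/* raw reading: 42.50 degrees C */

	while (scale-- > 0)
		value *= 10;	/* positive exponent: multiply, as in the patch */
	assert(value == 42500);	/* hwmon expects millidegrees */
	return 0;
}
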
21static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, 65static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
22 u32 attr, int channel, long *val) 66 u32 attr, int channel, long *val)
23{ 67{
@@ -29,6 +73,10 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
29 73
30 sensor = *(scmi_sensors->info[type] + channel); 74 sensor = *(scmi_sensors->info[type] + channel);
31 ret = h->sensor_ops->reading_get(h, sensor->id, false, &value); 75 ret = h->sensor_ops->reading_get(h, sensor->id, false, &value);
76 if (ret)
77 return ret;
78
79 ret = scmi_hwmon_scale(sensor, &value);
32 if (!ret) 80 if (!ret)
33 *val = value; 81 *val = value;
34 82
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index dbdee02bb592..9bddca292330 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -8,6 +8,14 @@ menuconfig MEMORY
8 8
9if MEMORY 9if MEMORY
10 10
11config DDR
12 bool
13 help
14 Data from JEDEC specs for DDR SDRAM memories,
15 particularly the AC timing parameters and addressing
16 information. This data is useful for drivers handling
17 DDR SDRAM controllers.
18
11config ARM_PL172_MPMC 19config ARM_PL172_MPMC
12 tristate "ARM PL172 MPMC driver" 20 tristate "ARM PL172 MPMC driver"
13 depends on ARM_AMBA && OF 21 depends on ARM_AMBA && OF
diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile
index 91ae4eb0e913..9d5c409a1591 100644
--- a/drivers/memory/Makefile
+++ b/drivers/memory/Makefile
@@ -3,6 +3,7 @@
3# Makefile for memory devices 3# Makefile for memory devices
4# 4#
5 5
6obj-$(CONFIG_DDR) += jedec_ddr_data.o
6ifeq ($(CONFIG_DDR),y) 7ifeq ($(CONFIG_DDR),y)
7obj-$(CONFIG_OF) += of_memory.o 8obj-$(CONFIG_OF) += of_memory.o
8endif 9endif
diff --git a/drivers/memory/brcmstb_dpfe.c b/drivers/memory/brcmstb_dpfe.c
index 3065a8bc8fd6..6827ed484750 100644
--- a/drivers/memory/brcmstb_dpfe.c
+++ b/drivers/memory/brcmstb_dpfe.c
@@ -33,10 +33,10 @@
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/module.h> 34#include <linux/module.h>
35#include <linux/of_address.h> 35#include <linux/of_address.h>
36#include <linux/of_device.h>
36#include <linux/platform_device.h> 37#include <linux/platform_device.h>
37 38
38#define DRVNAME "brcmstb-dpfe" 39#define DRVNAME "brcmstb-dpfe"
39#define FIRMWARE_NAME "dpfe.bin"
40 40
41/* DCPU register offsets */ 41/* DCPU register offsets */
42#define REG_DCPU_RESET 0x0 42#define REG_DCPU_RESET 0x0
@@ -59,6 +59,7 @@
59#define DRAM_INFO_MR4 0x4 59#define DRAM_INFO_MR4 0x4
60#define DRAM_INFO_ERROR 0x8 60#define DRAM_INFO_ERROR 0x8
61#define DRAM_INFO_MR4_MASK 0xff 61#define DRAM_INFO_MR4_MASK 0xff
62#define DRAM_INFO_MR4_SHIFT 24 /* We need to look at byte 3 */
62 63
63/* DRAM MR4 Offsets & Masks */ 64/* DRAM MR4 Offsets & Masks */
64#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */ 65#define DRAM_MR4_REFRESH 0x0 /* Refresh rate */
@@ -73,13 +74,23 @@
73#define DRAM_MR4_TH_OFFS_MASK 0x3 74#define DRAM_MR4_TH_OFFS_MASK 0x3
74#define DRAM_MR4_TUF_MASK 0x1 75#define DRAM_MR4_TUF_MASK 0x1
75 76
76/* DRAM Vendor Offsets & Masks */ 77/* DRAM Vendor Offsets & Masks (API v2) */
77#define DRAM_VENDOR_MR5 0x0 78#define DRAM_VENDOR_MR5 0x0
78#define DRAM_VENDOR_MR6 0x4 79#define DRAM_VENDOR_MR6 0x4
79#define DRAM_VENDOR_MR7 0x8 80#define DRAM_VENDOR_MR7 0x8
80#define DRAM_VENDOR_MR8 0xc 81#define DRAM_VENDOR_MR8 0xc
81#define DRAM_VENDOR_ERROR 0x10 82#define DRAM_VENDOR_ERROR 0x10
82#define DRAM_VENDOR_MASK 0xff 83#define DRAM_VENDOR_MASK 0xff
84#define DRAM_VENDOR_SHIFT 24 /* We need to look at byte 3 */
85
86/* DRAM Information Offsets & Masks (API v3) */
87#define DRAM_DDR_INFO_MR4 0x0
88#define DRAM_DDR_INFO_MR5 0x4
89#define DRAM_DDR_INFO_MR6 0x8
90#define DRAM_DDR_INFO_MR7 0xc
91#define DRAM_DDR_INFO_MR8 0x10
92#define DRAM_DDR_INFO_ERROR 0x14
93#define DRAM_DDR_INFO_MASK 0xff
83 94
84/* Reset register bits & masks */ 95/* Reset register bits & masks */
85#define DCPU_RESET_SHIFT 0x0 96#define DCPU_RESET_SHIFT 0x0
@@ -109,7 +120,7 @@
109#define DPFE_MSG_TYPE_COMMAND 1 120#define DPFE_MSG_TYPE_COMMAND 1
110#define DPFE_MSG_TYPE_RESPONSE 2 121#define DPFE_MSG_TYPE_RESPONSE 2
111 122
112#define DELAY_LOOP_MAX 200000 123#define DELAY_LOOP_MAX 1000
113 124
114enum dpfe_msg_fields { 125enum dpfe_msg_fields {
115 MSG_HEADER, 126 MSG_HEADER,
@@ -117,7 +128,7 @@ enum dpfe_msg_fields {
117 MSG_ARG_COUNT, 128 MSG_ARG_COUNT,
118 MSG_ARG0, 129 MSG_ARG0,
119 MSG_CHKSUM, 130 MSG_CHKSUM,
120 MSG_FIELD_MAX /* Last entry */ 131 MSG_FIELD_MAX = 16 /* Max number of arguments */
121}; 132};
122 133
123enum dpfe_commands { 134enum dpfe_commands {
@@ -127,14 +138,6 @@ enum dpfe_commands {
127 DPFE_CMD_MAX /* Last entry */ 138 DPFE_CMD_MAX /* Last entry */
128}; 139};
129 140
130struct dpfe_msg {
131 u32 header;
132 u32 command;
133 u32 arg_count;
134 u32 arg0;
135 u32 chksum; /* This is the sum of all other entries. */
136};
137
138/* 141/*
139 * Format of the binary firmware file: 142 * Format of the binary firmware file:
140 * 143 *
@@ -168,12 +171,21 @@ struct init_data {
168 bool is_big_endian; 171 bool is_big_endian;
169}; 172};
170 173
174/* API version and corresponding commands */
175struct dpfe_api {
176 int version;
177 const char *fw_name;
178 const struct attribute_group **sysfs_attrs;
179 u32 command[DPFE_CMD_MAX][MSG_FIELD_MAX];
180};
181
171/* Things we need for as long as we are active. */ 182/* Things we need for as long as we are active. */
172struct private_data { 183struct private_data {
173 void __iomem *regs; 184 void __iomem *regs;
174 void __iomem *dmem; 185 void __iomem *dmem;
175 void __iomem *imem; 186 void __iomem *imem;
176 struct device *dev; 187 struct device *dev;
188 const struct dpfe_api *dpfe_api;
177 struct mutex lock; 189 struct mutex lock;
178}; 190};
179 191
@@ -182,28 +194,99 @@ static const char *error_text[] = {
182 "Incorrect checksum", "Malformed command", "Timed out", 194 "Incorrect checksum", "Malformed command", "Timed out",
183}; 195};
184 196
185/* List of supported firmware commands */ 197/*
186static const u32 dpfe_commands[DPFE_CMD_MAX][MSG_FIELD_MAX] = { 198 * Forward declaration of our sysfs attribute functions, so we can declare the
187 [DPFE_CMD_GET_INFO] = { 199 * attribute data structures early.
188 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, 200 */
189 [MSG_COMMAND] = 1, 201static ssize_t show_info(struct device *, struct device_attribute *, char *);
190 [MSG_ARG_COUNT] = 1, 202static ssize_t show_refresh(struct device *, struct device_attribute *, char *);
191 [MSG_ARG0] = 1, 203static ssize_t store_refresh(struct device *, struct device_attribute *,
192 [MSG_CHKSUM] = 4, 204 const char *, size_t);
193 }, 205static ssize_t show_vendor(struct device *, struct device_attribute *, char *);
194 [DPFE_CMD_GET_REFRESH] = { 206static ssize_t show_dram(struct device *, struct device_attribute *, char *);
195 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, 207
196 [MSG_COMMAND] = 2, 208/*
197 [MSG_ARG_COUNT] = 1, 209 * Declare our attributes early, so they can be referenced in the API data
198 [MSG_ARG0] = 1, 210 * structure. We need to do this, because the attributes depend on the API
199 [MSG_CHKSUM] = 5, 211 * version.
200 }, 212 */
201 [DPFE_CMD_GET_VENDOR] = { 213static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
202 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND, 214static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
203 [MSG_COMMAND] = 2, 215static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
204 [MSG_ARG_COUNT] = 1, 216static DEVICE_ATTR(dpfe_dram, 0444, show_dram, NULL);
205 [MSG_ARG0] = 2, 217
206 [MSG_CHKSUM] = 6, 218/* API v2 sysfs attributes */
219static struct attribute *dpfe_v2_attrs[] = {
220 &dev_attr_dpfe_info.attr,
221 &dev_attr_dpfe_refresh.attr,
222 &dev_attr_dpfe_vendor.attr,
223 NULL
224};
225ATTRIBUTE_GROUPS(dpfe_v2);
226
227/* API v3 sysfs attributes */
228static struct attribute *dpfe_v3_attrs[] = {
229 &dev_attr_dpfe_info.attr,
230 &dev_attr_dpfe_dram.attr,
231 NULL
232};
233ATTRIBUTE_GROUPS(dpfe_v3);
234
235/* API v2 firmware commands */
236static const struct dpfe_api dpfe_api_v2 = {
237 .version = 2,
238 .fw_name = "dpfe.bin",
239 .sysfs_attrs = dpfe_v2_groups,
240 .command = {
241 [DPFE_CMD_GET_INFO] = {
242 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
243 [MSG_COMMAND] = 1,
244 [MSG_ARG_COUNT] = 1,
245 [MSG_ARG0] = 1,
246 [MSG_CHKSUM] = 4,
247 },
248 [DPFE_CMD_GET_REFRESH] = {
249 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
250 [MSG_COMMAND] = 2,
251 [MSG_ARG_COUNT] = 1,
252 [MSG_ARG0] = 1,
253 [MSG_CHKSUM] = 5,
254 },
255 [DPFE_CMD_GET_VENDOR] = {
256 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
257 [MSG_COMMAND] = 2,
258 [MSG_ARG_COUNT] = 1,
259 [MSG_ARG0] = 2,
260 [MSG_CHKSUM] = 6,
261 },
262 }
263};
264
265/* API v3 firmware commands */
266static const struct dpfe_api dpfe_api_v3 = {
267 .version = 3,
268 .fw_name = NULL, /* We expect the firmware to have been downloaded! */
269 .sysfs_attrs = dpfe_v3_groups,
270 .command = {
271 [DPFE_CMD_GET_INFO] = {
272 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
273 [MSG_COMMAND] = 0x0101,
274 [MSG_ARG_COUNT] = 1,
275 [MSG_ARG0] = 1,
276 [MSG_CHKSUM] = 0x104,
277 },
278 [DPFE_CMD_GET_REFRESH] = {
279 [MSG_HEADER] = DPFE_MSG_TYPE_COMMAND,
280 [MSG_COMMAND] = 0x0202,
281 [MSG_ARG_COUNT] = 0,
282 /*
283 * This is a bit ugly. Without arguments, the checksum
284 * follows right after the argument count and not at
285 * offset MSG_CHKSUM.
286 */
287 [MSG_ARG0] = 0x203,
288 },
289 /* There's no GET_VENDOR command in API v3. */
207 }, 290 },
208}; 291};
209 292
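The variable checksum position deserves a concrete example. Later in this patch, __send_command() computes chksum_idx as result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1, so with MSG_ARG_COUNT == 2 a one-argument v2 command keeps its checksum at index 4 (MSG_CHKSUM), while the zero-argument v3 GET_REFRESH lands at index 3, exactly the MSG_ARG0 slot holding 0x203 above. A hedged sketch of the arithmetic:

/* Illustrative only: mirrors the chksum_idx computation in __send_command(). */
static unsigned int ex_chksum_idx(unsigned int arg_count)
{
	return arg_count + MSG_ARG_COUNT + 1;	/* MSG_ARG_COUNT == 2 */
}
/* ex_chksum_idx(1) == 4 == MSG_CHKSUM; ex_chksum_idx(0) == 3 == MSG_ARG0 */
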
@@ -248,13 +331,13 @@ static void __enable_dcpu(void __iomem *regs)
248 writel_relaxed(val, regs + REG_DCPU_RESET); 331 writel_relaxed(val, regs + REG_DCPU_RESET);
249} 332}
250 333
251static unsigned int get_msg_chksum(const u32 msg[]) 334static unsigned int get_msg_chksum(const u32 msg[], unsigned int max)
252{ 335{
253 unsigned int sum = 0; 336 unsigned int sum = 0;
254 unsigned int i; 337 unsigned int i;
255 338
256 /* Don't include the last field in the checksum. */ 339 /* Don't include the last field in the checksum. */
257 for (i = 0; i < MSG_FIELD_MAX - 1; i++) 340 for (i = 0; i < max; i++)
258 sum += msg[i]; 341 sum += msg[i];
259 342
260 return sum; 343 return sum;
@@ -267,6 +350,11 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
267 unsigned int offset; 350 unsigned int offset;
268 void __iomem *ptr = NULL; 351 void __iomem *ptr = NULL;
269 352
353 /* There is no need to use this function for API v3 or later. */
354 if (unlikely(priv->dpfe_api->version >= 3)) {
355 return NULL;
356 }
357
270 msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK; 358 msg_type = (response >> DRAM_MSG_TYPE_OFFSET) & DRAM_MSG_TYPE_MASK;
271 offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK; 359 offset = (response >> DRAM_MSG_ADDR_OFFSET) & DRAM_MSG_ADDR_MASK;
272 360
@@ -294,12 +382,25 @@ static void __iomem *get_msg_ptr(struct private_data *priv, u32 response,
294 return ptr; 382 return ptr;
295} 383}
296 384
385static void __finalize_command(struct private_data *priv)
386{
387 unsigned int release_mbox;
388
389 /*
 390 * Which MBOX register we need to write to signal completion depends
 391 * on the API version.
392 */
393 release_mbox = (priv->dpfe_api->version < 3)
394 ? REG_TO_HOST_MBOX : REG_TO_DCPU_MBOX;
395 writel_relaxed(0, priv->regs + release_mbox);
396}
397
297static int __send_command(struct private_data *priv, unsigned int cmd, 398static int __send_command(struct private_data *priv, unsigned int cmd,
298 u32 result[]) 399 u32 result[])
299{ 400{
300 const u32 *msg = dpfe_commands[cmd]; 401 const u32 *msg = priv->dpfe_api->command[cmd];
301 void __iomem *regs = priv->regs; 402 void __iomem *regs = priv->regs;
302 unsigned int i, chksum; 403 unsigned int i, chksum, chksum_idx;
303 int ret = 0; 404 int ret = 0;
304 u32 resp; 405 u32 resp;
305 406
@@ -308,6 +409,18 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
308 409
309 mutex_lock(&priv->lock); 410 mutex_lock(&priv->lock);
310 411
412 /* Wait for DCPU to become ready */
413 for (i = 0; i < DELAY_LOOP_MAX; i++) {
414 resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
415 if (resp == 0)
416 break;
417 msleep(1);
418 }
419 if (resp != 0) {
420 mutex_unlock(&priv->lock);
421 return -ETIMEDOUT;
422 }
423
311 /* Write command and arguments to message area */ 424 /* Write command and arguments to message area */
312 for (i = 0; i < MSG_FIELD_MAX; i++) 425 for (i = 0; i < MSG_FIELD_MAX; i++)
313 writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i)); 426 writel_relaxed(msg[i], regs + DCPU_MSG_RAM(i));
@@ -321,7 +434,7 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
321 resp = readl_relaxed(regs + REG_TO_HOST_MBOX); 434 resp = readl_relaxed(regs + REG_TO_HOST_MBOX);
322 if (resp > 0) 435 if (resp > 0)
323 break; 436 break;
324 udelay(5); 437 msleep(1);
325 } 438 }
326 439
327 if (i == DELAY_LOOP_MAX) { 440 if (i == DELAY_LOOP_MAX) {
@@ -331,10 +444,11 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
331 /* Read response data */ 444 /* Read response data */
332 for (i = 0; i < MSG_FIELD_MAX; i++) 445 for (i = 0; i < MSG_FIELD_MAX; i++)
333 result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i)); 446 result[i] = readl_relaxed(regs + DCPU_MSG_RAM(i));
447 chksum_idx = result[MSG_ARG_COUNT] + MSG_ARG_COUNT + 1;
334 } 448 }
335 449
336 /* Tell DCPU we are done */ 450 /* Tell DCPU we are done */
337 writel_relaxed(0, regs + REG_TO_HOST_MBOX); 451 __finalize_command(priv);
338 452
339 mutex_unlock(&priv->lock); 453 mutex_unlock(&priv->lock);
340 454
@@ -342,8 +456,8 @@ static int __send_command(struct private_data *priv, unsigned int cmd,
342 return ret; 456 return ret;
343 457
344 /* Verify response */ 458 /* Verify response */
345 chksum = get_msg_chksum(result); 459 chksum = get_msg_chksum(result, chksum_idx);
346 if (chksum != result[MSG_CHKSUM]) 460 if (chksum != result[chksum_idx])
347 resp = DCPU_RET_ERR_CHKSUM; 461 resp = DCPU_RET_ERR_CHKSUM;
348 462
349 if (resp != DCPU_RET_SUCCESS) { 463 if (resp != DCPU_RET_SUCCESS) {
@@ -484,7 +598,15 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
484 return 0; 598 return 0;
485 } 599 }
486 600
487 ret = request_firmware(&fw, FIRMWARE_NAME, dev); 601 /*
602 * If the firmware filename is NULL it means the boot firmware has to
603 * download the DCPU firmware for us. If that didn't work, we have to
604 * bail, since downloading it ourselves wouldn't work either.
605 */
606 if (!priv->dpfe_api->fw_name)
607 return -ENODEV;
608
609 ret = request_firmware(&fw, priv->dpfe_api->fw_name, dev);
488 /* request_firmware() prints its own error messages. */ 610 /* request_firmware() prints its own error messages. */
489 if (ret) 611 if (ret)
490 return ret; 612 return ret;
@@ -525,12 +647,10 @@ static int brcmstb_dpfe_download_firmware(struct platform_device *pdev,
525} 647}
526 648
527static ssize_t generic_show(unsigned int command, u32 response[], 649static ssize_t generic_show(unsigned int command, u32 response[],
528 struct device *dev, char *buf) 650 struct private_data *priv, char *buf)
529{ 651{
530 struct private_data *priv;
531 int ret; 652 int ret;
532 653
533 priv = dev_get_drvdata(dev);
534 if (!priv) 654 if (!priv)
535 return sprintf(buf, "ERROR: driver private data not set\n"); 655 return sprintf(buf, "ERROR: driver private data not set\n");
536 656
@@ -545,10 +665,12 @@ static ssize_t show_info(struct device *dev, struct device_attribute *devattr,
545 char *buf) 665 char *buf)
546{ 666{
547 u32 response[MSG_FIELD_MAX]; 667 u32 response[MSG_FIELD_MAX];
668 struct private_data *priv;
548 unsigned int info; 669 unsigned int info;
549 ssize_t ret; 670 ssize_t ret;
550 671
551 ret = generic_show(DPFE_CMD_GET_INFO, response, dev, buf); 672 priv = dev_get_drvdata(dev);
673 ret = generic_show(DPFE_CMD_GET_INFO, response, priv, buf);
552 if (ret) 674 if (ret)
553 return ret; 675 return ret;
554 676
@@ -571,17 +693,17 @@ static ssize_t show_refresh(struct device *dev,
571 u32 mr4; 693 u32 mr4;
572 ssize_t ret; 694 ssize_t ret;
573 695
574 ret = generic_show(DPFE_CMD_GET_REFRESH, response, dev, buf); 696 priv = dev_get_drvdata(dev);
697 ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
575 if (ret) 698 if (ret)
576 return ret; 699 return ret;
577 700
578 priv = dev_get_drvdata(dev);
579
580 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); 701 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
581 if (!info) 702 if (!info)
582 return ret; 703 return ret;
583 704
584 mr4 = readl_relaxed(info + DRAM_INFO_MR4) & DRAM_INFO_MR4_MASK; 705 mr4 = (readl_relaxed(info + DRAM_INFO_MR4) >> DRAM_INFO_MR4_SHIFT) &
706 DRAM_INFO_MR4_MASK;
585 707
586 refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK; 708 refresh = (mr4 >> DRAM_MR4_REFRESH) & DRAM_MR4_REFRESH_MASK;
587 sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK; 709 sr_abort = (mr4 >> DRAM_MR4_SR_ABORT) & DRAM_MR4_SR_ABORT_MASK;
@@ -608,7 +730,6 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
608 return -EINVAL; 730 return -EINVAL;
609 731
610 priv = dev_get_drvdata(dev); 732 priv = dev_get_drvdata(dev);
611
612 ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response); 733 ret = __send_command(priv, DPFE_CMD_GET_REFRESH, response);
613 if (ret) 734 if (ret)
614 return ret; 735 return ret;
@@ -623,30 +744,58 @@ static ssize_t store_refresh(struct device *dev, struct device_attribute *attr,
623} 744}
624 745
625static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr, 746static ssize_t show_vendor(struct device *dev, struct device_attribute *devattr,
626 char *buf) 747 char *buf)
627{ 748{
628 u32 response[MSG_FIELD_MAX]; 749 u32 response[MSG_FIELD_MAX];
629 struct private_data *priv; 750 struct private_data *priv;
630 void __iomem *info; 751 void __iomem *info;
631 ssize_t ret; 752 ssize_t ret;
753 u32 mr5, mr6, mr7, mr8, err;
632 754
633 ret = generic_show(DPFE_CMD_GET_VENDOR, response, dev, buf); 755 priv = dev_get_drvdata(dev);
756 ret = generic_show(DPFE_CMD_GET_VENDOR, response, priv, buf);
634 if (ret) 757 if (ret)
635 return ret; 758 return ret;
636 759
637 priv = dev_get_drvdata(dev);
638
639 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret); 760 info = get_msg_ptr(priv, response[MSG_ARG0], buf, &ret);
640 if (!info) 761 if (!info)
641 return ret; 762 return ret;
642 763
643 return sprintf(buf, "%#x %#x %#x %#x %#x\n", 764 mr5 = (readl_relaxed(info + DRAM_VENDOR_MR5) >> DRAM_VENDOR_SHIFT) &
644 readl_relaxed(info + DRAM_VENDOR_MR5) & DRAM_VENDOR_MASK, 765 DRAM_VENDOR_MASK;
645 readl_relaxed(info + DRAM_VENDOR_MR6) & DRAM_VENDOR_MASK, 766 mr6 = (readl_relaxed(info + DRAM_VENDOR_MR6) >> DRAM_VENDOR_SHIFT) &
646 readl_relaxed(info + DRAM_VENDOR_MR7) & DRAM_VENDOR_MASK, 767 DRAM_VENDOR_MASK;
647 readl_relaxed(info + DRAM_VENDOR_MR8) & DRAM_VENDOR_MASK, 768 mr7 = (readl_relaxed(info + DRAM_VENDOR_MR7) >> DRAM_VENDOR_SHIFT) &
648 readl_relaxed(info + DRAM_VENDOR_ERROR) & 769 DRAM_VENDOR_MASK;
649 DRAM_VENDOR_MASK); 770 mr8 = (readl_relaxed(info + DRAM_VENDOR_MR8) >> DRAM_VENDOR_SHIFT) &
771 DRAM_VENDOR_MASK;
772 err = readl_relaxed(info + DRAM_VENDOR_ERROR) & DRAM_VENDOR_MASK;
773
774 return sprintf(buf, "%#x %#x %#x %#x %#x\n", mr5, mr6, mr7, mr8, err);
775}
776
777static ssize_t show_dram(struct device *dev, struct device_attribute *devattr,
778 char *buf)
779{
780 u32 response[MSG_FIELD_MAX];
781 struct private_data *priv;
782 ssize_t ret;
783 u32 mr4, mr5, mr6, mr7, mr8, err;
784
785 priv = dev_get_drvdata(dev);
786 ret = generic_show(DPFE_CMD_GET_REFRESH, response, priv, buf);
787 if (ret)
788 return ret;
789
790 mr4 = response[MSG_ARG0 + 0] & DRAM_INFO_MR4_MASK;
791 mr5 = response[MSG_ARG0 + 1] & DRAM_DDR_INFO_MASK;
792 mr6 = response[MSG_ARG0 + 2] & DRAM_DDR_INFO_MASK;
793 mr7 = response[MSG_ARG0 + 3] & DRAM_DDR_INFO_MASK;
794 mr8 = response[MSG_ARG0 + 4] & DRAM_DDR_INFO_MASK;
795 err = response[MSG_ARG0 + 5] & DRAM_DDR_INFO_MASK;
796
797 return sprintf(buf, "%#x %#x %#x %#x %#x %#x\n", mr4, mr5, mr6, mr7,
798 mr8, err);
650} 799}
651 800
652static int brcmstb_dpfe_resume(struct platform_device *pdev) 801static int brcmstb_dpfe_resume(struct platform_device *pdev)
@@ -656,17 +805,6 @@ static int brcmstb_dpfe_resume(struct platform_device *pdev)
656 return brcmstb_dpfe_download_firmware(pdev, &init); 805 return brcmstb_dpfe_download_firmware(pdev, &init);
657} 806}
658 807
659static DEVICE_ATTR(dpfe_info, 0444, show_info, NULL);
660static DEVICE_ATTR(dpfe_refresh, 0644, show_refresh, store_refresh);
661static DEVICE_ATTR(dpfe_vendor, 0444, show_vendor, NULL);
662static struct attribute *dpfe_attrs[] = {
663 &dev_attr_dpfe_info.attr,
664 &dev_attr_dpfe_refresh.attr,
665 &dev_attr_dpfe_vendor.attr,
666 NULL
667};
668ATTRIBUTE_GROUPS(dpfe);
669
670static int brcmstb_dpfe_probe(struct platform_device *pdev) 808static int brcmstb_dpfe_probe(struct platform_device *pdev)
671{ 809{
672 struct device *dev = &pdev->dev; 810 struct device *dev = &pdev->dev;
@@ -703,26 +841,47 @@ static int brcmstb_dpfe_probe(struct platform_device *pdev)
703 return -ENOENT; 841 return -ENOENT;
704 } 842 }
705 843
844 priv->dpfe_api = of_device_get_match_data(dev);
845 if (unlikely(!priv->dpfe_api)) {
846 /*
847 * It should be impossible to end up here, but to be safe we
848 * check anyway.
849 */
850 dev_err(dev, "Couldn't determine API\n");
851 return -ENOENT;
852 }
853
706 ret = brcmstb_dpfe_download_firmware(pdev, &init); 854 ret = brcmstb_dpfe_download_firmware(pdev, &init);
707 if (ret) 855 if (ret) {
856 dev_err(dev, "Couldn't download firmware -- %d\n", ret);
708 return ret; 857 return ret;
858 }
709 859
710 ret = sysfs_create_groups(&pdev->dev.kobj, dpfe_groups); 860 ret = sysfs_create_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
711 if (!ret) 861 if (!ret)
712 dev_info(dev, "registered.\n"); 862 dev_info(dev, "registered with API v%d.\n",
863 priv->dpfe_api->version);
713 864
714 return ret; 865 return ret;
715} 866}
716 867
717static int brcmstb_dpfe_remove(struct platform_device *pdev) 868static int brcmstb_dpfe_remove(struct platform_device *pdev)
718{ 869{
719 sysfs_remove_groups(&pdev->dev.kobj, dpfe_groups); 870 struct private_data *priv = dev_get_drvdata(&pdev->dev);
871
872 sysfs_remove_groups(&pdev->dev.kobj, priv->dpfe_api->sysfs_attrs);
720 873
721 return 0; 874 return 0;
722} 875}
723 876
724static const struct of_device_id brcmstb_dpfe_of_match[] = { 877static const struct of_device_id brcmstb_dpfe_of_match[] = {
725 { .compatible = "brcm,dpfe-cpu", }, 878 /* Use legacy API v2 for a select number of chips */
879 { .compatible = "brcm,bcm7268-dpfe-cpu", .data = &dpfe_api_v2 },
880 { .compatible = "brcm,bcm7271-dpfe-cpu", .data = &dpfe_api_v2 },
881 { .compatible = "brcm,bcm7278-dpfe-cpu", .data = &dpfe_api_v2 },
882 { .compatible = "brcm,bcm7211-dpfe-cpu", .data = &dpfe_api_v2 },
883 /* API v3 is the default going forward */
884 { .compatible = "brcm,dpfe-cpu", .data = &dpfe_api_v3 },
726 {} 885 {}
727}; 886};
728MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match); 887MODULE_DEVICE_TABLE(of, brcmstb_dpfe_of_match);
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index ee67a9a5d775..402c6bc8e621 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -23,8 +23,9 @@
23#include <linux/list.h> 23#include <linux/list.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/pm.h> 25#include <linux/pm.h>
26#include <memory/jedec_ddr.h> 26
27#include "emif.h" 27#include "emif.h"
28#include "jedec_ddr.h"
28#include "of_memory.h" 29#include "of_memory.h"
29 30
30/** 31/**
diff --git a/include/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index 90a9dabbe606..4a21b5044ff8 100644
--- a/include/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -6,8 +6,8 @@
6 * 6 *
7 * Aneesh V <aneesh@ti.com> 7 * Aneesh V <aneesh@ti.com>
8 */ 8 */
9#ifndef __LINUX_JEDEC_DDR_H 9#ifndef __JEDEC_DDR_H
10#define __LINUX_JEDEC_DDR_H 10#define __JEDEC_DDR_H
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
@@ -169,4 +169,4 @@ extern const struct lpddr2_timings
169 lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES]; 169 lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
170extern const struct lpddr2_min_tck lpddr2_jedec_min_tck; 170extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
171 171
172#endif /* __LINUX_JEDEC_DDR_H */ 172#endif /* __JEDEC_DDR_H */
diff --git a/lib/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
index d0b312e28d36..ed601d813175 100644
--- a/lib/jedec_ddr_data.c
+++ b/drivers/memory/jedec_ddr_data.c
@@ -7,8 +7,9 @@
7 * Aneesh V <aneesh@ti.com> 7 * Aneesh V <aneesh@ti.com>
8 */ 8 */
9 9
10#include <memory/jedec_ddr.h> 10#include <linux/export.h>
11#include <linux/module.h> 11
12#include "jedec_ddr.h"
12 13
13/* LPDDR2 addressing details from JESD209-2 section 2.4 */ 14/* LPDDR2 addressing details from JESD209-2 section 2.4 */
14const struct lpddr2_addressing 15const struct lpddr2_addressing
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index 12a61f558644..46539b27a3fb 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -10,8 +10,9 @@
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/of.h> 11#include <linux/of.h>
12#include <linux/gfp.h> 12#include <linux/gfp.h>
13#include <memory/jedec_ddr.h>
14#include <linux/export.h> 13#include <linux/export.h>
14
15#include "jedec_ddr.h"
15#include "of_memory.h" 16#include "of_memory.h"
16 17
17/** 18/**
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 41f08b2effd2..5d0ccb2be206 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -30,28 +30,6 @@
30#define MC_EMEM_ARB_MISC1 0xdc 30#define MC_EMEM_ARB_MISC1 0xdc
31#define MC_EMEM_ARB_RING1_THROTTLE 0xe0 31#define MC_EMEM_ARB_RING1_THROTTLE 0xe0
32 32
33static const unsigned long tegra124_mc_emem_regs[] = {
34 MC_EMEM_ARB_CFG,
35 MC_EMEM_ARB_OUTSTANDING_REQ,
36 MC_EMEM_ARB_TIMING_RCD,
37 MC_EMEM_ARB_TIMING_RP,
38 MC_EMEM_ARB_TIMING_RC,
39 MC_EMEM_ARB_TIMING_RAS,
40 MC_EMEM_ARB_TIMING_FAW,
41 MC_EMEM_ARB_TIMING_RRD,
42 MC_EMEM_ARB_TIMING_RAP2PRE,
43 MC_EMEM_ARB_TIMING_WAP2PRE,
44 MC_EMEM_ARB_TIMING_R2R,
45 MC_EMEM_ARB_TIMING_W2W,
46 MC_EMEM_ARB_TIMING_R2W,
47 MC_EMEM_ARB_TIMING_W2R,
48 MC_EMEM_ARB_DA_TURNS,
49 MC_EMEM_ARB_DA_COVERS,
50 MC_EMEM_ARB_MISC0,
51 MC_EMEM_ARB_MISC1,
52 MC_EMEM_ARB_RING1_THROTTLE
53};
54
55static const struct tegra_mc_client tegra124_mc_clients[] = { 33static const struct tegra_mc_client tegra124_mc_clients[] = {
56 { 34 {
57 .id = 0x00, 35 .id = 0x00,
@@ -1046,6 +1024,28 @@ static const struct tegra_mc_reset tegra124_mc_resets[] = {
1046}; 1024};
1047 1025
1048#ifdef CONFIG_ARCH_TEGRA_124_SOC 1026#ifdef CONFIG_ARCH_TEGRA_124_SOC
1027static const unsigned long tegra124_mc_emem_regs[] = {
1028 MC_EMEM_ARB_CFG,
1029 MC_EMEM_ARB_OUTSTANDING_REQ,
1030 MC_EMEM_ARB_TIMING_RCD,
1031 MC_EMEM_ARB_TIMING_RP,
1032 MC_EMEM_ARB_TIMING_RC,
1033 MC_EMEM_ARB_TIMING_RAS,
1034 MC_EMEM_ARB_TIMING_FAW,
1035 MC_EMEM_ARB_TIMING_RRD,
1036 MC_EMEM_ARB_TIMING_RAP2PRE,
1037 MC_EMEM_ARB_TIMING_WAP2PRE,
1038 MC_EMEM_ARB_TIMING_R2R,
1039 MC_EMEM_ARB_TIMING_W2W,
1040 MC_EMEM_ARB_TIMING_R2W,
1041 MC_EMEM_ARB_TIMING_W2R,
1042 MC_EMEM_ARB_DA_TURNS,
1043 MC_EMEM_ARB_DA_COVERS,
1044 MC_EMEM_ARB_MISC0,
1045 MC_EMEM_ARB_MISC1,
1046 MC_EMEM_ARB_RING1_THROTTLE
1047};
1048
1049static const struct tegra_smmu_soc tegra124_smmu_soc = { 1049static const struct tegra_smmu_soc tegra124_smmu_soc = {
1050 .clients = tegra124_mc_clients, 1050 .clients = tegra124_mc_clients,
1051 .num_clients = ARRAY_SIZE(tegra124_mc_clients), 1051 .num_clients = ARRAY_SIZE(tegra124_mc_clients),
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index d506d32385fc..21efb7d39d62 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -118,7 +118,7 @@ config RESET_QCOM_PDC
118 118
119config RESET_SIMPLE 119config RESET_SIMPLE
120 bool "Simple Reset Controller Driver" if COMPILE_TEST 120 bool "Simple Reset Controller Driver" if COMPILE_TEST
121 default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED 121 default ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARCH_ASPEED || ARCH_BITMAIN
122 help 122 help
123 This enables a simple reset controller driver for reset lines that 123 This enables a simple reset controller driver for reset lines that
124 that can be asserted and deasserted by toggling bits in a contiguous, 124 that can be asserted and deasserted by toggling bits in a contiguous,
@@ -130,6 +130,7 @@ config RESET_SIMPLE
130 - RCC reset controller in STM32 MCUs 130 - RCC reset controller in STM32 MCUs
131 - Allwinner SoCs 131 - Allwinner SoCs
132 - ZTE's zx2967 family 132 - ZTE's zx2967 family
133 - Bitmain BM1880 SoC
133 134
134config RESET_STM32MP157 135config RESET_STM32MP157
135 bool "STM32MP157 Reset Driver" if COMPILE_TEST 136 bool "STM32MP157 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 21b9bd5692e1..213ff40dda11 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -690,9 +690,6 @@ __reset_control_get_from_lookup(struct device *dev, const char *con_id,
690 const char *dev_id = dev_name(dev); 690 const char *dev_id = dev_name(dev);
691 struct reset_control *rstc = NULL; 691 struct reset_control *rstc = NULL;
692 692
693 if (!dev)
694 return ERR_PTR(-EINVAL);
695
696 mutex_lock(&reset_lookup_mutex); 693 mutex_lock(&reset_lookup_mutex);
697 694
698 list_for_each_entry(lookup, &reset_lookup_list, list) { 695 list_for_each_entry(lookup, &reset_lookup_list, list) {
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 7e48b9c05ecd..1154f7b1f4dd 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -125,6 +125,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
125 .data = &reset_simple_active_low }, 125 .data = &reset_simple_active_low },
126 { .compatible = "aspeed,ast2400-lpc-reset" }, 126 { .compatible = "aspeed,ast2400-lpc-reset" },
127 { .compatible = "aspeed,ast2500-lpc-reset" }, 127 { .compatible = "aspeed,ast2500-lpc-reset" },
128 { .compatible = "bitmain,bm1880-reset",
129 .data = &reset_simple_active_low },
128 { /* sentinel */ }, 130 { /* sentinel */ },
129}; 131};
130 132
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index be95a37c3fec..c655f5f92b12 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -35,6 +35,7 @@ struct meson_canvas {
35 void __iomem *reg_base; 35 void __iomem *reg_base;
36 spinlock_t lock; /* canvas device lock */ 36 spinlock_t lock; /* canvas device lock */
37 u8 used[NUM_CANVAS]; 37 u8 used[NUM_CANVAS];
38 bool supports_endianness;
38}; 39};
39 40
40static void canvas_write(struct meson_canvas *canvas, u32 reg, u32 val) 41static void canvas_write(struct meson_canvas *canvas, u32 reg, u32 val)
@@ -86,6 +87,12 @@ int meson_canvas_config(struct meson_canvas *canvas, u8 canvas_index,
86{ 87{
87 unsigned long flags; 88 unsigned long flags;
88 89
90 if (endian && !canvas->supports_endianness) {
91 dev_err(canvas->dev,
92 "Endianness is not supported on this SoC\n");
93 return -EINVAL;
94 }
95
89 spin_lock_irqsave(&canvas->lock, flags); 96 spin_lock_irqsave(&canvas->lock, flags);
90 if (!canvas->used[canvas_index]) { 97 if (!canvas->used[canvas_index]) {
91 dev_err(canvas->dev, 98 dev_err(canvas->dev,
@@ -172,6 +179,8 @@ static int meson_canvas_probe(struct platform_device *pdev)
172 if (IS_ERR(canvas->reg_base)) 179 if (IS_ERR(canvas->reg_base))
173 return PTR_ERR(canvas->reg_base); 180 return PTR_ERR(canvas->reg_base);
174 181
182 canvas->supports_endianness = of_device_get_match_data(dev);
183
175 canvas->dev = dev; 184 canvas->dev = dev;
176 spin_lock_init(&canvas->lock); 185 spin_lock_init(&canvas->lock);
177 dev_set_drvdata(dev, canvas); 186 dev_set_drvdata(dev, canvas);
@@ -180,7 +189,10 @@ static int meson_canvas_probe(struct platform_device *pdev)
180} 189}
181 190
182static const struct of_device_id canvas_dt_match[] = { 191static const struct of_device_id canvas_dt_match[] = {
183 { .compatible = "amlogic,canvas" }, 192 { .compatible = "amlogic,meson8-canvas", .data = (void *)false, },
193 { .compatible = "amlogic,meson8b-canvas", .data = (void *)false, },
194 { .compatible = "amlogic,meson8m2-canvas", .data = (void *)false, },
195 { .compatible = "amlogic,canvas", .data = (void *)true, },
184 {} 196 {}
185}; 197};
186MODULE_DEVICE_TABLE(of, canvas_dt_match); 198MODULE_DEVICE_TABLE(of, canvas_dt_match);
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 61276ec692f8..01ed21e8bfee 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -64,6 +64,7 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
64 unsigned long param) 64 unsigned long param)
65{ 65{
66 struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file); 66 struct aspeed_lpc_ctrl *lpc_ctrl = file_aspeed_lpc_ctrl(file);
67 struct device *dev = file->private_data;
67 void __user *p = (void __user *)param; 68 void __user *p = (void __user *)param;
68 struct aspeed_lpc_ctrl_mapping map; 69 struct aspeed_lpc_ctrl_mapping map;
69 u32 addr; 70 u32 addr;
@@ -86,6 +87,12 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
86 if (map.window_id != 0) 87 if (map.window_id != 0)
87 return -EINVAL; 88 return -EINVAL;
88 89
90 /* If memory-region is not described in device tree */
91 if (!lpc_ctrl->mem_size) {
92 dev_dbg(dev, "Didn't find reserved memory\n");
93 return -ENXIO;
94 }
95
89 map.size = lpc_ctrl->mem_size; 96 map.size = lpc_ctrl->mem_size;
90 97
91 return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0; 98 return copy_to_user(p, &map, sizeof(map)) ? -EFAULT : 0;
@@ -122,9 +129,18 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
122 return -EINVAL; 129 return -EINVAL;
123 130
124 if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) { 131 if (map.window_type == ASPEED_LPC_CTRL_WINDOW_FLASH) {
132 if (!lpc_ctrl->pnor_size) {
133 dev_dbg(dev, "Didn't find host pnor flash\n");
134 return -ENXIO;
135 }
125 addr = lpc_ctrl->pnor_base; 136 addr = lpc_ctrl->pnor_base;
126 size = lpc_ctrl->pnor_size; 137 size = lpc_ctrl->pnor_size;
127 } else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) { 138 } else if (map.window_type == ASPEED_LPC_CTRL_WINDOW_MEMORY) {
139 /* If memory-region is not described in device tree */
140 if (!lpc_ctrl->mem_size) {
141 dev_dbg(dev, "Didn't find reserved memory\n");
142 return -ENXIO;
143 }
128 addr = lpc_ctrl->mem_base; 144 addr = lpc_ctrl->mem_base;
129 size = lpc_ctrl->mem_size; 145 size = lpc_ctrl->mem_size;
130 } else { 146 } else {
@@ -192,40 +208,41 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
192 if (!lpc_ctrl) 208 if (!lpc_ctrl)
193 return -ENOMEM; 209 return -ENOMEM;
194 210
 211	/* If flash is described in the device tree then store it */
195 node = of_parse_phandle(dev->of_node, "flash", 0); 212 node = of_parse_phandle(dev->of_node, "flash", 0);
196 if (!node) { 213 if (!node) {
197 dev_err(dev, "Didn't find host pnor flash node\n"); 214 dev_dbg(dev, "Didn't find host pnor flash node\n");
198 return -ENODEV; 215 } else {
199 } 216 rc = of_address_to_resource(node, 1, &resm);
217 of_node_put(node);
218 if (rc) {
219 dev_err(dev, "Couldn't address to resource for flash\n");
220 return rc;
221 }
200 222
201 rc = of_address_to_resource(node, 1, &resm); 223 lpc_ctrl->pnor_size = resource_size(&resm);
202 of_node_put(node); 224 lpc_ctrl->pnor_base = resm.start;
203 if (rc) {
204 dev_err(dev, "Couldn't address to resource for flash\n");
205 return rc;
206 } 225 }
207 226
208 lpc_ctrl->pnor_size = resource_size(&resm);
209 lpc_ctrl->pnor_base = resm.start;
210 227
211 dev_set_drvdata(&pdev->dev, lpc_ctrl); 228 dev_set_drvdata(&pdev->dev, lpc_ctrl);
212 229
 230	/* If memory-region is described in the device tree then store it */
213 node = of_parse_phandle(dev->of_node, "memory-region", 0); 231 node = of_parse_phandle(dev->of_node, "memory-region", 0);
214 if (!node) { 232 if (!node) {
215 dev_err(dev, "Didn't find reserved memory\n"); 233 dev_dbg(dev, "Didn't find reserved memory\n");
216 return -EINVAL; 234 } else {
217 } 235 rc = of_address_to_resource(node, 0, &resm);
236 of_node_put(node);
237 if (rc) {
238 dev_err(dev, "Couldn't address to resource for reserved memory\n");
239 return -ENXIO;
240 }
218 241
219 rc = of_address_to_resource(node, 0, &resm); 242 lpc_ctrl->mem_size = resource_size(&resm);
220 of_node_put(node); 243 lpc_ctrl->mem_base = resm.start;
221 if (rc) {
222 dev_err(dev, "Couldn't address to resource for reserved memory\n");
223 return -ENOMEM;
224 } 244 }
225 245
226 lpc_ctrl->mem_size = resource_size(&resm);
227 lpc_ctrl->mem_base = resm.start;
228
229 lpc_ctrl->regmap = syscon_node_to_regmap( 246 lpc_ctrl->regmap = syscon_node_to_regmap(
230 pdev->dev.parent->of_node); 247 pdev->dev.parent->of_node);
231 if (IS_ERR(lpc_ctrl->regmap)) { 248 if (IS_ERR(lpc_ctrl->regmap)) {
@@ -254,8 +271,6 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
254 goto err; 271 goto err;
255 } 272 }
256 273
257 dev_info(dev, "Loaded at %pr\n", &resm);
258
259 return 0; 274 return 0;
260 275
261err: 276err:
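
With the new checks, both the GET_SIZE and MAP ioctls fail with -ENXIO when the corresponding "flash" or "memory-region" node is absent, instead of handing userspace a zero-sized window. A minimal userspace sketch of probing for the reserved-memory window follows; the device path and the MEMORY window type are assumptions derived from the driver's miscdevice name and its UAPI header, not guarantees of this patch:

	#include <errno.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/aspeed-lpc-ctrl.h>

	int main(void)
	{
		struct aspeed_lpc_ctrl_mapping map = {
			.window_type = ASPEED_LPC_CTRL_WINDOW_MEMORY,
			.window_id = 0,
		};
		/* assumed device path, derived from the miscdevice name */
		int fd = open("/dev/aspeed-lpc-ctrl", O_RDWR);

		if (fd < 0)
			return 1;

		/* ENXIO now means: no memory-region described in the device tree */
		if (ioctl(fd, ASPEED_LPC_CTRL_IOCTL_GET_SIZE, &map) < 0)
			fprintf(stderr, "GET_SIZE: %s\n", strerror(errno));
		else
			printf("window size: 0x%x\n", map.size);

		close(fd);
		return 0;
	}
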
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 217f7752cf2c..f9ad8ad54a7d 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -30,4 +30,14 @@ config FSL_MC_DPIO
30 other DPAA2 objects. This driver does not expose the DPIO 30 other DPAA2 objects. This driver does not expose the DPIO
31 objects individually, but groups them under a service layer 31 objects individually, but groups them under a service layer
32 API. 32 API.
33
34config DPAA2_CONSOLE
35 tristate "QorIQ DPAA2 console driver"
36 depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
37 default y
38 help
39 Console driver for DPAA2 platforms. Exports 2 char devices,
40 /dev/dpaa2_mc_console and /dev/dpaa2_aiop_console,
41 which can be used to dump the Management Complex and AIOP
42 firmware logs.
33endmenu 43endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 158541a83d26..71dee8d0d1f0 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_QUICC_ENGINE) += qe/
8obj-$(CONFIG_CPM) += qe/ 8obj-$(CONFIG_CPM) += qe/
9obj-$(CONFIG_FSL_GUTS) += guts.o 9obj-$(CONFIG_FSL_GUTS) += guts.o
10obj-$(CONFIG_FSL_MC_DPIO) += dpio/ 10obj-$(CONFIG_FSL_MC_DPIO) += dpio/
11obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
diff --git a/drivers/soc/fsl/dpaa2-console.c b/drivers/soc/fsl/dpaa2-console.c
new file mode 100644
index 000000000000..9168d8ddc932
--- /dev/null
+++ b/drivers/soc/fsl/dpaa2-console.c
@@ -0,0 +1,329 @@
1// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2/*
3 * Freescale DPAA2 Platforms Console Driver
4 *
5 * Copyright 2015-2016 Freescale Semiconductor Inc.
6 * Copyright 2018 NXP
7 */
8
9#define pr_fmt(fmt) "dpaa2-console: " fmt
10
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/of_address.h>
14#include <linux/miscdevice.h>
15#include <linux/uaccess.h>
16#include <linux/slab.h>
17#include <linux/fs.h>
18#include <linux/io.h>
19
20/* MC firmware base low/high registers indexes */
21#define MCFBALR_OFFSET 0
22#define MCFBAHR_OFFSET 1
23
24/* Bit masks used to get the most/least significant part of the MC base addr */
25#define MC_FW_ADDR_MASK_HIGH 0x1FFFF
26#define MC_FW_ADDR_MASK_LOW 0xE0000000
27
28#define MC_BUFFER_OFFSET 0x01000000
29#define MC_BUFFER_SIZE (1024 * 1024 * 16)
30#define MC_OFFSET_DELTA MC_BUFFER_OFFSET
31
32#define AIOP_BUFFER_OFFSET 0x06000000
33#define AIOP_BUFFER_SIZE (1024 * 1024 * 16)
34#define AIOP_OFFSET_DELTA 0
35
36#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000
37#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND))
38
39/* MC and AIOP Magic words */
40#define MAGIC_MC 0x4d430100
41#define MAGIC_AIOP 0x41494F50
42
43struct log_header {
44 __le32 magic_word;
45 char reserved[4];
46 __le32 buf_start;
47 __le32 buf_length;
48 __le32 last_byte;
49};
50
51struct console_data {
52 void __iomem *map_addr;
53 struct log_header __iomem *hdr;
54 void __iomem *start_addr;
55 void __iomem *end_addr;
56 void __iomem *end_of_data;
57 void __iomem *cur_ptr;
58};
59
60static struct resource mc_base_addr;
61
62static inline void adjust_end(struct console_data *cd)
63{
64 u32 last_byte = readl(&cd->hdr->last_byte);
65
66 cd->end_of_data = cd->start_addr + LAST_BYTE(last_byte);
67}
68
69static u64 get_mc_fw_base_address(void)
70{
71 u64 mcfwbase = 0ULL;
72 u32 __iomem *mcfbaregs;
73
74 mcfbaregs = ioremap(mc_base_addr.start, resource_size(&mc_base_addr));
75 if (!mcfbaregs) {
76 pr_err("could not map MC Firmaware Base registers\n");
77 return 0;
78 }
79
80 mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) &
81 MC_FW_ADDR_MASK_HIGH;
82 mcfwbase <<= 32;
83 mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_ADDR_MASK_LOW;
84 iounmap(mcfbaregs);
85
86 pr_debug("MC base address at 0x%016llx\n", mcfwbase);
87 return mcfwbase;
88}
89
90static ssize_t dpaa2_console_size(struct console_data *cd)
91{
92 ssize_t size;
93
94 if (cd->cur_ptr <= cd->end_of_data)
95 size = cd->end_of_data - cd->cur_ptr;
96 else
97 size = (cd->end_addr - cd->cur_ptr) +
98 (cd->end_of_data - cd->start_addr);
99
100 return size;
101}
102
103static int dpaa2_generic_console_open(struct inode *node, struct file *fp,
104 u64 offset, u64 size,
105 u32 expected_magic,
106 u32 offset_delta)
107{
108 u32 read_magic, wrapped, last_byte, buf_start, buf_length;
109 struct console_data *cd;
110 u64 base_addr;
111 int err;
112
113 cd = kmalloc(sizeof(*cd), GFP_KERNEL);
114 if (!cd)
115 return -ENOMEM;
116
117 base_addr = get_mc_fw_base_address();
118 if (!base_addr) {
119 err = -EIO;
120 goto err_fwba;
121 }
122
123 cd->map_addr = ioremap(base_addr + offset, size);
124 if (!cd->map_addr) {
125 pr_err("cannot map console log memory\n");
126 err = -EIO;
127 goto err_ioremap;
128 }
129
130 cd->hdr = (struct log_header __iomem *)cd->map_addr;
131 read_magic = readl(&cd->hdr->magic_word);
132 last_byte = readl(&cd->hdr->last_byte);
133 buf_start = readl(&cd->hdr->buf_start);
134 buf_length = readl(&cd->hdr->buf_length);
135
136 if (read_magic != expected_magic) {
137 pr_warn("expected = %08x, read = %08x\n",
138 expected_magic, read_magic);
139 err = -EIO;
140 goto err_magic;
141 }
142
143 cd->start_addr = cd->map_addr + buf_start - offset_delta;
144 cd->end_addr = cd->start_addr + buf_length;
145
146 wrapped = last_byte & LOG_HEADER_FLAG_BUFFER_WRAPAROUND;
147
148 adjust_end(cd);
149 if (wrapped && cd->end_of_data != cd->end_addr)
150 cd->cur_ptr = cd->end_of_data + 1;
151 else
152 cd->cur_ptr = cd->start_addr;
153
154 fp->private_data = cd;
155
156 return 0;
157
158err_magic:
159 iounmap(cd->map_addr);
160
161err_ioremap:
162err_fwba:
163 kfree(cd);
164
165 return err;
166}
167
168static int dpaa2_mc_console_open(struct inode *node, struct file *fp)
169{
170 return dpaa2_generic_console_open(node, fp,
171 MC_BUFFER_OFFSET, MC_BUFFER_SIZE,
172 MAGIC_MC, MC_OFFSET_DELTA);
173}
174
175static int dpaa2_aiop_console_open(struct inode *node, struct file *fp)
176{
177 return dpaa2_generic_console_open(node, fp,
178 AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE,
179 MAGIC_AIOP, AIOP_OFFSET_DELTA);
180}
181
182static int dpaa2_console_close(struct inode *node, struct file *fp)
183{
184 struct console_data *cd = fp->private_data;
185
186 iounmap(cd->map_addr);
187 kfree(cd);
188 return 0;
189}
190
191static ssize_t dpaa2_console_read(struct file *fp, char __user *buf,
192 size_t count, loff_t *f_pos)
193{
194 struct console_data *cd = fp->private_data;
195 size_t bytes = dpaa2_console_size(cd);
196 size_t bytes_end = cd->end_addr - cd->cur_ptr;
197 size_t written = 0;
198 void *kbuf;
199 int err;
200
201 /* Check if we need to adjust the end of data addr */
202 adjust_end(cd);
203
204 if (cd->end_of_data == cd->cur_ptr)
205 return 0;
206
207 if (count < bytes)
208 bytes = count;
209
210 kbuf = kmalloc(bytes, GFP_KERNEL);
211 if (!kbuf)
212 return -ENOMEM;
213
214 if (bytes > bytes_end) {
215 memcpy_fromio(kbuf, cd->cur_ptr, bytes_end);
216 if (copy_to_user(buf, kbuf, bytes_end)) {
217 err = -EFAULT;
218 goto err_free_buf;
219 }
220 buf += bytes_end;
221 cd->cur_ptr = cd->start_addr;
222 bytes -= bytes_end;
223 written += bytes_end;
224 }
225
226 memcpy_fromio(kbuf, cd->cur_ptr, bytes);
227 if (copy_to_user(buf, kbuf, bytes)) {
228 err = -EFAULT;
229 goto err_free_buf;
230 }
231 cd->cur_ptr += bytes;
232 written += bytes;
 233	kfree(kbuf); /* also free the bounce buffer on success, avoiding a leak */
 234	return written;
235
236err_free_buf:
237 kfree(kbuf);
238
239 return err;
240}
241
242static const struct file_operations dpaa2_mc_console_fops = {
243 .owner = THIS_MODULE,
244 .open = dpaa2_mc_console_open,
245 .release = dpaa2_console_close,
246 .read = dpaa2_console_read,
247};
248
249static struct miscdevice dpaa2_mc_console_dev = {
250 .minor = MISC_DYNAMIC_MINOR,
251 .name = "dpaa2_mc_console",
252 .fops = &dpaa2_mc_console_fops
253};
254
255static const struct file_operations dpaa2_aiop_console_fops = {
256 .owner = THIS_MODULE,
257 .open = dpaa2_aiop_console_open,
258 .release = dpaa2_console_close,
259 .read = dpaa2_console_read,
260};
261
262static struct miscdevice dpaa2_aiop_console_dev = {
263 .minor = MISC_DYNAMIC_MINOR,
264 .name = "dpaa2_aiop_console",
265 .fops = &dpaa2_aiop_console_fops
266};
267
268static int dpaa2_console_probe(struct platform_device *pdev)
269{
270 int error;
271
272 error = of_address_to_resource(pdev->dev.of_node, 0, &mc_base_addr);
273 if (error < 0) {
274 pr_err("of_address_to_resource() failed for %pOF with %d\n",
275 pdev->dev.of_node, error);
276 return error;
277 }
278
279 error = misc_register(&dpaa2_mc_console_dev);
280 if (error) {
281 pr_err("cannot register device %s\n",
282 dpaa2_mc_console_dev.name);
283 goto err_register_mc;
284 }
285
286 error = misc_register(&dpaa2_aiop_console_dev);
287 if (error) {
288 pr_err("cannot register device %s\n",
289 dpaa2_aiop_console_dev.name);
290 goto err_register_aiop;
291 }
292
293 return 0;
294
295err_register_aiop:
296 misc_deregister(&dpaa2_mc_console_dev);
297err_register_mc:
298 return error;
299}
300
301static int dpaa2_console_remove(struct platform_device *pdev)
302{
303 misc_deregister(&dpaa2_mc_console_dev);
304 misc_deregister(&dpaa2_aiop_console_dev);
305
306 return 0;
307}
308
309static const struct of_device_id dpaa2_console_match_table[] = {
310 { .compatible = "fsl,dpaa2-console",},
311 {},
312};
313
314MODULE_DEVICE_TABLE(of, dpaa2_console_match_table);
315
316static struct platform_driver dpaa2_console_driver = {
317 .driver = {
318 .name = "dpaa2-console",
319 .pm = NULL,
320 .of_match_table = dpaa2_console_match_table,
321 },
322 .probe = dpaa2_console_probe,
323 .remove = dpaa2_console_remove,
324};
325module_platform_driver(dpaa2_console_driver);
326
327MODULE_LICENSE("Dual BSD/GPL");
328MODULE_AUTHOR("Roy Pledge <roy.pledge@nxp.com>");
329MODULE_DESCRIPTION("DPAA2 console driver");
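
Because dpaa2_console_read() folds the ring-buffer wraparound into a single read() stream and returns 0 once cur_ptr catches up with end_of_data, dumping a firmware log is just a read-until-EOF loop. A minimal sketch (the device paths come from the miscdevice names above; the buffer size is arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/dev/dpaa2_mc_console", O_RDONLY);

		if (fd < 0)
			return 1;

		/* read() returns 0 once the log has been fully drained */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);

		close(fd);
		return 0;
	}
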
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index c0cdc8946031..70014ecce2a7 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -197,13 +197,22 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
197 desc.cpu); 197 desc.cpu);
198 } 198 }
199 199
200 /* 200 if (dpio_dev->obj_desc.region_count < 3) {
201 * Set the CENA regs to be the cache inhibited area of the portal to 201 /* No support for DDR backed portals, use classic mapping */
202 * avoid coherency issues if a user migrates to another core. 202 /*
203 */ 203 * Set the CENA regs to be the cache inhibited area of the
204 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start, 204 * portal to avoid coherency issues if a user migrates to
205 resource_size(&dpio_dev->regions[1]), 205 * another core.
206 MEMREMAP_WC); 206 */
207 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
208 resource_size(&dpio_dev->regions[1]),
209 MEMREMAP_WC);
210 } else {
211 desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
212 resource_size(&dpio_dev->regions[2]),
213 MEMREMAP_WB);
214 }
215
207 if (IS_ERR(desc.regs_cena)) { 216 if (IS_ERR(desc.regs_cena)) {
208 dev_err(dev, "devm_memremap failed\n"); 217 dev_err(dev, "devm_memremap failed\n");
209 err = PTR_ERR(desc.regs_cena); 218 err = PTR_ERR(desc.regs_cena);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index d02013556a1b..c66f5b73777c 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -15,6 +15,8 @@
15#define QMAN_REV_4000 0x04000000 15#define QMAN_REV_4000 0x04000000
16#define QMAN_REV_4100 0x04010000 16#define QMAN_REV_4100 0x04010000
17#define QMAN_REV_4101 0x04010001 17#define QMAN_REV_4101 0x04010001
18#define QMAN_REV_5000 0x05000000
19
18#define QMAN_REV_MASK 0xffff0000 20#define QMAN_REV_MASK 0xffff0000
19 21
20/* All QBMan command and result structures use this "valid bit" encoding */ 22/* All QBMan command and result structures use this "valid bit" encoding */
@@ -25,10 +27,17 @@
25#define QBMAN_WQCHAN_CONFIGURE 0x46 27#define QBMAN_WQCHAN_CONFIGURE 0x46
26 28
27/* CINH register offsets */ 29/* CINH register offsets */
30#define QBMAN_CINH_SWP_EQCR_PI 0x800
28#define QBMAN_CINH_SWP_EQAR 0x8c0 31#define QBMAN_CINH_SWP_EQAR 0x8c0
32#define QBMAN_CINH_SWP_CR_RT 0x900
33#define QBMAN_CINH_SWP_VDQCR_RT 0x940
34#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
35#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
29#define QBMAN_CINH_SWP_DQPI 0xa00 36#define QBMAN_CINH_SWP_DQPI 0xa00
30#define QBMAN_CINH_SWP_DCAP 0xac0 37#define QBMAN_CINH_SWP_DCAP 0xac0
31#define QBMAN_CINH_SWP_SDQCR 0xb00 38#define QBMAN_CINH_SWP_SDQCR 0xb00
39#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
40#define QBMAN_CINH_SWP_RCR_PI 0xc00
32#define QBMAN_CINH_SWP_RAR 0xcc0 41#define QBMAN_CINH_SWP_RAR 0xcc0
33#define QBMAN_CINH_SWP_ISR 0xe00 42#define QBMAN_CINH_SWP_ISR 0xe00
34#define QBMAN_CINH_SWP_IER 0xe40 43#define QBMAN_CINH_SWP_IER 0xe40
@@ -43,6 +52,13 @@
43#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1)) 52#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((u32)(vb) >> 1))
44#define QBMAN_CENA_SWP_VDQCR 0x780 53#define QBMAN_CENA_SWP_VDQCR 0x780
45 54
55/* CENA register offsets in memory-backed mode */
56#define QBMAN_CENA_SWP_DQRR_MEM(n) (0x800 + ((u32)(n) << 6))
57#define QBMAN_CENA_SWP_RCR_MEM(n) (0x1400 + ((u32)(n) << 6))
58#define QBMAN_CENA_SWP_CR_MEM 0x1600
59#define QBMAN_CENA_SWP_RR_MEM 0x1680
60#define QBMAN_CENA_SWP_VDQCR_MEM 0x1780
61
46/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ 62/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */
47#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6) 63#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)(p) & 0x1ff) >> 6)
48 64
@@ -96,10 +112,13 @@ static inline void *qbman_get_cmd(struct qbman_swp *p, u32 offset)
96 112
97#define SWP_CFG_DQRR_MF_SHIFT 20 113#define SWP_CFG_DQRR_MF_SHIFT 20
98#define SWP_CFG_EST_SHIFT 16 114#define SWP_CFG_EST_SHIFT 16
115#define SWP_CFG_CPBS_SHIFT 15
99#define SWP_CFG_WN_SHIFT 14 116#define SWP_CFG_WN_SHIFT 14
100#define SWP_CFG_RPM_SHIFT 12 117#define SWP_CFG_RPM_SHIFT 12
101#define SWP_CFG_DCM_SHIFT 10 118#define SWP_CFG_DCM_SHIFT 10
102#define SWP_CFG_EPM_SHIFT 8 119#define SWP_CFG_EPM_SHIFT 8
120#define SWP_CFG_VPM_SHIFT 7
121#define SWP_CFG_CPM_SHIFT 6
103#define SWP_CFG_SD_SHIFT 5 122#define SWP_CFG_SD_SHIFT 5
104#define SWP_CFG_SP_SHIFT 4 123#define SWP_CFG_SP_SHIFT 4
105#define SWP_CFG_SE_SHIFT 3 124#define SWP_CFG_SE_SHIFT 3
@@ -125,6 +144,8 @@ static inline u32 qbman_set_swp_cfg(u8 max_fill, u8 wn, u8 est, u8 rpm, u8 dcm,
125 ep << SWP_CFG_EP_SHIFT); 144 ep << SWP_CFG_EP_SHIFT);
126} 145}
127 146
147#define QMAN_RT_MODE 0x00000100
148
128/** 149/**
129 * qbman_swp_init() - Create a functional object representing the given 150 * qbman_swp_init() - Create a functional object representing the given
130 * QBMan portal descriptor. 151 * QBMan portal descriptor.
@@ -146,6 +167,8 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
146 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT; 167 p->sdq |= qbman_sdqcr_dct_prio_ics << QB_SDQCR_DCT_SHIFT;
147 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT; 168 p->sdq |= qbman_sdqcr_fc_up_to_3 << QB_SDQCR_FC_SHIFT;
148 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT; 169 p->sdq |= QMAN_SDQCR_TOKEN << QB_SDQCR_TOK_SHIFT;
170 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
171 p->mr.valid_bit = QB_VALID_BIT;
149 172
150 atomic_set(&p->vdq.available, 1); 173 atomic_set(&p->vdq.available, 1);
151 p->vdq.valid_bit = QB_VALID_BIT; 174 p->vdq.valid_bit = QB_VALID_BIT;
@@ -163,6 +186,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
163 p->addr_cena = d->cena_bar; 186 p->addr_cena = d->cena_bar;
164 p->addr_cinh = d->cinh_bar; 187 p->addr_cinh = d->cinh_bar;
165 188
189 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
190 memset(p->addr_cena, 0, 64 * 1024);
191
166 reg = qbman_set_swp_cfg(p->dqrr.dqrr_size, 192 reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
167 1, /* Writes Non-cacheable */ 193 1, /* Writes Non-cacheable */
168 0, /* EQCR_CI stashing threshold */ 194 0, /* EQCR_CI stashing threshold */
@@ -175,6 +201,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
175 1, /* dequeue stashing priority == TRUE */ 201 1, /* dequeue stashing priority == TRUE */
176 0, /* dequeue stashing enable == FALSE */ 202 0, /* dequeue stashing enable == FALSE */
177 0); /* EQCR_CI stashing priority == FALSE */ 203 0); /* EQCR_CI stashing priority == FALSE */
204 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000)
205 reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
206 1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
207 1 << SWP_CFG_CPM_SHIFT; /* CR read triggered mode */
178 208
179 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg); 209 qbman_write_register(p, QBMAN_CINH_SWP_CFG, reg);
180 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG); 210 reg = qbman_read_register(p, QBMAN_CINH_SWP_CFG);
@@ -184,6 +214,10 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
184 return NULL; 214 return NULL;
185 } 215 }
186 216
217 if ((p->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
218 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_PI, QMAN_RT_MODE);
219 qbman_write_register(p, QBMAN_CINH_SWP_RCR_PI, QMAN_RT_MODE);
220 }
187 /* 221 /*
188 * SDQCR needs to be initialized to 0 when no channels are 222 * SDQCR needs to be initialized to 0 when no channels are
189 * being dequeued from or else the QMan HW will indicate an 223 * being dequeued from or else the QMan HW will indicate an
@@ -278,7 +312,10 @@ void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
278 */ 312 */
279void *qbman_swp_mc_start(struct qbman_swp *p) 313void *qbman_swp_mc_start(struct qbman_swp *p)
280{ 314{
281 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR); 315 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
316 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR);
317 else
318 return qbman_get_cmd(p, QBMAN_CENA_SWP_CR_MEM);
282} 319}
283 320
284/* 321/*
@@ -289,8 +326,14 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
289{ 326{
290 u8 *v = cmd; 327 u8 *v = cmd;
291 328
292 dma_wmb(); 329 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
293 *v = cmd_verb | p->mc.valid_bit; 330 dma_wmb();
331 *v = cmd_verb | p->mc.valid_bit;
332 } else {
333 *v = cmd_verb | p->mc.valid_bit;
334 dma_wmb();
335 qbman_write_register(p, QBMAN_CINH_SWP_CR_RT, QMAN_RT_MODE);
336 }
294} 337}
295 338
296/* 339/*
@@ -301,13 +344,27 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
301{ 344{
302 u32 *ret, verb; 345 u32 *ret, verb;
303 346
304 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); 347 if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
348 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
349 /* Remove the valid-bit - command completed if the rest
350 * is non-zero.
351 */
352 verb = ret[0] & ~QB_VALID_BIT;
353 if (!verb)
354 return NULL;
355 p->mc.valid_bit ^= QB_VALID_BIT;
356 } else {
357 ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR_MEM);
358 /* Command completed if the valid bit is toggled */
359 if (p->mr.valid_bit != (ret[0] & QB_VALID_BIT))
360 return NULL;
361 /* Command completed if the rest is non-zero */
362 verb = ret[0] & ~QB_VALID_BIT;
363 if (!verb)
364 return NULL;
365 p->mr.valid_bit ^= QB_VALID_BIT;
366 }
305 367
306 /* Remove the valid-bit - command completed if the rest is non-zero */
307 verb = ret[0] & ~QB_VALID_BIT;
308 if (!verb)
309 return NULL;
310 p->mc.valid_bit ^= QB_VALID_BIT;
311 return ret; 368 return ret;
312} 369}
313 370
@@ -384,6 +441,18 @@ void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
384#define EQAR_VB(eqar) ((eqar) & 0x80) 441#define EQAR_VB(eqar) ((eqar) & 0x80)
385#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) 442#define EQAR_SUCCESS(eqar) ((eqar) & 0x100)
386 443
444static inline void qbman_write_eqcr_am_rt_register(struct qbman_swp *p,
445 u8 idx)
446{
447 if (idx < 16)
448 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT + idx * 4,
449 QMAN_RT_MODE);
450 else
451 qbman_write_register(p, QBMAN_CINH_SWP_EQCR_AM_RT2 +
452 (idx - 16) * 4,
453 QMAN_RT_MODE);
454}
455
387/** 456/**
388 * qbman_swp_enqueue() - Issue an enqueue command 457 * qbman_swp_enqueue() - Issue an enqueue command
389 * @s: the software portal used for enqueue 458 * @s: the software portal used for enqueue
@@ -408,9 +477,15 @@ int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d,
408 memcpy(&p->dca, &d->dca, 31); 477 memcpy(&p->dca, &d->dca, 31);
409 memcpy(&p->fd, fd, sizeof(*fd)); 478 memcpy(&p->fd, fd, sizeof(*fd));
410 479
411 /* Set the verb byte, have to substitute in the valid-bit */ 480 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
412 dma_wmb(); 481 /* Set the verb byte, have to substitute in the valid-bit */
413 p->verb = d->verb | EQAR_VB(eqar); 482 dma_wmb();
483 p->verb = d->verb | EQAR_VB(eqar);
484 } else {
485 p->verb = d->verb | EQAR_VB(eqar);
486 dma_wmb();
487 qbman_write_eqcr_am_rt_register(s, EQAR_IDX(eqar));
488 }
414 489
415 return 0; 490 return 0;
416} 491}
@@ -587,17 +662,27 @@ int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d)
587 return -EBUSY; 662 return -EBUSY;
588 } 663 }
589 s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt; 664 s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
590 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR); 665 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
666 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR);
667 else
668 p = qbman_get_cmd(s, QBMAN_CENA_SWP_VDQCR_MEM);
591 p->numf = d->numf; 669 p->numf = d->numf;
592 p->tok = QMAN_DQ_TOKEN_VALID; 670 p->tok = QMAN_DQ_TOKEN_VALID;
593 p->dq_src = d->dq_src; 671 p->dq_src = d->dq_src;
594 p->rsp_addr = d->rsp_addr; 672 p->rsp_addr = d->rsp_addr;
595 p->rsp_addr_virt = d->rsp_addr_virt; 673 p->rsp_addr_virt = d->rsp_addr_virt;
596 dma_wmb();
597 674
598 /* Set the verb byte, have to substitute in the valid-bit */ 675 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
599 p->verb = d->verb | s->vdq.valid_bit; 676 dma_wmb();
600 s->vdq.valid_bit ^= QB_VALID_BIT; 677 /* Set the verb byte, have to substitute in the valid-bit */
678 p->verb = d->verb | s->vdq.valid_bit;
679 s->vdq.valid_bit ^= QB_VALID_BIT;
680 } else {
681 p->verb = d->verb | s->vdq.valid_bit;
682 s->vdq.valid_bit ^= QB_VALID_BIT;
683 dma_wmb();
684 qbman_write_register(s, QBMAN_CINH_SWP_VDQCR_RT, QMAN_RT_MODE);
685 }
601 686
602 return 0; 687 return 0;
603} 688}
@@ -655,7 +740,10 @@ const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
655 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx))); 740 QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
656 } 741 }
657 742
658 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); 743 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
744 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
745 else
746 p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
659 verb = p->dq.verb; 747 verb = p->dq.verb;
660 748
661 /* 749 /*
@@ -807,18 +895,28 @@ int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
807 return -EBUSY; 895 return -EBUSY;
808 896
809 /* Start the release command */ 897 /* Start the release command */
810 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); 898 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000)
899 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR(RAR_IDX(rar)));
900 else
901 p = qbman_get_cmd(s, QBMAN_CENA_SWP_RCR_MEM(RAR_IDX(rar)));
811 /* Copy the caller's buffer pointers to the command */ 902 /* Copy the caller's buffer pointers to the command */
812 for (i = 0; i < num_buffers; i++) 903 for (i = 0; i < num_buffers; i++)
813 p->buf[i] = cpu_to_le64(buffers[i]); 904 p->buf[i] = cpu_to_le64(buffers[i]);
814 p->bpid = d->bpid; 905 p->bpid = d->bpid;
815 906
816 /* 907 if ((s->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
817 * Set the verb byte, have to substitute in the valid-bit and the number 908 /*
818 * of buffers. 909 * Set the verb byte, have to substitute in the valid-bit
819 */ 910 * and the number of buffers.
820 dma_wmb(); 911 */
821 p->verb = d->verb | RAR_VB(rar) | num_buffers; 912 dma_wmb();
913 p->verb = d->verb | RAR_VB(rar) | num_buffers;
914 } else {
915 p->verb = d->verb | RAR_VB(rar) | num_buffers;
916 dma_wmb();
917 qbman_write_register(s, QBMAN_CINH_SWP_RCR_AM_RT +
918 RAR_IDX(rar) * 4, QMAN_RT_MODE);
919 }
822 920
823 return 0; 921 return 0;
824} 922}
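
The recurring shape of these changes: on portals older than QMan 5.0 the descriptor sits in the cache-enabled (CENA) area and the write of the valid bit itself hands the command to hardware, so the dma_wmb() must precede it; on the memory-backed portals introduced with QMAN_REV_5000 the descriptor is completed first, then ordered with the barrier, and an explicit read-trigger register write in the cache-inhibited (CINH) area acts as the doorbell. Schematically, condensed from qbman_swp_mc_submit() and qbman_swp_enqueue() above (a sketch, not compilable on its own; verb, valid_bit and rt_reg stand in for the per-command specifics):

	if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
		dma_wmb();                  /* descriptor fields first... */
		p->verb = verb | valid_bit; /* ...the valid bit is the doorbell */
	} else {
		p->verb = verb | valid_bit; /* finish the in-memory descriptor */
		dma_wmb();                  /* order it before the doorbell */
		qbman_write_register(p, rt_reg, QMAN_RT_MODE); /* CINH trigger */
	}
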
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index fa35fc1afeaa..f3ec5d2044fb 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -1,7 +1,7 @@
1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
2/* 2/*
3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc. 3 * Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
4 * Copyright 2016 NXP 4 * Copyright 2016-2019 NXP
5 * 5 *
6 */ 6 */
7#ifndef __FSL_QBMAN_PORTAL_H 7#ifndef __FSL_QBMAN_PORTAL_H
@@ -110,6 +110,11 @@ struct qbman_swp {
110 u32 valid_bit; /* 0x00 or 0x80 */ 110 u32 valid_bit; /* 0x00 or 0x80 */
111 } mc; 111 } mc;
112 112
113 /* Management response */
114 struct {
115 u32 valid_bit; /* 0x00 or 0x80 */
116 } mr;
117
113 /* Push dequeues */ 118 /* Push dequeues */
114 u32 sdq; 119 u32 sdq;
115 120
@@ -428,7 +433,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
428static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, 433static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
429 u8 cmd_verb) 434 u8 cmd_verb)
430{ 435{
431 int loopvar = 1000; 436 int loopvar = 2000;
432 437
433 qbman_swp_mc_submit(swp, cmd, cmd_verb); 438 qbman_swp_mc_submit(swp, cmd, cmd_verb);
434 439
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index 78607da7320e..1ef8068c8dd3 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -97,6 +97,11 @@ static const struct fsl_soc_die_attr fsl_soc_die[] = {
97 .svr = 0x87000000, 97 .svr = 0x87000000,
98 .mask = 0xfff70000, 98 .mask = 0xfff70000,
99 }, 99 },
100 /* Die: LX2160A, SoC: LX2160A/LX2120A/LX2080A */
101 { .die = "LX2160A",
102 .svr = 0x87360000,
103 .mask = 0xff3f0000,
104 },
100 { }, 105 { },
101}; 106};
102 107
@@ -218,6 +223,7 @@ static const struct of_device_id fsl_guts_of_match[] = {
218 { .compatible = "fsl,ls1088a-dcfg", }, 223 { .compatible = "fsl,ls1088a-dcfg", },
219 { .compatible = "fsl,ls1012a-dcfg", }, 224 { .compatible = "fsl,ls1012a-dcfg", },
220 { .compatible = "fsl,ls1046a-dcfg", }, 225 { .compatible = "fsl,ls1046a-dcfg", },
226 { .compatible = "fsl,lx2160a-dcfg", },
221 {} 227 {}
222}; 228};
223MODULE_DEVICE_TABLE(of, fsl_guts_of_match); 229MODULE_DEVICE_TABLE(of, fsl_guts_of_match);
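
One masked entry covers the whole die: any SVR whose masked value equals the entry's .svr is reported as the same die, which is how LX2160A, LX2120A and LX2080A share a row. A standalone illustration of the comparison, assuming the usual (svr & mask) == svr form used by the (unshown) die-matching helper; the sample SVR values are hypothetical, constructed only to vary the masked-out bits:

	#include <stdio.h>

	#define LX2160A_SVR  0x87360000u /* from the fsl_soc_die[] entry above */
	#define LX2160A_MASK 0xff3f0000u

	int main(void)
	{
		/* hypothetical SVRs: three die variants, one unrelated part */
		const unsigned int svrs[] = {
			0x87360010, 0x87760010, 0x87b60010, 0x87060010
		};
		unsigned int i;

		for (i = 0; i < sizeof(svrs) / sizeof(svrs[0]); i++)
			printf("svr 0x%08x -> %s\n", svrs[i],
			       (svrs[i] & LX2160A_MASK) == LX2160A_SVR ?
			       "LX2160A die" : "no match");
		return 0;
	}
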
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index 2c95cf59f3e7..cf4f10d6f590 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -32,6 +32,7 @@
32 32
33static struct bman_portal *affine_bportals[NR_CPUS]; 33static struct bman_portal *affine_bportals[NR_CPUS];
34static struct cpumask portal_cpus; 34static struct cpumask portal_cpus;
35static int __bman_portals_probed;
35/* protect bman global registers and global data shared among portals */ 36/* protect bman global registers and global data shared among portals */
36static DEFINE_SPINLOCK(bman_lock); 37static DEFINE_SPINLOCK(bman_lock);
37 38
@@ -87,6 +88,12 @@ static int bman_online_cpu(unsigned int cpu)
87 return 0; 88 return 0;
88} 89}
89 90
91int bman_portals_probed(void)
92{
93 return __bman_portals_probed;
94}
95EXPORT_SYMBOL_GPL(bman_portals_probed);
96
90static int bman_portal_probe(struct platform_device *pdev) 97static int bman_portal_probe(struct platform_device *pdev)
91{ 98{
92 struct device *dev = &pdev->dev; 99 struct device *dev = &pdev->dev;
@@ -104,8 +111,10 @@ static int bman_portal_probe(struct platform_device *pdev)
104 } 111 }
105 112
106 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 113 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
107 if (!pcfg) 114 if (!pcfg) {
115 __bman_portals_probed = -1;
108 return -ENOMEM; 116 return -ENOMEM;
117 }
109 118
110 pcfg->dev = dev; 119 pcfg->dev = dev;
111 120
@@ -113,14 +122,14 @@ static int bman_portal_probe(struct platform_device *pdev)
113 DPAA_PORTAL_CE); 122 DPAA_PORTAL_CE);
114 if (!addr_phys[0]) { 123 if (!addr_phys[0]) {
115 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 124 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
116 return -ENXIO; 125 goto err_ioremap1;
117 } 126 }
118 127
119 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 128 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
120 DPAA_PORTAL_CI); 129 DPAA_PORTAL_CI);
121 if (!addr_phys[1]) { 130 if (!addr_phys[1]) {
122 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 131 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
123 return -ENXIO; 132 goto err_ioremap1;
124 } 133 }
125 134
126 pcfg->cpu = -1; 135 pcfg->cpu = -1;
@@ -128,7 +137,7 @@ static int bman_portal_probe(struct platform_device *pdev)
128 irq = platform_get_irq(pdev, 0); 137 irq = platform_get_irq(pdev, 0);
129 if (irq <= 0) { 138 if (irq <= 0) {
130 dev_err(dev, "Can't get %pOF IRQ'\n", node); 139 dev_err(dev, "Can't get %pOF IRQ'\n", node);
131 return -ENXIO; 140 goto err_ioremap1;
132 } 141 }
133 pcfg->irq = irq; 142 pcfg->irq = irq;
134 143
@@ -150,6 +159,7 @@ static int bman_portal_probe(struct platform_device *pdev)
150 spin_lock(&bman_lock); 159 spin_lock(&bman_lock);
151 cpu = cpumask_next_zero(-1, &portal_cpus); 160 cpu = cpumask_next_zero(-1, &portal_cpus);
152 if (cpu >= nr_cpu_ids) { 161 if (cpu >= nr_cpu_ids) {
162 __bman_portals_probed = 1;
153 /* unassigned portal, skip init */ 163 /* unassigned portal, skip init */
154 spin_unlock(&bman_lock); 164 spin_unlock(&bman_lock);
155 return 0; 165 return 0;
@@ -175,6 +185,8 @@ err_portal_init:
175err_ioremap2: 185err_ioremap2:
176 memunmap(pcfg->addr_virt_ce); 186 memunmap(pcfg->addr_virt_ce);
177err_ioremap1: 187err_ioremap1:
188 __bman_portals_probed = -1;
189
178 return -ENXIO; 190 return -ENXIO;
179} 191}
180 192
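
Judging from the hunks above, the tri-state return value lets consumers distinguish "not probed yet" from "probe failed": __bman_portals_probed stays 0 until portals exist for all CPUs, flips to 1 at that point, and is latched to -1 on any probe failure. A sketch of how a dependent driver's probe might key off it (kernel context, not standalone; the declaration is assumed to land in <soc/fsl/bman.h>, and the typical consumer would be the DPAA Ethernet driver, which is outside this patch):

	#include <linux/platform_device.h>
	#include <soc/fsl/bman.h>

	static int dpaa_consumer_probe(struct platform_device *pdev)
	{
		int probed = bman_portals_probed();

		if (probed == -1)
			return -ENODEV;        /* a portal probe failed: give up */
		if (!probed)
			return -EPROBE_DEFER;  /* portals not all probed: retry */

		/* ...safe to rely on BMan portals from here on... */
		return 0;
	}
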
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 109b38de3176..a6bb43007d03 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -596,7 +596,7 @@ static int qman_init_ccsr(struct device *dev)
596} 596}
597 597
598#define LIO_CFG_LIODN_MASK 0x0fff0000 598#define LIO_CFG_LIODN_MASK 0x0fff0000
599void qman_liodn_fixup(u16 channel) 599void __qman_liodn_fixup(u16 channel)
600{ 600{
601 static int done; 601 static int done;
602 static u32 liodn_offset; 602 static u32 liodn_offset;
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 661c9b234d32..e2186b681d87 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -38,6 +38,7 @@ EXPORT_SYMBOL(qman_dma_portal);
38#define CONFIG_FSL_DPA_PIRQ_FAST 1 38#define CONFIG_FSL_DPA_PIRQ_FAST 1
39 39
40static struct cpumask portal_cpus; 40static struct cpumask portal_cpus;
41static int __qman_portals_probed;
41/* protect qman global registers and global data shared among portals */ 42/* protect qman global registers and global data shared among portals */
42static DEFINE_SPINLOCK(qman_lock); 43static DEFINE_SPINLOCK(qman_lock);
43 44
@@ -220,6 +221,12 @@ static int qman_online_cpu(unsigned int cpu)
220 return 0; 221 return 0;
221} 222}
222 223
224int qman_portals_probed(void)
225{
226 return __qman_portals_probed;
227}
228EXPORT_SYMBOL_GPL(qman_portals_probed);
229
223static int qman_portal_probe(struct platform_device *pdev) 230static int qman_portal_probe(struct platform_device *pdev)
224{ 231{
225 struct device *dev = &pdev->dev; 232 struct device *dev = &pdev->dev;
@@ -238,8 +245,10 @@ static int qman_portal_probe(struct platform_device *pdev)
238 } 245 }
239 246
240 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL); 247 pcfg = devm_kmalloc(dev, sizeof(*pcfg), GFP_KERNEL);
241 if (!pcfg) 248 if (!pcfg) {
249 __qman_portals_probed = -1;
242 return -ENOMEM; 250 return -ENOMEM;
251 }
243 252
244 pcfg->dev = dev; 253 pcfg->dev = dev;
245 254
@@ -247,19 +256,20 @@ static int qman_portal_probe(struct platform_device *pdev)
247 DPAA_PORTAL_CE); 256 DPAA_PORTAL_CE);
248 if (!addr_phys[0]) { 257 if (!addr_phys[0]) {
249 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node); 258 dev_err(dev, "Can't get %pOF property 'reg::CE'\n", node);
250 return -ENXIO; 259 goto err_ioremap1;
251 } 260 }
252 261
253 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM, 262 addr_phys[1] = platform_get_resource(pdev, IORESOURCE_MEM,
254 DPAA_PORTAL_CI); 263 DPAA_PORTAL_CI);
255 if (!addr_phys[1]) { 264 if (!addr_phys[1]) {
256 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node); 265 dev_err(dev, "Can't get %pOF property 'reg::CI'\n", node);
257 return -ENXIO; 266 goto err_ioremap1;
258 } 267 }
259 268
260 err = of_property_read_u32(node, "cell-index", &val); 269 err = of_property_read_u32(node, "cell-index", &val);
261 if (err) { 270 if (err) {
262 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node); 271 dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
272 __qman_portals_probed = -1;
263 return err; 273 return err;
264 } 274 }
265 pcfg->channel = val; 275 pcfg->channel = val;
@@ -267,7 +277,7 @@ static int qman_portal_probe(struct platform_device *pdev)
267 irq = platform_get_irq(pdev, 0); 277 irq = platform_get_irq(pdev, 0);
268 if (irq <= 0) { 278 if (irq <= 0) {
269 dev_err(dev, "Can't get %pOF IRQ\n", node); 279 dev_err(dev, "Can't get %pOF IRQ\n", node);
270 return -ENXIO; 280 goto err_ioremap1;
271 } 281 }
272 pcfg->irq = irq; 282 pcfg->irq = irq;
273 283
@@ -291,6 +301,7 @@ static int qman_portal_probe(struct platform_device *pdev)
291 spin_lock(&qman_lock); 301 spin_lock(&qman_lock);
292 cpu = cpumask_next_zero(-1, &portal_cpus); 302 cpu = cpumask_next_zero(-1, &portal_cpus);
293 if (cpu >= nr_cpu_ids) { 303 if (cpu >= nr_cpu_ids) {
304 __qman_portals_probed = 1;
294 /* unassigned portal, skip init */ 305 /* unassigned portal, skip init */
295 spin_unlock(&qman_lock); 306 spin_unlock(&qman_lock);
296 return 0; 307 return 0;
@@ -321,6 +332,8 @@ err_portal_init:
321err_ioremap2: 332err_ioremap2:
322 memunmap(pcfg->addr_virt_ce); 333 memunmap(pcfg->addr_virt_ce);
323err_ioremap1: 334err_ioremap1:
335 __qman_portals_probed = -1;
336
324 return -ENXIO; 337 return -ENXIO;
325} 338}
326 339
diff --git a/drivers/soc/fsl/qbman/qman_priv.h b/drivers/soc/fsl/qbman/qman_priv.h
index 75a8f905f8f7..04515718cfd9 100644
--- a/drivers/soc/fsl/qbman/qman_priv.h
+++ b/drivers/soc/fsl/qbman/qman_priv.h
@@ -193,7 +193,14 @@ extern struct gen_pool *qm_cgralloc; /* CGR ID allocator */
193u32 qm_get_pools_sdqcr(void); 193u32 qm_get_pools_sdqcr(void);
194 194
195int qman_wq_alloc(void); 195int qman_wq_alloc(void);
196void qman_liodn_fixup(u16 channel); 196#ifdef CONFIG_FSL_PAMU
197#define qman_liodn_fixup __qman_liodn_fixup
198#else
199static inline void qman_liodn_fixup(u16 channel)
200{
201}
202#endif
203void __qman_liodn_fixup(u16 channel);
197void qman_set_sdest(u16 channel, unsigned int cpu_idx); 204void qman_set_sdest(u16 channel, unsigned int cpu_idx);
198 205
199struct qman_portal *qman_create_affine_portal( 206struct qman_portal *qman_create_affine_portal(
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index ade1b46d669c..8aaebf13e2e6 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -8,4 +8,13 @@ config IMX_GPCV2_PM_DOMAINS
8 select PM_GENERIC_DOMAINS 8 select PM_GENERIC_DOMAINS
9 default y if SOC_IMX7D 9 default y if SOC_IMX7D
10 10
11config IMX_SCU_SOC
12 bool "i.MX System Controller Unit SoC info support"
13 depends on IMX_SCU
14 select SOC_BUS
15 help
 16	  If you say yes here, you get support for the NXP i.MX System
 17	  Controller Unit SoC info module, which provides SoC information
 18	  such as the SoC family, ID and revision.
19
11endmenu 20endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index caa8653600f2..cf9ca42ff739 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -2,3 +2,4 @@
2obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o 2obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
3obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o 3obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
4obj-$(CONFIG_ARCH_MXC) += soc-imx8.o 4obj-$(CONFIG_ARCH_MXC) += soc-imx8.o
5obj-$(CONFIG_IMX_SCU_SOC) += soc-imx-scu.o
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
new file mode 100644
index 000000000000..676f612f6488
--- /dev/null
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -0,0 +1,144 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright 2019 NXP.
4 */
5
6#include <dt-bindings/firmware/imx/rsrc.h>
7#include <linux/firmware/imx/sci.h>
8#include <linux/slab.h>
9#include <linux/sys_soc.h>
10#include <linux/platform_device.h>
11#include <linux/of.h>
12
13#define IMX_SCU_SOC_DRIVER_NAME "imx-scu-soc"
14
15static struct imx_sc_ipc *soc_ipc_handle;
16
17struct imx_sc_msg_misc_get_soc_id {
18 struct imx_sc_rpc_msg hdr;
19 union {
20 struct {
21 u32 control;
22 u16 resource;
23 } __packed req;
24 struct {
25 u32 id;
26 } resp;
27 } data;
28} __packed;
29
30static int imx_scu_soc_id(void)
31{
32 struct imx_sc_msg_misc_get_soc_id msg;
33 struct imx_sc_rpc_msg *hdr = &msg.hdr;
34 int ret;
35
36 hdr->ver = IMX_SC_RPC_VERSION;
37 hdr->svc = IMX_SC_RPC_SVC_MISC;
38 hdr->func = IMX_SC_MISC_FUNC_GET_CONTROL;
39 hdr->size = 3;
40
41 msg.data.req.control = IMX_SC_C_ID;
42 msg.data.req.resource = IMX_SC_R_SYSTEM;
43
44 ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
45 if (ret) {
46 pr_err("%s: get soc info failed, ret %d\n", __func__, ret);
47 return ret;
48 }
49
50 return msg.data.resp.id;
51}
52
53static int imx_scu_soc_probe(struct platform_device *pdev)
54{
55 struct soc_device_attribute *soc_dev_attr;
56 struct soc_device *soc_dev;
57 int id, ret;
58 u32 val;
59
60 ret = imx_scu_get_handle(&soc_ipc_handle);
61 if (ret)
62 return ret;
63
64 soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
65 GFP_KERNEL);
66 if (!soc_dev_attr)
67 return -ENOMEM;
68
69 soc_dev_attr->family = "Freescale i.MX";
70
71 ret = of_property_read_string(of_root,
72 "model",
73 &soc_dev_attr->machine);
74 if (ret)
75 return ret;
76
77 id = imx_scu_soc_id();
78 if (id < 0)
79 return -EINVAL;
80
81 /* format soc_id value passed from SCU firmware */
82 val = id & 0x1f;
83 soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "0x%x", val);
84 if (!soc_dev_attr->soc_id)
85 return -ENOMEM;
86
87 /* format revision value passed from SCU firmware */
88 val = (id >> 5) & 0xf;
89 val = (((val >> 2) + 1) << 4) | (val & 0x3);
90 soc_dev_attr->revision = kasprintf(GFP_KERNEL,
91 "%d.%d",
92 (val >> 4) & 0xf,
93 val & 0xf);
94 if (!soc_dev_attr->revision) {
95 ret = -ENOMEM;
96 goto free_soc_id;
97 }
98
99 soc_dev = soc_device_register(soc_dev_attr);
100 if (IS_ERR(soc_dev)) {
101 ret = PTR_ERR(soc_dev);
102 goto free_revision;
103 }
104
105 return 0;
106
107free_revision:
108 kfree(soc_dev_attr->revision);
109free_soc_id:
110 kfree(soc_dev_attr->soc_id);
111 return ret;
112}
113
114static struct platform_driver imx_scu_soc_driver = {
115 .driver = {
116 .name = IMX_SCU_SOC_DRIVER_NAME,
117 },
118 .probe = imx_scu_soc_probe,
119};
120
121static int __init imx_scu_soc_init(void)
122{
123 struct platform_device *pdev;
124 struct device_node *np;
125 int ret;
126
127 np = of_find_compatible_node(NULL, NULL, "fsl,imx-scu");
128 if (!np)
129 return -ENODEV;
130
131 of_node_put(np);
132
133 ret = platform_driver_register(&imx_scu_soc_driver);
134 if (ret)
135 return ret;
136
137 pdev = platform_device_register_simple(IMX_SCU_SOC_DRIVER_NAME,
138 -1, NULL, 0);
139 if (IS_ERR(pdev))
140 platform_driver_unregister(&imx_scu_soc_driver);
141
142 return PTR_ERR_OR_ZERO(pdev);
143}
144device_initcall(imx_scu_soc_init);
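
The SCU returns the SoC ID in bits [4:0] of the control word and a 4-bit revision in bits [8:5]; the bit shuffle above splits that field into a two-bit major (biased by one) and a two-bit minor. A standalone restatement with worked values (the raw id inputs are hypothetical):

	#include <stdio.h>

	/* mirrors the decoding in imx_scu_soc_probe() above */
	static void decode(unsigned int id)
	{
		unsigned int soc_id = id & 0x1f;
		unsigned int rev = (id >> 5) & 0xf;
		unsigned int val = (((rev >> 2) + 1) << 4) | (rev & 0x3);

		printf("id 0x%03x -> soc_id 0x%x, revision %u.%u\n",
		       id, soc_id, (val >> 4) & 0xf, val & 0xf);
	}

	int main(void)
	{
		decode(0x001); /* revision field 0 -> 1.0 */
		decode(0x021); /* revision field 1 -> 1.1 */
		decode(0x081); /* revision field 4 -> 2.0 */
		return 0;
	}
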
diff --git a/drivers/soc/imx/soc-imx8.c b/drivers/soc/imx/soc-imx8.c
index b1bd8e2543ac..f924ae8c6514 100644
--- a/drivers/soc/imx/soc-imx8.c
+++ b/drivers/soc/imx/soc-imx8.c
@@ -16,6 +16,9 @@
16#define IMX8MQ_SW_INFO_B1 0x40 16#define IMX8MQ_SW_INFO_B1 0x40
17#define IMX8MQ_SW_MAGIC_B1 0xff0055aa 17#define IMX8MQ_SW_MAGIC_B1 0xff0055aa
18 18
19/* Same as ANADIG_DIGPROG_IMX7D */
20#define ANADIG_DIGPROG_IMX8MM 0x800
21
19struct imx8_soc_data { 22struct imx8_soc_data {
20 char *name; 23 char *name;
21 u32 (*soc_revision)(void); 24 u32 (*soc_revision)(void);
@@ -46,13 +49,45 @@ out:
46 return rev; 49 return rev;
47} 50}
48 51
52static u32 __init imx8mm_soc_revision(void)
53{
54 struct device_node *np;
55 void __iomem *anatop_base;
56 u32 rev;
57
58 np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop");
59 if (!np)
60 return 0;
61
62 anatop_base = of_iomap(np, 0);
63 WARN_ON(!anatop_base);
64
65 rev = readl_relaxed(anatop_base + ANADIG_DIGPROG_IMX8MM);
66
67 iounmap(anatop_base);
68 of_node_put(np);
69 return rev;
70}
71
49static const struct imx8_soc_data imx8mq_soc_data = { 72static const struct imx8_soc_data imx8mq_soc_data = {
50 .name = "i.MX8MQ", 73 .name = "i.MX8MQ",
51 .soc_revision = imx8mq_soc_revision, 74 .soc_revision = imx8mq_soc_revision,
52}; 75};
53 76
77static const struct imx8_soc_data imx8mm_soc_data = {
78 .name = "i.MX8MM",
79 .soc_revision = imx8mm_soc_revision,
80};
81
82static const struct imx8_soc_data imx8mn_soc_data = {
83 .name = "i.MX8MN",
84 .soc_revision = imx8mm_soc_revision,
85};
86
54static const struct of_device_id imx8_soc_match[] = { 87static const struct of_device_id imx8_soc_match[] = {
55 { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, }, 88 { .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
89 { .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
90 { .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
56 { } 91 { }
57}; 92};
58 93
@@ -65,7 +100,6 @@ static int __init imx8_soc_init(void)
65{ 100{
66 struct soc_device_attribute *soc_dev_attr; 101 struct soc_device_attribute *soc_dev_attr;
67 struct soc_device *soc_dev; 102 struct soc_device *soc_dev;
68 struct device_node *root;
69 const struct of_device_id *id; 103 const struct of_device_id *id;
70 u32 soc_rev = 0; 104 u32 soc_rev = 0;
71 const struct imx8_soc_data *data; 105 const struct imx8_soc_data *data;
@@ -73,20 +107,19 @@ static int __init imx8_soc_init(void)
73 107
74 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 108 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
75 if (!soc_dev_attr) 109 if (!soc_dev_attr)
76 return -ENODEV; 110 return -ENOMEM;
77 111
78 soc_dev_attr->family = "Freescale i.MX"; 112 soc_dev_attr->family = "Freescale i.MX";
79 113
80 root = of_find_node_by_path("/"); 114 ret = of_property_read_string(of_root, "model", &soc_dev_attr->machine);
81 ret = of_property_read_string(root, "model", &soc_dev_attr->machine);
82 if (ret) 115 if (ret)
83 goto free_soc; 116 goto free_soc;
84 117
85 id = of_match_node(imx8_soc_match, root); 118 id = of_match_node(imx8_soc_match, of_root);
86 if (!id) 119 if (!id) {
120 ret = -ENODEV;
87 goto free_soc; 121 goto free_soc;
88 122 }
89 of_node_put(root);
90 123
91 data = id->data; 124 data = id->data;
92 if (data) { 125 if (data) {
@@ -96,12 +129,16 @@ static int __init imx8_soc_init(void)
96 } 129 }
97 130
98 soc_dev_attr->revision = imx8_revision(soc_rev); 131 soc_dev_attr->revision = imx8_revision(soc_rev);
99 if (!soc_dev_attr->revision) 132 if (!soc_dev_attr->revision) {
133 ret = -ENOMEM;
100 goto free_soc; 134 goto free_soc;
135 }
101 136
102 soc_dev = soc_device_register(soc_dev_attr); 137 soc_dev = soc_device_register(soc_dev_attr);
103 if (IS_ERR(soc_dev)) 138 if (IS_ERR(soc_dev)) {
139 ret = PTR_ERR(soc_dev);
104 goto free_rev; 140 goto free_rev;
141 }
105 142
106 if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT)) 143 if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
107 platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0); 144 platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
@@ -109,10 +146,10 @@ static int __init imx8_soc_init(void)
109 return 0; 146 return 0;
110 147
111free_rev: 148free_rev:
112 kfree(soc_dev_attr->revision); 149 if (strcmp(soc_dev_attr->revision, "unknown"))
150 kfree(soc_dev_attr->revision);
113free_soc: 151free_soc:
114 kfree(soc_dev_attr); 152 kfree(soc_dev_attr);
115 of_node_put(root); 153 return ret;
116 return -ENODEV;
117} 154}
118device_initcall(imx8_soc_init); 155device_initcall(imx8_soc_init);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 880cf0290962..a6d1bfb17279 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -4,6 +4,18 @@
4# 4#
5menu "Qualcomm SoC drivers" 5menu "Qualcomm SoC drivers"
6 6
7config QCOM_AOSS_QMP
8 tristate "Qualcomm AOSS Driver"
9 depends on ARCH_QCOM || COMPILE_TEST
10 depends on MAILBOX
11 depends on COMMON_CLK && PM
12 select PM_GENERIC_DOMAINS
13 help
 14	  This driver provides the means of communicating with, and controlling,
 15	  the low-power state of resources related to the remoteproc subsystems,
 16	  as well as controlling the debug clocks exposed by the Always On
 17	  Subsystem (AOSS), using the Qualcomm Messaging Protocol (QMP).
18
7config QCOM_COMMAND_DB 19config QCOM_COMMAND_DB
8 bool "Qualcomm Command DB" 20 bool "Qualcomm Command DB"
9 depends on ARCH_QCOM || COMPILE_TEST 21 depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ffe519b0cb66..eeb088beb15f 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,5 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2CFLAGS_rpmh-rsc.o := -I$(src) 2CFLAGS_rpmh-rsc.o := -I$(src)
3obj-$(CONFIG_QCOM_AOSS_QMP) += qcom_aoss.o
3obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o 4obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
4obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o 5obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
5obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o 6obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 74f8b9607daa..4fcc32420c47 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -8,6 +8,7 @@
8#include <linux/spinlock.h> 8#include <linux/spinlock.h>
9#include <linux/idr.h> 9#include <linux/idr.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/workqueue.h>
11#include <linux/of_device.h> 12#include <linux/of_device.h>
12#include <linux/soc/qcom/apr.h> 13#include <linux/soc/qcom/apr.h>
13#include <linux/rpmsg.h> 14#include <linux/rpmsg.h>
@@ -17,8 +18,18 @@ struct apr {
17 struct rpmsg_endpoint *ch; 18 struct rpmsg_endpoint *ch;
18 struct device *dev; 19 struct device *dev;
19 spinlock_t svcs_lock; 20 spinlock_t svcs_lock;
21 spinlock_t rx_lock;
20 struct idr svcs_idr; 22 struct idr svcs_idr;
21 int dest_domain_id; 23 int dest_domain_id;
24 struct workqueue_struct *rxwq;
25 struct work_struct rx_work;
26 struct list_head rx_list;
27};
28
29struct apr_rx_buf {
30 struct list_head node;
31 int len;
32 uint8_t buf[];
22}; 33};
23 34
24/** 35/**
@@ -62,11 +73,7 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
62 int len, void *priv, u32 addr) 73 int len, void *priv, u32 addr)
63{ 74{
64 struct apr *apr = dev_get_drvdata(&rpdev->dev); 75 struct apr *apr = dev_get_drvdata(&rpdev->dev);
65 uint16_t hdr_size, msg_type, ver, svc_id; 76 struct apr_rx_buf *abuf;
66 struct apr_device *svc = NULL;
67 struct apr_driver *adrv = NULL;
68 struct apr_resp_pkt resp;
69 struct apr_hdr *hdr;
70 unsigned long flags; 77 unsigned long flags;
71 78
72 if (len <= APR_HDR_SIZE) { 79 if (len <= APR_HDR_SIZE) {
@@ -75,6 +82,34 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
75 return -EINVAL; 82 return -EINVAL;
76 } 83 }
77 84
85 abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC);
86 if (!abuf)
87 return -ENOMEM;
88
89 abuf->len = len;
90 memcpy(abuf->buf, buf, len);
91
92 spin_lock_irqsave(&apr->rx_lock, flags);
93 list_add_tail(&abuf->node, &apr->rx_list);
94 spin_unlock_irqrestore(&apr->rx_lock, flags);
95
96 queue_work(apr->rxwq, &apr->rx_work);
97
98 return 0;
99}
100
101
102static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
103{
104 uint16_t hdr_size, msg_type, ver, svc_id;
105 struct apr_device *svc = NULL;
106 struct apr_driver *adrv = NULL;
107 struct apr_resp_pkt resp;
108 struct apr_hdr *hdr;
109 unsigned long flags;
110 void *buf = abuf->buf;
111 int len = abuf->len;
112
78 hdr = buf; 113 hdr = buf;
79 ver = APR_HDR_FIELD_VER(hdr->hdr_field); 114 ver = APR_HDR_FIELD_VER(hdr->hdr_field);
80 if (ver > APR_PKT_VER + 1) 115 if (ver > APR_PKT_VER + 1)
@@ -132,6 +167,23 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
132 return 0; 167 return 0;
133} 168}
134 169
170static void apr_rxwq(struct work_struct *work)
171{
172 struct apr *apr = container_of(work, struct apr, rx_work);
173 struct apr_rx_buf *abuf, *b;
174 unsigned long flags;
175
176 if (!list_empty(&apr->rx_list)) {
177 list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
178 apr_do_rx_callback(apr, abuf);
179 spin_lock_irqsave(&apr->rx_lock, flags);
180 list_del(&abuf->node);
181 spin_unlock_irqrestore(&apr->rx_lock, flags);
182 kfree(abuf);
183 }
184 }
185}
186
135static int apr_device_match(struct device *dev, struct device_driver *drv) 187static int apr_device_match(struct device *dev, struct device_driver *drv)
136{ 188{
137 struct apr_device *adev = to_apr_device(dev); 189 struct apr_device *adev = to_apr_device(dev);
@@ -276,7 +328,7 @@ static int apr_probe(struct rpmsg_device *rpdev)
276 if (!apr) 328 if (!apr)
277 return -ENOMEM; 329 return -ENOMEM;
278 330
279 ret = of_property_read_u32(dev->of_node, "reg", &apr->dest_domain_id); 331 ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id);
280 if (ret) { 332 if (ret) {
281 dev_err(dev, "APR Domain ID not specified in DT\n"); 333 dev_err(dev, "APR Domain ID not specified in DT\n");
282 return ret; 334 return ret;
@@ -285,6 +337,14 @@ static int apr_probe(struct rpmsg_device *rpdev)
285 dev_set_drvdata(dev, apr); 337 dev_set_drvdata(dev, apr);
286 apr->ch = rpdev->ept; 338 apr->ch = rpdev->ept;
287 apr->dev = dev; 339 apr->dev = dev;
340 apr->rxwq = create_singlethread_workqueue("qcom_apr_rx");
341 if (!apr->rxwq) {
342 dev_err(apr->dev, "Failed to start Rx WQ\n");
343 return -ENOMEM;
344 }
345 INIT_WORK(&apr->rx_work, apr_rxwq);
346 INIT_LIST_HEAD(&apr->rx_list);
347 spin_lock_init(&apr->rx_lock);
288 spin_lock_init(&apr->svcs_lock); 348 spin_lock_init(&apr->svcs_lock);
289 idr_init(&apr->svcs_idr); 349 idr_init(&apr->svcs_idr);
290 of_register_apr_devices(dev); 350 of_register_apr_devices(dev);
@@ -303,7 +363,11 @@ static int apr_remove_device(struct device *dev, void *null)
303 363
304static void apr_remove(struct rpmsg_device *rpdev) 364static void apr_remove(struct rpmsg_device *rpdev)
305{ 365{
366 struct apr *apr = dev_get_drvdata(&rpdev->dev);
367
306 device_for_each_child(&rpdev->dev, NULL, apr_remove_device); 368 device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
369 flush_workqueue(apr->rxwq);
370 destroy_workqueue(apr->rxwq);
307} 371}
308 372
309/* 373/*
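
The rework follows the standard deferred-RX pattern: apr_callback() runs in the rpmsg callback's atomic context, so it only copies the payload (hence GFP_ATOMIC) onto a spinlock-protected list and kicks the workqueue, while apr_do_rx_callback() does the real service dispatch from process context. Using create_singlethread_workqueue() also keeps messages ordered, since a single worker drains rx_list in arrival order. The condensed shape of the atomic half, trimmed from the hunks above (declarations and error handling elided; not compilable on its own):

	abuf = kzalloc(sizeof(*abuf) + len, GFP_ATOMIC); /* no sleeping here */
	abuf->len = len;
	memcpy(abuf->buf, buf, len);

	spin_lock_irqsave(&apr->rx_lock, flags);
	list_add_tail(&abuf->node, &apr->rx_list);
	spin_unlock_irqrestore(&apr->rx_lock, flags);

	queue_work(apr->rxwq, &apr->rx_work); /* dispatch runs later, may sleep */
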
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
new file mode 100644
index 000000000000..5f885196f4d0
--- /dev/null
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -0,0 +1,480 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2019, Linaro Ltd
4 */
5#include <dt-bindings/power/qcom-aoss-qmp.h>
6#include <linux/clk-provider.h>
7#include <linux/interrupt.h>
8#include <linux/io.h>
9#include <linux/mailbox_client.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/pm_domain.h>
13
14#define QMP_DESC_MAGIC 0x0
15#define QMP_DESC_VERSION 0x4
16#define QMP_DESC_FEATURES 0x8
17
18/* AOP-side offsets */
19#define QMP_DESC_UCORE_LINK_STATE 0xc
20#define QMP_DESC_UCORE_LINK_STATE_ACK 0x10
21#define QMP_DESC_UCORE_CH_STATE 0x14
22#define QMP_DESC_UCORE_CH_STATE_ACK 0x18
23#define QMP_DESC_UCORE_MBOX_SIZE 0x1c
24#define QMP_DESC_UCORE_MBOX_OFFSET 0x20
25
26/* Linux-side offsets */
27#define QMP_DESC_MCORE_LINK_STATE 0x24
28#define QMP_DESC_MCORE_LINK_STATE_ACK 0x28
29#define QMP_DESC_MCORE_CH_STATE 0x2c
30#define QMP_DESC_MCORE_CH_STATE_ACK 0x30
31#define QMP_DESC_MCORE_MBOX_SIZE 0x34
32#define QMP_DESC_MCORE_MBOX_OFFSET 0x38
33
34#define QMP_STATE_UP GENMASK(15, 0)
35#define QMP_STATE_DOWN GENMASK(31, 16)
36
37#define QMP_MAGIC 0x4d41494c /* mail */
38#define QMP_VERSION 1
39
40/* 64 bytes is enough to store the requests and provides padding to 4 bytes */
41#define QMP_MSG_LEN 64
42
43/**
44 * struct qmp - driver state for QMP implementation
45 * @msgram: iomem referencing the message RAM used for communication
46 * @dev: reference to QMP device
47 * @mbox_client: mailbox client used to ring the doorbell on transmit
48 * @mbox_chan: mailbox channel used to ring the doorbell on transmit
49 * @offset: offset within @msgram where messages should be written
50 * @size: maximum size of the messages to be transmitted
51 * @event: wait_queue for synchronization with the IRQ
52 * @tx_lock: provides synchronization between multiple callers of qmp_send()
53 * @qdss_clk: QDSS clock hw struct
54 * @pd_data: genpd data
55 */
56struct qmp {
57 void __iomem *msgram;
58 struct device *dev;
59
60 struct mbox_client mbox_client;
61 struct mbox_chan *mbox_chan;
62
63 size_t offset;
64 size_t size;
65
66 wait_queue_head_t event;
67
68 struct mutex tx_lock;
69
70 struct clk_hw qdss_clk;
71 struct genpd_onecell_data pd_data;
72};
73
74struct qmp_pd {
75 struct qmp *qmp;
76 struct generic_pm_domain pd;
77};
78
79#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
80
81static void qmp_kick(struct qmp *qmp)
82{
83 mbox_send_message(qmp->mbox_chan, NULL);
84 mbox_client_txdone(qmp->mbox_chan, 0);
85}
86
87static bool qmp_magic_valid(struct qmp *qmp)
88{
89 return readl(qmp->msgram + QMP_DESC_MAGIC) == QMP_MAGIC;
90}
91
92static bool qmp_link_acked(struct qmp *qmp)
93{
94 return readl(qmp->msgram + QMP_DESC_MCORE_LINK_STATE_ACK) == QMP_STATE_UP;
95}
96
97static bool qmp_mcore_channel_acked(struct qmp *qmp)
98{
99 return readl(qmp->msgram + QMP_DESC_MCORE_CH_STATE_ACK) == QMP_STATE_UP;
100}
101
102static bool qmp_ucore_channel_up(struct qmp *qmp)
103{
104 return readl(qmp->msgram + QMP_DESC_UCORE_CH_STATE) == QMP_STATE_UP;
105}
106
107static int qmp_open(struct qmp *qmp)
108{
109 int ret;
110 u32 val;
111
112 if (!qmp_magic_valid(qmp)) {
113 dev_err(qmp->dev, "QMP magic doesn't match\n");
114 return -EINVAL;
115 }
116
117 val = readl(qmp->msgram + QMP_DESC_VERSION);
118 if (val != QMP_VERSION) {
119 dev_err(qmp->dev, "unsupported QMP version %d\n", val);
120 return -EINVAL;
121 }
122
123 qmp->offset = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_OFFSET);
124 qmp->size = readl(qmp->msgram + QMP_DESC_MCORE_MBOX_SIZE);
125 if (!qmp->size) {
126 dev_err(qmp->dev, "invalid mailbox size\n");
127 return -EINVAL;
128 }
129
130 /* Ack remote core's link state */
131 val = readl(qmp->msgram + QMP_DESC_UCORE_LINK_STATE);
132 writel(val, qmp->msgram + QMP_DESC_UCORE_LINK_STATE_ACK);
133
134 /* Set local core's link state to up */
135 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
136
137 qmp_kick(qmp);
138
139 ret = wait_event_timeout(qmp->event, qmp_link_acked(qmp), HZ);
140 if (!ret) {
141 dev_err(qmp->dev, "ucore didn't ack link\n");
142 goto timeout_close_link;
143 }
144
145 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
146
147 qmp_kick(qmp);
148
149 ret = wait_event_timeout(qmp->event, qmp_ucore_channel_up(qmp), HZ);
150 if (!ret) {
151 dev_err(qmp->dev, "ucore didn't open channel\n");
152 goto timeout_close_channel;
153 }
154
155 /* Ack remote core's channel state */
156 writel(QMP_STATE_UP, qmp->msgram + QMP_DESC_UCORE_CH_STATE_ACK);
157
158 qmp_kick(qmp);
159
160 ret = wait_event_timeout(qmp->event, qmp_mcore_channel_acked(qmp), HZ);
161 if (!ret) {
162 dev_err(qmp->dev, "ucore didn't ack channel\n");
163 goto timeout_close_channel;
164 }
165
166 return 0;
167
168timeout_close_channel:
169 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
170
171timeout_close_link:
172 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
173 qmp_kick(qmp);
174
175 return -ETIMEDOUT;
176}
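In short, the handshake implemented by qmp_open() above is (a descriptive gloss, not part of the patch):

/*
 * qmp_open() handshake with the AOSS:
 *  1. validate QMP_DESC_MAGIC and QMP_DESC_VERSION;
 *  2. read the mcore mailbox offset and size advertised by the firmware;
 *  3. ack the remote (ucore) link state, raise the local (mcore) link
 *     state and wait for the remote ack;
 *  4. raise the local channel state and wait for the ucore channel;
 *  5. ack the ucore channel and wait for the mcore channel ack.
 * Each step that changes state is followed by a doorbell ring
 * (qmp_kick()), and each wait is bounded by a one-second (HZ) timeout.
 */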
177
178static void qmp_close(struct qmp *qmp)
179{
180 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_CH_STATE);
181 writel(QMP_STATE_DOWN, qmp->msgram + QMP_DESC_MCORE_LINK_STATE);
182 qmp_kick(qmp);
183}
184
185static irqreturn_t qmp_intr(int irq, void *data)
186{
187 struct qmp *qmp = data;
188
189 wake_up_interruptible_all(&qmp->event);
190
191 return IRQ_HANDLED;
192}
193
194static bool qmp_message_empty(struct qmp *qmp)
195{
196 return readl(qmp->msgram + qmp->offset) == 0;
197}
198
199/**
200 * qmp_send() - send a message to the AOSS
201 * @qmp: qmp context
202 * @data: message to be sent
203 * @len: length of the message
204 *
205 * Transmit @data to AOSS and wait for the AOSS to acknowledge the message.
206 * @len must be a multiple of 4 and not longer than the mailbox size. Access is
207 * synchronized by this implementation.
208 *
209 * Return: 0 on success, negative errno on failure
210 */
211static int qmp_send(struct qmp *qmp, const void *data, size_t len)
212{
213 long time_left;
214 int ret;
215
216 if (WARN_ON(len + sizeof(u32) > qmp->size))
217 return -EINVAL;
218
219 if (WARN_ON(len % sizeof(u32)))
220 return -EINVAL;
221
222 mutex_lock(&qmp->tx_lock);
223
224 /* The message RAM only implements 32-bit accesses */
225 __iowrite32_copy(qmp->msgram + qmp->offset + sizeof(u32),
226 data, len / sizeof(u32));
227 writel(len, qmp->msgram + qmp->offset);
228 qmp_kick(qmp);
229
230 time_left = wait_event_interruptible_timeout(qmp->event,
231 qmp_message_empty(qmp), HZ);
232 if (!time_left) {
233 dev_err(qmp->dev, "ucore did not ack channel\n");
234 ret = -ETIMEDOUT;
235
236 /* Clear message from buffer */
237 writel(0, qmp->msgram + qmp->offset);
238 } else {
239 ret = 0;
240 }
241
242 mutex_unlock(&qmp->tx_lock);
243
244 return ret;
245}
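For reference, the message-RAM framing used by qmp_send() above is (derived from the code, offsets relative to qmp->offset):

/*
 * [0x0] u32 length - message length in bytes; the AOSS clears this word
 *                    to acknowledge consumption (see qmp_message_empty())
 * [0x4] payload    - the request itself, written with 32-bit accesses
 *                    and NUL-padded to QMP_MSG_LEN by the callers below
 */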
246
247static int qmp_qdss_clk_prepare(struct clk_hw *hw)
248{
249 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 1}";
250 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
251
252 return qmp_send(qmp, buf, sizeof(buf));
253}
254
255static void qmp_qdss_clk_unprepare(struct clk_hw *hw)
256{
257 static const char buf[QMP_MSG_LEN] = "{class: clock, res: qdss, val: 0}";
258 struct qmp *qmp = container_of(hw, struct qmp, qdss_clk);
259
260 qmp_send(qmp, buf, sizeof(buf));
261}
262
263static const struct clk_ops qmp_qdss_clk_ops = {
264 .prepare = qmp_qdss_clk_prepare,
265 .unprepare = qmp_qdss_clk_unprepare,
266};
267
268static int qmp_qdss_clk_add(struct qmp *qmp)
269{
270 static const struct clk_init_data qdss_init = {
271 .ops = &qmp_qdss_clk_ops,
272 .name = "qdss",
273 };
274 int ret;
275
276 qmp->qdss_clk.init = &qdss_init;
277 ret = clk_hw_register(qmp->dev, &qmp->qdss_clk);
278 if (ret < 0) {
279 dev_err(qmp->dev, "failed to register qdss clock\n");
280 return ret;
281 }
282
283 ret = of_clk_add_hw_provider(qmp->dev->of_node, of_clk_hw_simple_get,
284 &qmp->qdss_clk);
285 if (ret < 0) {
286 dev_err(qmp->dev, "unable to register of clk hw provider\n");
287 clk_hw_unregister(&qmp->qdss_clk);
288 }
289
290 return ret;
291}
292
293static void qmp_qdss_clk_remove(struct qmp *qmp)
294{
295 of_clk_del_provider(qmp->dev->of_node);
296 clk_hw_unregister(&qmp->qdss_clk);
297}
298
299static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
300{
301 char buf[QMP_MSG_LEN] = {};
302
303 snprintf(buf, sizeof(buf),
304 "{class: image, res: load_state, name: %s, val: %s}",
305 res->pd.name, enable ? "on" : "off");
306 return qmp_send(res->qmp, buf, sizeof(buf));
307}
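As an illustration, with the sdm845 table below (where the LPASS domain's genpd name is "adsp"), a power-on request serializes to:

	{class: image, res: load_state, name: adsp, val: on}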
308
309static int qmp_pd_power_on(struct generic_pm_domain *domain)
310{
311 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
312}
313
314static int qmp_pd_power_off(struct generic_pm_domain *domain)
315{
316 return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
317}
318
319static const char * const sdm845_resources[] = {
320 [AOSS_QMP_LS_CDSP] = "cdsp",
321 [AOSS_QMP_LS_LPASS] = "adsp",
322 [AOSS_QMP_LS_MODEM] = "modem",
323 [AOSS_QMP_LS_SLPI] = "slpi",
324 [AOSS_QMP_LS_SPSS] = "spss",
325 [AOSS_QMP_LS_VENUS] = "venus",
326};
327
328static int qmp_pd_add(struct qmp *qmp)
329{
330 struct genpd_onecell_data *data = &qmp->pd_data;
331 struct device *dev = qmp->dev;
332 struct qmp_pd *res;
333 size_t num = ARRAY_SIZE(sdm845_resources);
334 int ret;
335 int i;
336
337 res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
338 if (!res)
339 return -ENOMEM;
340
341 data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
342 GFP_KERNEL);
343 if (!data->domains)
344 return -ENOMEM;
345
346 for (i = 0; i < num; i++) {
347 res[i].qmp = qmp;
348 res[i].pd.name = sdm845_resources[i];
349 res[i].pd.power_on = qmp_pd_power_on;
350 res[i].pd.power_off = qmp_pd_power_off;
351
352 ret = pm_genpd_init(&res[i].pd, NULL, true);
353 if (ret < 0) {
354 dev_err(dev, "failed to init genpd\n");
355 goto unroll_genpds;
356 }
357
358 data->domains[i] = &res[i].pd;
359 }
360
361 data->num_domains = i;
362
363 ret = of_genpd_add_provider_onecell(dev->of_node, data);
364 if (ret < 0)
365 goto unroll_genpds;
366
367 return 0;
368
369unroll_genpds:
370 for (i--; i >= 0; i--)
371 pm_genpd_remove(data->domains[i]);
372
373 return ret;
374}
375
376static void qmp_pd_remove(struct qmp *qmp)
377{
378 struct genpd_onecell_data *data = &qmp->pd_data;
379 struct device *dev = qmp->dev;
380 int i;
381
382 of_genpd_del_provider(dev->of_node);
383
384 for (i = 0; i < data->num_domains; i++)
385 pm_genpd_remove(data->domains[i]);
386}
387
388static int qmp_probe(struct platform_device *pdev)
389{
390 struct resource *res;
391 struct qmp *qmp;
392 int irq;
393 int ret;
394
395 qmp = devm_kzalloc(&pdev->dev, sizeof(*qmp), GFP_KERNEL);
396 if (!qmp)
397 return -ENOMEM;
398
399 qmp->dev = &pdev->dev;
400 init_waitqueue_head(&qmp->event);
401 mutex_init(&qmp->tx_lock);
402
403 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
404 qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
405 if (IS_ERR(qmp->msgram))
406 return PTR_ERR(qmp->msgram);
407
408 qmp->mbox_client.dev = &pdev->dev;
409 qmp->mbox_client.knows_txdone = true;
410 qmp->mbox_chan = mbox_request_channel(&qmp->mbox_client, 0);
411 if (IS_ERR(qmp->mbox_chan)) {
412 dev_err(&pdev->dev, "failed to acquire ipc mailbox\n");
413 return PTR_ERR(qmp->mbox_chan);
414 }
415
416 irq = platform_get_irq(pdev, 0);
417 ret = devm_request_irq(&pdev->dev, irq, qmp_intr, IRQF_ONESHOT,
418 "aoss-qmp", qmp);
419 if (ret < 0) {
420 dev_err(&pdev->dev, "failed to request interrupt\n");
421 goto err_free_mbox;
422 }
423
424 ret = qmp_open(qmp);
425 if (ret < 0)
426 goto err_free_mbox;
427
428 ret = qmp_qdss_clk_add(qmp);
429 if (ret)
430 goto err_close_qmp;
431
432 ret = qmp_pd_add(qmp);
433 if (ret)
434 goto err_remove_qdss_clk;
435
436 platform_set_drvdata(pdev, qmp);
437
438 return 0;
439
440err_remove_qdss_clk:
441 qmp_qdss_clk_remove(qmp);
442err_close_qmp:
443 qmp_close(qmp);
444err_free_mbox:
445 mbox_free_channel(qmp->mbox_chan);
446
447 return ret;
448}
449
450static int qmp_remove(struct platform_device *pdev)
451{
452 struct qmp *qmp = platform_get_drvdata(pdev);
453
454 qmp_qdss_clk_remove(qmp);
455 qmp_pd_remove(qmp);
456
457 qmp_close(qmp);
458 mbox_free_channel(qmp->mbox_chan);
459
460 return 0;
461}
462
463static const struct of_device_id qmp_dt_match[] = {
464 { .compatible = "qcom,sdm845-aoss-qmp", },
465 {}
466};
467MODULE_DEVICE_TABLE(of, qmp_dt_match);
468
469static struct platform_driver qmp_driver = {
470 .driver = {
471 .name = "qcom_aoss_qmp",
472 .of_match_table = qmp_dt_match,
473 },
474 .probe = qmp_probe,
475 .remove = qmp_remove,
476};
477module_platform_driver(qmp_driver);
478
479MODULE_DESCRIPTION("Qualcomm AOSS QMP driver");
480MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index 005326050c23..3c1a55cf25d6 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -16,56 +16,76 @@
16 16
17#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd) 17#define domain_to_rpmpd(domain) container_of(domain, struct rpmpd, pd)
18 18
19/* Resource types */ 19/* Resource types:
 20 * RPMPD_X is X encoded as a little-endian, lower-case ASCII string */
20#define RPMPD_SMPA 0x61706d73 21#define RPMPD_SMPA 0x61706d73
21#define RPMPD_LDOA 0x616f646c 22#define RPMPD_LDOA 0x616f646c
23#define RPMPD_RWCX 0x78637772
24#define RPMPD_RWMX 0x786d7772
25#define RPMPD_RWLC 0x636c7772
26#define RPMPD_RWLM 0x6d6c7772
27#define RPMPD_RWSC 0x63737772
28#define RPMPD_RWSM 0x6d737772
22 29
23/* Operation Keys */ 30/* Operation Keys */
24#define KEY_CORNER 0x6e726f63 /* corn */ 31#define KEY_CORNER 0x6e726f63 /* corn */
25#define KEY_ENABLE 0x6e657773 /* swen */ 32#define KEY_ENABLE 0x6e657773 /* swen */
26#define KEY_FLOOR_CORNER 0x636676 /* vfc */ 33#define KEY_FLOOR_CORNER 0x636676 /* vfc */
34#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
35#define KEY_LEVEL 0x6c766c76 /* vlvl */
27 36
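The encoding used by the RPMPD_* and KEY_* constants above is easiest to see with a small helper; the sketch below is illustrative only (rpmpd_fourcc() is not part of the patch) and maps, e.g., "smpa" to 0x61706d73 and "corn" to 0x6e726f63:

static inline u32 rpmpd_fourcc(const char name[4])
{
	/* byte 0 of the string lands in the least significant byte;
	 * three-letter keys such as "vfc" rely on a NUL fourth byte */
	return name[0] | name[1] << 8 | name[2] << 16 | name[3] << 24;
}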
28#define MAX_RPMPD_STATE 6 37#define MAX_8996_RPMPD_STATE 6
29 38
30#define DEFINE_RPMPD_CORNER_SMPA(_platform, _name, _active, r_id) \ 39#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
40 r_id) \
31 static struct rpmpd _platform##_##_active; \ 41 static struct rpmpd _platform##_##_active; \
32 static struct rpmpd _platform##_##_name = { \ 42 static struct rpmpd _platform##_##_name = { \
33 .pd = { .name = #_name, }, \ 43 .pd = { .name = #_name, }, \
34 .peer = &_platform##_##_active, \ 44 .peer = &_platform##_##_active, \
35 .res_type = RPMPD_SMPA, \ 45 .res_type = RPMPD_##r_type, \
36 .res_id = r_id, \ 46 .res_id = r_id, \
37 .key = KEY_CORNER, \ 47 .key = KEY_##r_key, \
38 }; \ 48 }; \
39 static struct rpmpd _platform##_##_active = { \ 49 static struct rpmpd _platform##_##_active = { \
40 .pd = { .name = #_active, }, \ 50 .pd = { .name = #_active, }, \
41 .peer = &_platform##_##_name, \ 51 .peer = &_platform##_##_name, \
42 .active_only = true, \ 52 .active_only = true, \
43 .res_type = RPMPD_SMPA, \ 53 .res_type = RPMPD_##r_type, \
44 .res_id = r_id, \ 54 .res_id = r_id, \
45 .key = KEY_CORNER, \ 55 .key = KEY_##r_key, \
46 } 56 }
47 57
48#define DEFINE_RPMPD_CORNER_LDOA(_platform, _name, r_id) \ 58#define DEFINE_RPMPD_CORNER(_platform, _name, r_type, r_id) \
49 static struct rpmpd _platform##_##_name = { \ 59 static struct rpmpd _platform##_##_name = { \
50 .pd = { .name = #_name, }, \ 60 .pd = { .name = #_name, }, \
51 .res_type = RPMPD_LDOA, \ 61 .res_type = RPMPD_##r_type, \
52 .res_id = r_id, \ 62 .res_id = r_id, \
53 .key = KEY_CORNER, \ 63 .key = KEY_CORNER, \
54 } 64 }
55 65
56#define DEFINE_RPMPD_VFC(_platform, _name, r_id, r_type) \ 66#define DEFINE_RPMPD_LEVEL(_platform, _name, r_type, r_id) \
57 static struct rpmpd _platform##_##_name = { \ 67 static struct rpmpd _platform##_##_name = { \
58 .pd = { .name = #_name, }, \ 68 .pd = { .name = #_name, }, \
59 .res_type = r_type, \ 69 .res_type = RPMPD_##r_type, \
60 .res_id = r_id, \ 70 .res_id = r_id, \
61 .key = KEY_FLOOR_CORNER, \ 71 .key = KEY_LEVEL, \
62 } 72 }
63 73
64#define DEFINE_RPMPD_VFC_SMPA(_platform, _name, r_id) \ 74#define DEFINE_RPMPD_VFC(_platform, _name, r_type, r_id) \
65 DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_SMPA) 75 static struct rpmpd _platform##_##_name = { \
76 .pd = { .name = #_name, }, \
77 .res_type = RPMPD_##r_type, \
78 .res_id = r_id, \
79 .key = KEY_FLOOR_CORNER, \
80 }
66 81
67#define DEFINE_RPMPD_VFC_LDOA(_platform, _name, r_id) \ 82#define DEFINE_RPMPD_VFL(_platform, _name, r_type, r_id) \
68 DEFINE_RPMPD_VFC(_platform, _name, r_id, RPMPD_LDOA) 83 static struct rpmpd _platform##_##_name = { \
84 .pd = { .name = #_name, }, \
85 .res_type = RPMPD_##r_type, \
86 .res_id = r_id, \
87 .key = KEY_FLOOR_LEVEL, \
88 }
69 89
70struct rpmpd_req { 90struct rpmpd_req {
71 __le32 key; 91 __le32 key;
@@ -83,23 +103,25 @@ struct rpmpd {
83 const int res_type; 103 const int res_type;
84 const int res_id; 104 const int res_id;
85 struct qcom_smd_rpm *rpm; 105 struct qcom_smd_rpm *rpm;
106 unsigned int max_state;
86 __le32 key; 107 __le32 key;
87}; 108};
88 109
89struct rpmpd_desc { 110struct rpmpd_desc {
90 struct rpmpd **rpmpds; 111 struct rpmpd **rpmpds;
91 size_t num_pds; 112 size_t num_pds;
113 unsigned int max_state;
92}; 114};
93 115
94static DEFINE_MUTEX(rpmpd_lock); 116static DEFINE_MUTEX(rpmpd_lock);
95 117
96/* msm8996 RPM Power domains */ 118/* msm8996 RPM Power domains */
97DEFINE_RPMPD_CORNER_SMPA(msm8996, vddcx, vddcx_ao, 1); 119DEFINE_RPMPD_PAIR(msm8996, vddcx, vddcx_ao, SMPA, CORNER, 1);
98DEFINE_RPMPD_CORNER_SMPA(msm8996, vddmx, vddmx_ao, 2); 120DEFINE_RPMPD_PAIR(msm8996, vddmx, vddmx_ao, SMPA, CORNER, 2);
99DEFINE_RPMPD_CORNER_LDOA(msm8996, vddsscx, 26); 121DEFINE_RPMPD_CORNER(msm8996, vddsscx, LDOA, 26);
100 122
101DEFINE_RPMPD_VFC_SMPA(msm8996, vddcx_vfc, 1); 123DEFINE_RPMPD_VFC(msm8996, vddcx_vfc, SMPA, 1);
102DEFINE_RPMPD_VFC_LDOA(msm8996, vddsscx_vfc, 26); 124DEFINE_RPMPD_VFC(msm8996, vddsscx_vfc, LDOA, 26);
103 125
104static struct rpmpd *msm8996_rpmpds[] = { 126static struct rpmpd *msm8996_rpmpds[] = {
105 [MSM8996_VDDCX] = &msm8996_vddcx, 127 [MSM8996_VDDCX] = &msm8996_vddcx,
@@ -114,10 +136,71 @@ static struct rpmpd *msm8996_rpmpds[] = {
114static const struct rpmpd_desc msm8996_desc = { 136static const struct rpmpd_desc msm8996_desc = {
115 .rpmpds = msm8996_rpmpds, 137 .rpmpds = msm8996_rpmpds,
116 .num_pds = ARRAY_SIZE(msm8996_rpmpds), 138 .num_pds = ARRAY_SIZE(msm8996_rpmpds),
139 .max_state = MAX_8996_RPMPD_STATE,
140};
141
142/* msm8998 RPM Power domains */
143DEFINE_RPMPD_PAIR(msm8998, vddcx, vddcx_ao, RWCX, LEVEL, 0);
144DEFINE_RPMPD_VFL(msm8998, vddcx_vfl, RWCX, 0);
145
146DEFINE_RPMPD_PAIR(msm8998, vddmx, vddmx_ao, RWMX, LEVEL, 0);
147DEFINE_RPMPD_VFL(msm8998, vddmx_vfl, RWMX, 0);
148
149DEFINE_RPMPD_LEVEL(msm8998, vdd_ssccx, RWSC, 0);
150DEFINE_RPMPD_VFL(msm8998, vdd_ssccx_vfl, RWSC, 0);
151
152DEFINE_RPMPD_LEVEL(msm8998, vdd_sscmx, RWSM, 0);
153DEFINE_RPMPD_VFL(msm8998, vdd_sscmx_vfl, RWSM, 0);
154
155static struct rpmpd *msm8998_rpmpds[] = {
156 [MSM8998_VDDCX] = &msm8998_vddcx,
157 [MSM8998_VDDCX_AO] = &msm8998_vddcx_ao,
158 [MSM8998_VDDCX_VFL] = &msm8998_vddcx_vfl,
159 [MSM8998_VDDMX] = &msm8998_vddmx,
160 [MSM8998_VDDMX_AO] = &msm8998_vddmx_ao,
161 [MSM8998_VDDMX_VFL] = &msm8998_vddmx_vfl,
162 [MSM8998_SSCCX] = &msm8998_vdd_ssccx,
163 [MSM8998_SSCCX_VFL] = &msm8998_vdd_ssccx_vfl,
164 [MSM8998_SSCMX] = &msm8998_vdd_sscmx,
165 [MSM8998_SSCMX_VFL] = &msm8998_vdd_sscmx_vfl,
166};
167
168static const struct rpmpd_desc msm8998_desc = {
169 .rpmpds = msm8998_rpmpds,
170 .num_pds = ARRAY_SIZE(msm8998_rpmpds),
171 .max_state = RPM_SMD_LEVEL_BINNING,
172};
173
174/* qcs404 RPM Power domains */
175DEFINE_RPMPD_PAIR(qcs404, vddmx, vddmx_ao, RWMX, LEVEL, 0);
176DEFINE_RPMPD_VFL(qcs404, vddmx_vfl, RWMX, 0);
177
178DEFINE_RPMPD_LEVEL(qcs404, vdd_lpicx, RWLC, 0);
179DEFINE_RPMPD_VFL(qcs404, vdd_lpicx_vfl, RWLC, 0);
180
181DEFINE_RPMPD_LEVEL(qcs404, vdd_lpimx, RWLM, 0);
182DEFINE_RPMPD_VFL(qcs404, vdd_lpimx_vfl, RWLM, 0);
183
184static struct rpmpd *qcs404_rpmpds[] = {
185 [QCS404_VDDMX] = &qcs404_vddmx,
186 [QCS404_VDDMX_AO] = &qcs404_vddmx_ao,
187 [QCS404_VDDMX_VFL] = &qcs404_vddmx_vfl,
188 [QCS404_LPICX] = &qcs404_vdd_lpicx,
189 [QCS404_LPICX_VFL] = &qcs404_vdd_lpicx_vfl,
190 [QCS404_LPIMX] = &qcs404_vdd_lpimx,
191 [QCS404_LPIMX_VFL] = &qcs404_vdd_lpimx_vfl,
192};
193
194static const struct rpmpd_desc qcs404_desc = {
195 .rpmpds = qcs404_rpmpds,
196 .num_pds = ARRAY_SIZE(qcs404_rpmpds),
197 .max_state = RPM_SMD_LEVEL_BINNING,
117}; 198};
118 199
119static const struct of_device_id rpmpd_match_table[] = { 200static const struct of_device_id rpmpd_match_table[] = {
120 { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc }, 201 { .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
202 { .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
203 { .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
121 { } 204 { }
122}; 205};
123 206
@@ -225,14 +308,16 @@ static int rpmpd_set_performance(struct generic_pm_domain *domain,
225 int ret = 0; 308 int ret = 0;
226 struct rpmpd *pd = domain_to_rpmpd(domain); 309 struct rpmpd *pd = domain_to_rpmpd(domain);
227 310
228 if (state > MAX_RPMPD_STATE) 311 if (state > pd->max_state)
229 goto out; 312 state = pd->max_state;
230 313
231 mutex_lock(&rpmpd_lock); 314 mutex_lock(&rpmpd_lock);
232 315
233 pd->corner = state; 316 pd->corner = state;
234 317
235 if (!pd->enabled && pd->key != KEY_FLOOR_CORNER) 318 /* Always send updates for vfc and vfl */
319 if (!pd->enabled && pd->key != KEY_FLOOR_CORNER &&
320 pd->key != KEY_FLOOR_LEVEL)
236 goto out; 321 goto out;
237 322
238 ret = rpmpd_aggregate_corner(pd); 323 ret = rpmpd_aggregate_corner(pd);
@@ -287,6 +372,7 @@ static int rpmpd_probe(struct platform_device *pdev)
287 } 372 }
288 373
289 rpmpds[i]->rpm = rpm; 374 rpmpds[i]->rpm = rpm;
375 rpmpds[i]->max_state = desc->max_state;
290 rpmpds[i]->pd.power_off = rpmpd_power_off; 376 rpmpds[i]->pd.power_off = rpmpd_power_off;
291 rpmpds[i]->pd.power_on = rpmpd_power_on; 377 rpmpds[i]->pd.power_on = rpmpd_power_on;
292 rpmpds[i]->pd.set_performance_state = rpmpd_set_performance; 378 rpmpds[i]->pd.set_performance_state = rpmpd_set_performance;
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
index 3342332cc007..54eb6cfc5d5b 100644
--- a/drivers/soc/rockchip/pm_domains.c
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -86,47 +86,47 @@ struct rockchip_pmu {
86#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd) 86#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)
87 87
88#define DOMAIN(pwr, status, req, idle, ack, wakeup) \ 88#define DOMAIN(pwr, status, req, idle, ack, wakeup) \
89{ \ 89{ \
90 .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \ 90 .pwr_mask = (pwr), \
91 .status_mask = (status >= 0) ? BIT(status) : 0, \ 91 .status_mask = (status), \
92 .req_mask = (req >= 0) ? BIT(req) : 0, \ 92 .req_mask = (req), \
93 .idle_mask = (idle >= 0) ? BIT(idle) : 0, \ 93 .idle_mask = (idle), \
94 .ack_mask = (ack >= 0) ? BIT(ack) : 0, \ 94 .ack_mask = (ack), \
95 .active_wakeup = wakeup, \ 95 .active_wakeup = (wakeup), \
96} 96}
97 97
98#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \ 98#define DOMAIN_M(pwr, status, req, idle, ack, wakeup) \
99{ \ 99{ \
100 .pwr_w_mask = (pwr >= 0) ? BIT(pwr + 16) : 0, \ 100 .pwr_w_mask = (pwr) << 16, \
101 .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \ 101 .pwr_mask = (pwr), \
102 .status_mask = (status >= 0) ? BIT(status) : 0, \ 102 .status_mask = (status), \
103 .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \ 103 .req_w_mask = (req) << 16, \
104 .req_mask = (req >= 0) ? BIT(req) : 0, \ 104 .req_mask = (req), \
105 .idle_mask = (idle >= 0) ? BIT(idle) : 0, \ 105 .idle_mask = (idle), \
106 .ack_mask = (ack >= 0) ? BIT(ack) : 0, \ 106 .ack_mask = (ack), \
107 .active_wakeup = wakeup, \ 107 .active_wakeup = wakeup, \
108} 108}
109 109
110#define DOMAIN_RK3036(req, ack, idle, wakeup) \ 110#define DOMAIN_RK3036(req, ack, idle, wakeup) \
111{ \ 111{ \
112 .req_mask = (req >= 0) ? BIT(req) : 0, \ 112 .req_mask = (req), \
113 .req_w_mask = (req >= 0) ? BIT(req + 16) : 0, \ 113 .req_w_mask = (req) << 16, \
114 .ack_mask = (ack >= 0) ? BIT(ack) : 0, \ 114 .ack_mask = (ack), \
115 .idle_mask = (idle >= 0) ? BIT(idle) : 0, \ 115 .idle_mask = (idle), \
116 .active_wakeup = wakeup, \ 116 .active_wakeup = wakeup, \
117} 117}
118 118
119#define DOMAIN_PX30(pwr, status, req, wakeup) \ 119#define DOMAIN_PX30(pwr, status, req, wakeup) \
120 DOMAIN_M(pwr, status, req, (req) + 16, req, wakeup) 120 DOMAIN_M(pwr, status, req, (req) << 16, req, wakeup)
121 121
122#define DOMAIN_RK3288(pwr, status, req, wakeup) \ 122#define DOMAIN_RK3288(pwr, status, req, wakeup) \
123 DOMAIN(pwr, status, req, req, (req) + 16, wakeup) 123 DOMAIN(pwr, status, req, req, (req) << 16, wakeup)
124 124
125#define DOMAIN_RK3328(pwr, status, req, wakeup) \ 125#define DOMAIN_RK3328(pwr, status, req, wakeup) \
126 DOMAIN_M(pwr, pwr, req, (req) + 10, req, wakeup) 126 DOMAIN_M(pwr, pwr, req, (req) << 10, req, wakeup)
127 127
128#define DOMAIN_RK3368(pwr, status, req, wakeup) \ 128#define DOMAIN_RK3368(pwr, status, req, wakeup) \
129 DOMAIN(pwr, status, req, (req) + 16, req, wakeup) 129 DOMAIN(pwr, status, req, (req) << 16, req, wakeup)
130 130
131#define DOMAIN_RK3399(pwr, status, req, wakeup) \ 131#define DOMAIN_RK3399(pwr, status, req, wakeup) \
132 DOMAIN(pwr, status, req, req, req, wakeup) 132 DOMAIN(pwr, status, req, req, req, wakeup)
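The net effect of the macro rework above is that callers now pass ready-made masks instead of bit indices, so an unused field can be written as a plain 0 rather than the old -1 sentinel. For example (illustrative):

/*
 * Before: DOMAIN_RK3036(14, 23, 30, true)
 *           -> req_mask = BIT(14), req_w_mask = BIT(14 + 16) = BIT(30)
 * After:  DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true)
 *           -> req_mask = BIT(14), req_w_mask = BIT(14) << 16 = BIT(30)
 */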
@@ -716,129 +716,129 @@ err_out:
716} 716}
717 717
718static const struct rockchip_domain_info px30_pm_domains[] = { 718static const struct rockchip_domain_info px30_pm_domains[] = {
719 [PX30_PD_USB] = DOMAIN_PX30(5, 5, 10, false), 719 [PX30_PD_USB] = DOMAIN_PX30(BIT(5), BIT(5), BIT(10), false),
720 [PX30_PD_SDCARD] = DOMAIN_PX30(8, 8, 9, false), 720 [PX30_PD_SDCARD] = DOMAIN_PX30(BIT(8), BIT(8), BIT(9), false),
721 [PX30_PD_GMAC] = DOMAIN_PX30(10, 10, 6, false), 721 [PX30_PD_GMAC] = DOMAIN_PX30(BIT(10), BIT(10), BIT(6), false),
722 [PX30_PD_MMC_NAND] = DOMAIN_PX30(11, 11, 5, false), 722 [PX30_PD_MMC_NAND] = DOMAIN_PX30(BIT(11), BIT(11), BIT(5), false),
723 [PX30_PD_VPU] = DOMAIN_PX30(12, 12, 14, false), 723 [PX30_PD_VPU] = DOMAIN_PX30(BIT(12), BIT(12), BIT(14), false),
724 [PX30_PD_VO] = DOMAIN_PX30(13, 13, 7, false), 724 [PX30_PD_VO] = DOMAIN_PX30(BIT(13), BIT(13), BIT(7), false),
725 [PX30_PD_VI] = DOMAIN_PX30(14, 14, 8, false), 725 [PX30_PD_VI] = DOMAIN_PX30(BIT(14), BIT(14), BIT(8), false),
726 [PX30_PD_GPU] = DOMAIN_PX30(15, 15, 2, false), 726 [PX30_PD_GPU] = DOMAIN_PX30(BIT(15), BIT(15), BIT(2), false),
727}; 727};
728 728
729static const struct rockchip_domain_info rk3036_pm_domains[] = { 729static const struct rockchip_domain_info rk3036_pm_domains[] = {
730 [RK3036_PD_MSCH] = DOMAIN_RK3036(14, 23, 30, true), 730 [RK3036_PD_MSCH] = DOMAIN_RK3036(BIT(14), BIT(23), BIT(30), true),
731 [RK3036_PD_CORE] = DOMAIN_RK3036(13, 17, 24, false), 731 [RK3036_PD_CORE] = DOMAIN_RK3036(BIT(13), BIT(17), BIT(24), false),
732 [RK3036_PD_PERI] = DOMAIN_RK3036(12, 18, 25, false), 732 [RK3036_PD_PERI] = DOMAIN_RK3036(BIT(12), BIT(18), BIT(25), false),
733 [RK3036_PD_VIO] = DOMAIN_RK3036(11, 19, 26, false), 733 [RK3036_PD_VIO] = DOMAIN_RK3036(BIT(11), BIT(19), BIT(26), false),
734 [RK3036_PD_VPU] = DOMAIN_RK3036(10, 20, 27, false), 734 [RK3036_PD_VPU] = DOMAIN_RK3036(BIT(10), BIT(20), BIT(27), false),
735 [RK3036_PD_GPU] = DOMAIN_RK3036(9, 21, 28, false), 735 [RK3036_PD_GPU] = DOMAIN_RK3036(BIT(9), BIT(21), BIT(28), false),
736 [RK3036_PD_SYS] = DOMAIN_RK3036(8, 22, 29, false), 736 [RK3036_PD_SYS] = DOMAIN_RK3036(BIT(8), BIT(22), BIT(29), false),
737}; 737};
738 738
739static const struct rockchip_domain_info rk3066_pm_domains[] = { 739static const struct rockchip_domain_info rk3066_pm_domains[] = {
740 [RK3066_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false), 740 [RK3066_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
741 [RK3066_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false), 741 [RK3066_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
742 [RK3066_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false), 742 [RK3066_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
743 [RK3066_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false), 743 [RK3066_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
744 [RK3066_PD_CPU] = DOMAIN(-1, 5, 1, 26, 31, false), 744 [RK3066_PD_CPU] = DOMAIN(0, BIT(5), BIT(1), BIT(26), BIT(31), false),
745}; 745};
746 746
747static const struct rockchip_domain_info rk3128_pm_domains[] = { 747static const struct rockchip_domain_info rk3128_pm_domains[] = {
748 [RK3128_PD_CORE] = DOMAIN_RK3288(0, 0, 4, false), 748 [RK3128_PD_CORE] = DOMAIN_RK3288(BIT(0), BIT(0), BIT(4), false),
749 [RK3128_PD_MSCH] = DOMAIN_RK3288(-1, -1, 6, true), 749 [RK3128_PD_MSCH] = DOMAIN_RK3288(0, 0, BIT(6), true),
750 [RK3128_PD_VIO] = DOMAIN_RK3288(3, 3, 2, false), 750 [RK3128_PD_VIO] = DOMAIN_RK3288(BIT(3), BIT(3), BIT(2), false),
751 [RK3128_PD_VIDEO] = DOMAIN_RK3288(2, 2, 1, false), 751 [RK3128_PD_VIDEO] = DOMAIN_RK3288(BIT(2), BIT(2), BIT(1), false),
752 [RK3128_PD_GPU] = DOMAIN_RK3288(1, 1, 3, false), 752 [RK3128_PD_GPU] = DOMAIN_RK3288(BIT(1), BIT(1), BIT(3), false),
753}; 753};
754 754
755static const struct rockchip_domain_info rk3188_pm_domains[] = { 755static const struct rockchip_domain_info rk3188_pm_domains[] = {
756 [RK3188_PD_GPU] = DOMAIN(9, 9, 3, 24, 29, false), 756 [RK3188_PD_GPU] = DOMAIN(BIT(9), BIT(9), BIT(3), BIT(24), BIT(29), false),
757 [RK3188_PD_VIDEO] = DOMAIN(8, 8, 4, 23, 28, false), 757 [RK3188_PD_VIDEO] = DOMAIN(BIT(8), BIT(8), BIT(4), BIT(23), BIT(28), false),
758 [RK3188_PD_VIO] = DOMAIN(7, 7, 5, 22, 27, false), 758 [RK3188_PD_VIO] = DOMAIN(BIT(7), BIT(7), BIT(5), BIT(22), BIT(27), false),
759 [RK3188_PD_PERI] = DOMAIN(6, 6, 2, 25, 30, false), 759 [RK3188_PD_PERI] = DOMAIN(BIT(6), BIT(6), BIT(2), BIT(25), BIT(30), false),
760 [RK3188_PD_CPU] = DOMAIN(5, 5, 1, 26, 31, false), 760 [RK3188_PD_CPU] = DOMAIN(BIT(5), BIT(5), BIT(1), BIT(26), BIT(31), false),
761}; 761};
762 762
763static const struct rockchip_domain_info rk3228_pm_domains[] = { 763static const struct rockchip_domain_info rk3228_pm_domains[] = {
764 [RK3228_PD_CORE] = DOMAIN_RK3036(0, 0, 16, true), 764 [RK3228_PD_CORE] = DOMAIN_RK3036(BIT(0), BIT(0), BIT(16), true),
765 [RK3228_PD_MSCH] = DOMAIN_RK3036(1, 1, 17, true), 765 [RK3228_PD_MSCH] = DOMAIN_RK3036(BIT(1), BIT(1), BIT(17), true),
766 [RK3228_PD_BUS] = DOMAIN_RK3036(2, 2, 18, true), 766 [RK3228_PD_BUS] = DOMAIN_RK3036(BIT(2), BIT(2), BIT(18), true),
767 [RK3228_PD_SYS] = DOMAIN_RK3036(3, 3, 19, true), 767 [RK3228_PD_SYS] = DOMAIN_RK3036(BIT(3), BIT(3), BIT(19), true),
768 [RK3228_PD_VIO] = DOMAIN_RK3036(4, 4, 20, false), 768 [RK3228_PD_VIO] = DOMAIN_RK3036(BIT(4), BIT(4), BIT(20), false),
769 [RK3228_PD_VOP] = DOMAIN_RK3036(5, 5, 21, false), 769 [RK3228_PD_VOP] = DOMAIN_RK3036(BIT(5), BIT(5), BIT(21), false),
770 [RK3228_PD_VPU] = DOMAIN_RK3036(6, 6, 22, false), 770 [RK3228_PD_VPU] = DOMAIN_RK3036(BIT(6), BIT(6), BIT(22), false),
771 [RK3228_PD_RKVDEC] = DOMAIN_RK3036(7, 7, 23, false), 771 [RK3228_PD_RKVDEC] = DOMAIN_RK3036(BIT(7), BIT(7), BIT(23), false),
772 [RK3228_PD_GPU] = DOMAIN_RK3036(8, 8, 24, false), 772 [RK3228_PD_GPU] = DOMAIN_RK3036(BIT(8), BIT(8), BIT(24), false),
773 [RK3228_PD_PERI] = DOMAIN_RK3036(9, 9, 25, true), 773 [RK3228_PD_PERI] = DOMAIN_RK3036(BIT(9), BIT(9), BIT(25), true),
774 [RK3228_PD_GMAC] = DOMAIN_RK3036(10, 10, 26, false), 774 [RK3228_PD_GMAC] = DOMAIN_RK3036(BIT(10), BIT(10), BIT(26), false),
775}; 775};
776 776
777static const struct rockchip_domain_info rk3288_pm_domains[] = { 777static const struct rockchip_domain_info rk3288_pm_domains[] = {
778 [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false), 778 [RK3288_PD_VIO] = DOMAIN_RK3288(BIT(7), BIT(7), BIT(4), false),
779 [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false), 779 [RK3288_PD_HEVC] = DOMAIN_RK3288(BIT(14), BIT(10), BIT(9), false),
780 [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false), 780 [RK3288_PD_VIDEO] = DOMAIN_RK3288(BIT(8), BIT(8), BIT(3), false),
781 [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false), 781 [RK3288_PD_GPU] = DOMAIN_RK3288(BIT(9), BIT(9), BIT(2), false),
782}; 782};
783 783
784static const struct rockchip_domain_info rk3328_pm_domains[] = { 784static const struct rockchip_domain_info rk3328_pm_domains[] = {
785 [RK3328_PD_CORE] = DOMAIN_RK3328(-1, 0, 0, false), 785 [RK3328_PD_CORE] = DOMAIN_RK3328(0, BIT(0), BIT(0), false),
786 [RK3328_PD_GPU] = DOMAIN_RK3328(-1, 1, 1, false), 786 [RK3328_PD_GPU] = DOMAIN_RK3328(0, BIT(1), BIT(1), false),
787 [RK3328_PD_BUS] = DOMAIN_RK3328(-1, 2, 2, true), 787 [RK3328_PD_BUS] = DOMAIN_RK3328(0, BIT(2), BIT(2), true),
788 [RK3328_PD_MSCH] = DOMAIN_RK3328(-1, 3, 3, true), 788 [RK3328_PD_MSCH] = DOMAIN_RK3328(0, BIT(3), BIT(3), true),
789 [RK3328_PD_PERI] = DOMAIN_RK3328(-1, 4, 4, true), 789 [RK3328_PD_PERI] = DOMAIN_RK3328(0, BIT(4), BIT(4), true),
790 [RK3328_PD_VIDEO] = DOMAIN_RK3328(-1, 5, 5, false), 790 [RK3328_PD_VIDEO] = DOMAIN_RK3328(0, BIT(5), BIT(5), false),
791 [RK3328_PD_HEVC] = DOMAIN_RK3328(-1, 6, 6, false), 791 [RK3328_PD_HEVC] = DOMAIN_RK3328(0, BIT(6), BIT(6), false),
792 [RK3328_PD_VIO] = DOMAIN_RK3328(-1, 8, 8, false), 792 [RK3328_PD_VIO] = DOMAIN_RK3328(0, BIT(8), BIT(8), false),
793 [RK3328_PD_VPU] = DOMAIN_RK3328(-1, 9, 9, false), 793 [RK3328_PD_VPU] = DOMAIN_RK3328(0, BIT(9), BIT(9), false),
794}; 794};
795 795
796static const struct rockchip_domain_info rk3366_pm_domains[] = { 796static const struct rockchip_domain_info rk3366_pm_domains[] = {
797 [RK3366_PD_PERI] = DOMAIN_RK3368(10, 10, 6, true), 797 [RK3366_PD_PERI] = DOMAIN_RK3368(BIT(10), BIT(10), BIT(6), true),
798 [RK3366_PD_VIO] = DOMAIN_RK3368(14, 14, 8, false), 798 [RK3366_PD_VIO] = DOMAIN_RK3368(BIT(14), BIT(14), BIT(8), false),
799 [RK3366_PD_VIDEO] = DOMAIN_RK3368(13, 13, 7, false), 799 [RK3366_PD_VIDEO] = DOMAIN_RK3368(BIT(13), BIT(13), BIT(7), false),
800 [RK3366_PD_RKVDEC] = DOMAIN_RK3368(11, 11, 7, false), 800 [RK3366_PD_RKVDEC] = DOMAIN_RK3368(BIT(11), BIT(11), BIT(7), false),
801 [RK3366_PD_WIFIBT] = DOMAIN_RK3368(8, 8, 9, false), 801 [RK3366_PD_WIFIBT] = DOMAIN_RK3368(BIT(8), BIT(8), BIT(9), false),
802 [RK3366_PD_VPU] = DOMAIN_RK3368(12, 12, 7, false), 802 [RK3366_PD_VPU] = DOMAIN_RK3368(BIT(12), BIT(12), BIT(7), false),
803 [RK3366_PD_GPU] = DOMAIN_RK3368(15, 15, 2, false), 803 [RK3366_PD_GPU] = DOMAIN_RK3368(BIT(15), BIT(15), BIT(2), false),
804}; 804};
805 805
806static const struct rockchip_domain_info rk3368_pm_domains[] = { 806static const struct rockchip_domain_info rk3368_pm_domains[] = {
807 [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true), 807 [RK3368_PD_PERI] = DOMAIN_RK3368(BIT(13), BIT(12), BIT(6), true),
808 [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false), 808 [RK3368_PD_VIO] = DOMAIN_RK3368(BIT(15), BIT(14), BIT(8), false),
809 [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false), 809 [RK3368_PD_VIDEO] = DOMAIN_RK3368(BIT(14), BIT(13), BIT(7), false),
810 [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false), 810 [RK3368_PD_GPU_0] = DOMAIN_RK3368(BIT(16), BIT(15), BIT(2), false),
811 [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false), 811 [RK3368_PD_GPU_1] = DOMAIN_RK3368(BIT(17), BIT(16), BIT(2), false),
812}; 812};
813 813
814static const struct rockchip_domain_info rk3399_pm_domains[] = { 814static const struct rockchip_domain_info rk3399_pm_domains[] = {
815 [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false), 815 [RK3399_PD_TCPD0] = DOMAIN_RK3399(BIT(8), BIT(8), 0, false),
816 [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false), 816 [RK3399_PD_TCPD1] = DOMAIN_RK3399(BIT(9), BIT(9), 0, false),
817 [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true), 817 [RK3399_PD_CCI] = DOMAIN_RK3399(BIT(10), BIT(10), 0, true),
818 [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true), 818 [RK3399_PD_CCI0] = DOMAIN_RK3399(0, 0, BIT(15), true),
819 [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true), 819 [RK3399_PD_CCI1] = DOMAIN_RK3399(0, 0, BIT(16), true),
820 [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true), 820 [RK3399_PD_PERILP] = DOMAIN_RK3399(BIT(11), BIT(11), BIT(1), true),
821 [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true), 821 [RK3399_PD_PERIHP] = DOMAIN_RK3399(BIT(12), BIT(12), BIT(2), true),
822 [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true), 822 [RK3399_PD_CENTER] = DOMAIN_RK3399(BIT(13), BIT(13), BIT(14), true),
823 [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false), 823 [RK3399_PD_VIO] = DOMAIN_RK3399(BIT(14), BIT(14), BIT(17), false),
824 [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false), 824 [RK3399_PD_GPU] = DOMAIN_RK3399(BIT(15), BIT(15), BIT(0), false),
825 [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false), 825 [RK3399_PD_VCODEC] = DOMAIN_RK3399(BIT(16), BIT(16), BIT(3), false),
826 [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false), 826 [RK3399_PD_VDU] = DOMAIN_RK3399(BIT(17), BIT(17), BIT(4), false),
827 [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false), 827 [RK3399_PD_RGA] = DOMAIN_RK3399(BIT(18), BIT(18), BIT(5), false),
828 [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false), 828 [RK3399_PD_IEP] = DOMAIN_RK3399(BIT(19), BIT(19), BIT(6), false),
829 [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false), 829 [RK3399_PD_VO] = DOMAIN_RK3399(BIT(20), BIT(20), 0, false),
830 [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false), 830 [RK3399_PD_VOPB] = DOMAIN_RK3399(0, 0, BIT(7), false),
831 [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false), 831 [RK3399_PD_VOPL] = DOMAIN_RK3399(0, 0, BIT(8), false),
832 [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false), 832 [RK3399_PD_ISP0] = DOMAIN_RK3399(BIT(22), BIT(22), BIT(9), false),
833 [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false), 833 [RK3399_PD_ISP1] = DOMAIN_RK3399(BIT(23), BIT(23), BIT(10), false),
834 [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false), 834 [RK3399_PD_HDCP] = DOMAIN_RK3399(BIT(24), BIT(24), BIT(11), false),
835 [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true), 835 [RK3399_PD_GMAC] = DOMAIN_RK3399(BIT(25), BIT(25), BIT(23), true),
836 [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true), 836 [RK3399_PD_EMMC] = DOMAIN_RK3399(BIT(26), BIT(26), BIT(24), true),
837 [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true), 837 [RK3399_PD_USB3] = DOMAIN_RK3399(BIT(27), BIT(27), BIT(12), true),
838 [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false), 838 [RK3399_PD_EDP] = DOMAIN_RK3399(BIT(28), BIT(28), BIT(22), false),
839 [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true), 839 [RK3399_PD_GIC] = DOMAIN_RK3399(BIT(29), BIT(29), BIT(27), true),
840 [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true), 840 [RK3399_PD_SD] = DOMAIN_RK3399(BIT(30), BIT(30), BIT(28), true),
841 [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true), 841 [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(BIT(31), BIT(31), BIT(29), true),
842}; 842};
843 843
844static const struct rockchip_pmu_info px30_pmu = { 844static const struct rockchip_pmu_info px30_pmu = {
diff --git a/drivers/soc/tegra/Kconfig b/drivers/soc/tegra/Kconfig
index fbfce48ffb0d..c8ef05d6b8c7 100644
--- a/drivers/soc/tegra/Kconfig
+++ b/drivers/soc/tegra/Kconfig
@@ -109,6 +109,7 @@ config ARCH_TEGRA_186_SOC
109config ARCH_TEGRA_194_SOC 109config ARCH_TEGRA_194_SOC
110 bool "NVIDIA Tegra194 SoC" 110 bool "NVIDIA Tegra194 SoC"
111 select MAILBOX 111 select MAILBOX
112 select PINCTRL_TEGRA194
112 select TEGRA_BPMP 113 select TEGRA_BPMP
113 select TEGRA_HSP_MBOX 114 select TEGRA_HSP_MBOX
114 select TEGRA_IVC 115 select TEGRA_IVC
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 9b84bcc356d0..3eb44e65b326 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -133,8 +133,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
133 133
134 fuse->clk = devm_clk_get(&pdev->dev, "fuse"); 134 fuse->clk = devm_clk_get(&pdev->dev, "fuse");
135 if (IS_ERR(fuse->clk)) { 135 if (IS_ERR(fuse->clk)) {
136 dev_err(&pdev->dev, "failed to get FUSE clock: %ld", 136 if (PTR_ERR(fuse->clk) != -EPROBE_DEFER)
137 PTR_ERR(fuse->clk)); 137 dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
138 PTR_ERR(fuse->clk));
139
138 fuse->base = base; 140 fuse->base = base;
139 return PTR_ERR(fuse->clk); 141 return PTR_ERR(fuse->clk);
140 } 142 }
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 17e7796a832b..9f9c1c677cf4 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -232,6 +232,11 @@ struct tegra_pmc_soc {
232 const char * const *reset_levels; 232 const char * const *reset_levels;
233 unsigned int num_reset_levels; 233 unsigned int num_reset_levels;
234 234
235 /*
236 * These describe events that can wake the system from sleep (i.e.
 237 * LP0 or SC7). Wakeups from other sleep states (such as LP1 or LP2)
238 * are dealt with in the LIC.
239 */
235 const struct tegra_wake_event *wake_events; 240 const struct tegra_wake_event *wake_events;
236 unsigned int num_wake_events; 241 unsigned int num_wake_events;
237}; 242};
@@ -1855,6 +1860,9 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
1855 unsigned int i; 1860 unsigned int i;
1856 int err = 0; 1861 int err = 0;
1857 1862
1863 if (WARN_ON(num_irqs > 1))
1864 return -EINVAL;
1865
1858 for (i = 0; i < soc->num_wake_events; i++) { 1866 for (i = 0; i < soc->num_wake_events; i++) {
1859 const struct tegra_wake_event *event = &soc->wake_events[i]; 1867 const struct tegra_wake_event *event = &soc->wake_events[i];
1860 1868
@@ -1895,6 +1903,11 @@ static int tegra_pmc_irq_alloc(struct irq_domain *domain, unsigned int virq,
1895 } 1903 }
1896 } 1904 }
1897 1905
1906 /*
1907 * For interrupts that don't have associated wake events, assign a
1908 * dummy hardware IRQ number. This is used in the ->irq_set_type()
1909 * and ->irq_set_wake() callbacks to return early for these IRQs.
1910 */
1898 if (i == soc->num_wake_events) 1911 if (i == soc->num_wake_events)
1899 err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX, 1912 err = irq_domain_set_hwirq_and_chip(domain, virq, ULONG_MAX,
1900 &pmc->irq, pmc); 1913 &pmc->irq, pmc);
@@ -1913,6 +1926,10 @@ static int tegra_pmc_irq_set_wake(struct irq_data *data, unsigned int on)
1913 unsigned int offset, bit; 1926 unsigned int offset, bit;
1914 u32 value; 1927 u32 value;
1915 1928
1929 /* nothing to do if there's no associated wake event */
1930 if (WARN_ON(data->hwirq == ULONG_MAX))
1931 return 0;
1932
1916 offset = data->hwirq / 32; 1933 offset = data->hwirq / 32;
1917 bit = data->hwirq % 32; 1934 bit = data->hwirq % 32;
1918 1935
@@ -1940,6 +1957,7 @@ static int tegra_pmc_irq_set_type(struct irq_data *data, unsigned int type)
1940 struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data); 1957 struct tegra_pmc *pmc = irq_data_get_irq_chip_data(data);
1941 u32 value; 1958 u32 value;
1942 1959
1960 /* nothing to do if there's no associated wake event */
1943 if (data->hwirq == ULONG_MAX) 1961 if (data->hwirq == ULONG_MAX)
1944 return 0; 1962 return 0;
1945 1963
diff --git a/include/dt-bindings/power/qcom-aoss-qmp.h b/include/dt-bindings/power/qcom-aoss-qmp.h
new file mode 100644
index 000000000000..ec336d31dee4
--- /dev/null
+++ b/include/dt-bindings/power/qcom-aoss-qmp.h
@@ -0,0 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/* Copyright (c) 2018, Linaro Ltd. */
3
4#ifndef __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
5#define __DT_BINDINGS_POWER_QCOM_AOSS_QMP_H
6
7#define AOSS_QMP_LS_CDSP 0
8#define AOSS_QMP_LS_LPASS 1
9#define AOSS_QMP_LS_MODEM 2
10#define AOSS_QMP_LS_SLPI 3
11#define AOSS_QMP_LS_SPSS 4
12#define AOSS_QMP_LS_VENUS 5
13
14#endif
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 87d9c6611682..93e36d011527 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -36,4 +36,38 @@
36#define MSM8996_VDDSSCX 5 36#define MSM8996_VDDSSCX 5
37#define MSM8996_VDDSSCX_VFC 6 37#define MSM8996_VDDSSCX_VFC 6
38 38
39/* MSM8998 Power Domain Indexes */
40#define MSM8998_VDDCX 0
41#define MSM8998_VDDCX_AO 1
42#define MSM8998_VDDCX_VFL 2
43#define MSM8998_VDDMX 3
44#define MSM8998_VDDMX_AO 4
45#define MSM8998_VDDMX_VFL 5
46#define MSM8998_SSCCX 6
47#define MSM8998_SSCCX_VFL 7
48#define MSM8998_SSCMX 8
49#define MSM8998_SSCMX_VFL 9
50
51/* QCS404 Power Domains */
52#define QCS404_VDDMX 0
53#define QCS404_VDDMX_AO 1
54#define QCS404_VDDMX_VFL 2
55#define QCS404_LPICX 3
56#define QCS404_LPICX_VFL 4
57#define QCS404_LPIMX 5
58#define QCS404_LPIMX_VFL 6
59
60/* RPM SMD Power Domain performance levels */
61#define RPM_SMD_LEVEL_RETENTION 16
62#define RPM_SMD_LEVEL_RETENTION_PLUS 32
63#define RPM_SMD_LEVEL_MIN_SVS 48
64#define RPM_SMD_LEVEL_LOW_SVS 64
65#define RPM_SMD_LEVEL_SVS 128
66#define RPM_SMD_LEVEL_SVS_PLUS 192
67#define RPM_SMD_LEVEL_NOM 256
68#define RPM_SMD_LEVEL_NOM_PLUS 320
69#define RPM_SMD_LEVEL_TURBO 384
70#define RPM_SMD_LEVEL_TURBO_NO_CPR 416
71#define RPM_SMD_LEVEL_BINNING 512
72
39#endif 73#endif
diff --git a/include/dt-bindings/reset/bitmain,bm1880-reset.h b/include/dt-bindings/reset/bitmain,bm1880-reset.h
new file mode 100644
index 000000000000..4c0de5223773
--- /dev/null
+++ b/include/dt-bindings/reset/bitmain,bm1880-reset.h
@@ -0,0 +1,51 @@
1/* SPDX-License-Identifier: GPL-2.0+ */
2/*
3 * Copyright (c) 2018 Bitmain Ltd.
4 * Copyright (c) 2019 Linaro Ltd.
5 */
6
7#ifndef _DT_BINDINGS_BM1880_RESET_H
8#define _DT_BINDINGS_BM1880_RESET_H
9
10#define BM1880_RST_MAIN_AP 0
11#define BM1880_RST_SECOND_AP 1
12#define BM1880_RST_DDR 2
13#define BM1880_RST_VIDEO 3
14#define BM1880_RST_JPEG 4
15#define BM1880_RST_VPP 5
16#define BM1880_RST_GDMA 6
17#define BM1880_RST_AXI_SRAM 7
18#define BM1880_RST_TPU 8
19#define BM1880_RST_USB 9
20#define BM1880_RST_ETH0 10
21#define BM1880_RST_ETH1 11
22#define BM1880_RST_NAND 12
23#define BM1880_RST_EMMC 13
24#define BM1880_RST_SD 14
25#define BM1880_RST_SDMA 15
26#define BM1880_RST_I2S0 16
27#define BM1880_RST_I2S1 17
28#define BM1880_RST_UART0_1_CLK 18
29#define BM1880_RST_UART0_1_ACLK 19
30#define BM1880_RST_UART2_3_CLK 20
31#define BM1880_RST_UART2_3_ACLK 21
32#define BM1880_RST_MINER 22
33#define BM1880_RST_I2C0 23
34#define BM1880_RST_I2C1 24
35#define BM1880_RST_I2C2 25
36#define BM1880_RST_I2C3 26
37#define BM1880_RST_I2C4 27
38#define BM1880_RST_PWM0 28
39#define BM1880_RST_PWM1 29
40#define BM1880_RST_PWM2 30
41#define BM1880_RST_PWM3 31
42#define BM1880_RST_SPI 32
43#define BM1880_RST_GPIO0 33
44#define BM1880_RST_GPIO1 34
45#define BM1880_RST_GPIO2 35
46#define BM1880_RST_EFUSE 36
47#define BM1880_RST_WDT 37
48#define BM1880_RST_AHB_ROM 38
49#define BM1880_RST_SPIC 39
50
51#endif /* _DT_BINDINGS_BM1880_RESET_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 9256c0305968..0c587d4fc718 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -19,6 +19,7 @@ enum ti_sysc_module_type {
19 19
20struct ti_sysc_cookie { 20struct ti_sysc_cookie {
21 void *data; 21 void *data;
22 void *clkdm;
22}; 23};
23 24
24/** 25/**
@@ -46,6 +47,10 @@ struct sysc_regbits {
46 s8 emufree_shift; 47 s8 emufree_shift;
47}; 48};
48 49
50#define SYSC_MODULE_QUIRK_HDQ1W BIT(17)
51#define SYSC_MODULE_QUIRK_I2C BIT(16)
52#define SYSC_MODULE_QUIRK_WDT BIT(15)
53#define SYSS_QUIRK_RESETDONE_INVERTED BIT(14)
49#define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13) 54#define SYSC_QUIRK_SWSUP_MSTANDBY BIT(13)
50#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12) 55#define SYSC_QUIRK_SWSUP_SIDLE_ACT BIT(12)
51#define SYSC_QUIRK_SWSUP_SIDLE BIT(11) 56#define SYSC_QUIRK_SWSUP_SIDLE BIT(11)
@@ -125,9 +130,16 @@ struct ti_sysc_module_data {
125}; 130};
126 131
127struct device; 132struct device;
133struct clk;
128 134
129struct ti_sysc_platform_data { 135struct ti_sysc_platform_data {
130 struct of_dev_auxdata *auxdata; 136 struct of_dev_auxdata *auxdata;
137 int (*init_clockdomain)(struct device *dev, struct clk *fck,
138 struct clk *ick, struct ti_sysc_cookie *cookie);
139 void (*clkdm_deny_idle)(struct device *dev,
140 const struct ti_sysc_cookie *cookie);
141 void (*clkdm_allow_idle)(struct device *dev,
142 const struct ti_sysc_cookie *cookie);
131 int (*init_module)(struct device *dev, 143 int (*init_module)(struct device *dev,
132 const struct ti_sysc_module_data *data, 144 const struct ti_sysc_module_data *data,
133 struct ti_sysc_cookie *cookie); 145 struct ti_sysc_cookie *cookie);
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 3105055c00a7..9ff2e9357e9a 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -144,6 +144,7 @@ struct scmi_power_ops {
144struct scmi_sensor_info { 144struct scmi_sensor_info {
145 u32 id; 145 u32 id;
146 u8 type; 146 u8 type;
147 s8 scale;
147 char name[SCMI_MAX_STR_SIZE]; 148 char name[SCMI_MAX_STR_SIZE];
148}; 149};
149 150
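The new scale field carries the sensor's unit multiplier; per the SCMI specification this is a signed power-of-ten exponent, so a consumer such as scmi-hwmon would scale readings roughly as:

	/* value in sensor units = raw_reading * 10^scale (scale may be negative) */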
diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h
index 406e6717d252..6c610e188a44 100644
--- a/include/linux/soc/ti/ti_sci_protocol.h
+++ b/include/linux/soc/ti/ti_sci_protocol.h
@@ -241,12 +241,254 @@ struct ti_sci_rm_irq_ops {
241 u16 global_event, u8 vint_status_bit); 241 u16 global_event, u8 vint_status_bit);
242}; 242};
243 243
244/* RA config.addr_lo parameter is valid for RM ring configure TI_SCI message */
245#define TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID BIT(0)
246/* RA config.addr_hi parameter is valid for RM ring configure TI_SCI message */
247#define TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID BIT(1)
248/* RA config.count parameter is valid for RM ring configure TI_SCI message */
249#define TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID BIT(2)
250/* RA config.mode parameter is valid for RM ring configure TI_SCI message */
251#define TI_SCI_MSG_VALUE_RM_RING_MODE_VALID BIT(3)
252/* RA config.size parameter is valid for RM ring configure TI_SCI message */
253#define TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID BIT(4)
254/* RA config.order_id parameter is valid for RM ring configure TI_SCI message */
255#define TI_SCI_MSG_VALUE_RM_RING_ORDER_ID_VALID BIT(5)
256
257#define TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER \
258 (TI_SCI_MSG_VALUE_RM_RING_ADDR_LO_VALID | \
259 TI_SCI_MSG_VALUE_RM_RING_ADDR_HI_VALID | \
260 TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID | \
261 TI_SCI_MSG_VALUE_RM_RING_MODE_VALID | \
262 TI_SCI_MSG_VALUE_RM_RING_SIZE_VALID)
263
264/**
265 * struct ti_sci_rm_ringacc_ops - Ring Accelerator Management operations
266 * @config: configure the SoC Navigator Subsystem Ring Accelerator ring
267 * @get_config: get the SoC Navigator Subsystem Ring Accelerator ring
268 * configuration
269 */
270struct ti_sci_rm_ringacc_ops {
271 int (*config)(const struct ti_sci_handle *handle,
272 u32 valid_params, u16 nav_id, u16 index,
273 u32 addr_lo, u32 addr_hi, u32 count, u8 mode,
274 u8 size, u8 order_id
275 );
276 int (*get_config)(const struct ti_sci_handle *handle,
277 u32 nav_id, u32 index, u8 *mode,
278 u32 *addr_lo, u32 *addr_hi, u32 *count,
279 u8 *size, u8 *order_id);
280};
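A usage sketch for the ring ops (not from this patch; the nav_id, index, count, mode and size values are placeholders, and handle is assumed to come from ti_sci_get_handle()):

static int example_ring_setup(const struct ti_sci_handle *handle,
			      dma_addr_t ring_dma)
{
	/* everything except order_id is marked valid */
	return handle->ops.rm_ring_ops.config(handle,
					      TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
					      0x0 /* nav_id */, 0 /* index */,
					      lower_32_bits(ring_dma),
					      upper_32_bits(ring_dma),
					      256 /* count */, 0 /* mode */,
					      4 /* size */, 0 /* order_id */);
}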
281
282/**
283 * struct ti_sci_rm_psil_ops - PSI-L thread operations
284 * @pair: pair PSI-L source thread to a destination thread.
285 * If the src_thread is mapped to UDMA tchan, the corresponding channel's
286 * TCHAN_THRD_ID register is updated.
287 * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
288 * RCHAN_THRD_ID register is updated.
289 * @unpair: unpair PSI-L source thread from a destination thread.
290 * If the src_thread is mapped to UDMA tchan, the corresponding channel's
291 * TCHAN_THRD_ID register is cleared.
292 * If the dst_thread is mapped to UDMA rchan, the corresponding channel's
293 * RCHAN_THRD_ID register is cleared.
294 */
295struct ti_sci_rm_psil_ops {
296 int (*pair)(const struct ti_sci_handle *handle, u32 nav_id,
297 u32 src_thread, u32 dst_thread);
298 int (*unpair)(const struct ti_sci_handle *handle, u32 nav_id,
299 u32 src_thread, u32 dst_thread);
300};
301
302/* UDMAP channel types */
303#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR 2
304#define TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR_SB 3 /* RX only */
305#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR 10
306#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBVR 11
307#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR 12
308#define TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBVR 13
309
310#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_HOST 0
311#define TI_SCI_RM_UDMAP_RX_FLOW_DESC_MONO 2
312
313#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES 1
314#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES 2
315#define TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES 3
316
317/* UDMAP TX/RX channel valid_params common declarations */
318#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID BIT(0)
319#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID BIT(1)
320#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID BIT(2)
321#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID BIT(3)
322#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID BIT(4)
323#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_PRIORITY_VALID BIT(5)
324#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_QOS_VALID BIT(6)
325#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_ORDER_ID_VALID BIT(7)
326#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_SCHED_PRIORITY_VALID BIT(8)
327#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID BIT(14)
328
329/**
330 * Configures a Navigator Subsystem UDMAP transmit channel
331 *
 332 * Configures the registers of a Navigator Subsystem UDMAP transmit channel.
333 * See @ti_sci_msg_rm_udmap_tx_ch_cfg_req
334 */
335struct ti_sci_msg_rm_udmap_tx_ch_cfg {
336 u32 valid_params;
337#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID BIT(9)
338#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID BIT(10)
339#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID BIT(11)
340#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_CREDIT_COUNT_VALID BIT(12)
341#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FDEPTH_VALID BIT(13)
342 u16 nav_id;
343 u16 index;
344 u8 tx_pause_on_err;
345 u8 tx_filt_einfo;
346 u8 tx_filt_pswords;
347 u8 tx_atype;
348 u8 tx_chan_type;
349 u8 tx_supr_tdpkt;
350 u16 tx_fetch_size;
351 u8 tx_credit_count;
352 u16 txcq_qnum;
353 u8 tx_priority;
354 u8 tx_qos;
355 u8 tx_orderid;
356 u16 fdepth;
357 u8 tx_sched_priority;
358 u8 tx_burst_size;
359};
360
361/**
362 * Configures a Navigator Subsystem UDMAP receive channel
363 *
 364 * Configures the registers of a Navigator Subsystem UDMAP receive channel.
365 * See @ti_sci_msg_rm_udmap_rx_ch_cfg_req
366 */
367struct ti_sci_msg_rm_udmap_rx_ch_cfg {
368 u32 valid_params;
369#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID BIT(9)
370#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID BIT(10)
371#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID BIT(11)
372#define TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID BIT(12)
373 u16 nav_id;
374 u16 index;
375 u16 rx_fetch_size;
376 u16 rxcq_qnum;
377 u8 rx_priority;
378 u8 rx_qos;
379 u8 rx_orderid;
380 u8 rx_sched_priority;
381 u16 flowid_start;
382 u16 flowid_cnt;
383 u8 rx_pause_on_err;
384 u8 rx_atype;
385 u8 rx_chan_type;
386 u8 rx_ignore_short;
387 u8 rx_ignore_long;
388 u8 rx_burst_size;
389};
390
391/**
392 * Configures a Navigator Subsystem UDMAP receive flow
393 *
394 * Configures a Navigator Subsystem UDMAP receive flow's registers.
 395 * See @ti_sci_msg_rm_udmap_flow_cfg_req
396 */
397struct ti_sci_msg_rm_udmap_flow_cfg {
398 u32 valid_params;
399#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID BIT(0)
400#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID BIT(1)
401#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID BIT(2)
402#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID BIT(3)
403#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SOP_OFFSET_VALID BIT(4)
404#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID BIT(5)
405#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_VALID BIT(6)
406#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_VALID BIT(7)
407#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_VALID BIT(8)
408#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_VALID BIT(9)
409#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID BIT(10)
410#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID BIT(11)
411#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID BIT(12)
412#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID BIT(13)
413#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID BIT(14)
414#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID BIT(15)
415#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID BIT(16)
416#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID BIT(17)
417#define TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PS_LOCATION_VALID BIT(18)
418 u16 nav_id;
419 u16 flow_index;
420 u8 rx_einfo_present;
421 u8 rx_psinfo_present;
422 u8 rx_error_handling;
423 u8 rx_desc_type;
424 u16 rx_sop_offset;
425 u16 rx_dest_qnum;
426 u8 rx_src_tag_hi;
427 u8 rx_src_tag_lo;
428 u8 rx_dest_tag_hi;
429 u8 rx_dest_tag_lo;
430 u8 rx_src_tag_hi_sel;
431 u8 rx_src_tag_lo_sel;
432 u8 rx_dest_tag_hi_sel;
433 u8 rx_dest_tag_lo_sel;
434 u16 rx_fdq0_sz0_qnum;
435 u16 rx_fdq1_qnum;
436 u16 rx_fdq2_qnum;
437 u16 rx_fdq3_qnum;
438 u8 rx_ps_location;
439};
440
441/**
442 * struct ti_sci_rm_udmap_ops - UDMA Management operations
443 * @tx_ch_cfg: configure SoC Navigator Subsystem UDMA transmit channel.
444 * @rx_ch_cfg: configure SoC Navigator Subsystem UDMA receive channel.
 445 * @rx_flow_cfg: configure SoC Navigator Subsystem UDMA receive flow.
446 */
447struct ti_sci_rm_udmap_ops {
448 int (*tx_ch_cfg)(const struct ti_sci_handle *handle,
449 const struct ti_sci_msg_rm_udmap_tx_ch_cfg *params);
450 int (*rx_ch_cfg)(const struct ti_sci_handle *handle,
451 const struct ti_sci_msg_rm_udmap_rx_ch_cfg *params);
452 int (*rx_flow_cfg)(const struct ti_sci_handle *handle,
453 const struct ti_sci_msg_rm_udmap_flow_cfg *params);
454};
455
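Similarly, a minimal illustrative transmit-channel configuration (the nav_id, index and queue numbers are made up for the sketch):

static int example_udmap_tx_setup(const struct ti_sci_handle *handle)
{
	struct ti_sci_msg_rm_udmap_tx_ch_cfg cfg = {
		.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID,
		.nav_id = 0x0,		/* placeholder */
		.index = 0,		/* placeholder */
		.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR,
		.tx_fetch_size = 16,
		.txcq_qnum = 0,		/* placeholder */
	};

	return handle->ops.rm_udmap_ops.tx_ch_cfg(handle, &cfg);
}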
456/**
457 * struct ti_sci_proc_ops - Processor Control operations
458 * @request: Request to control a physical processor. The requesting host
459 * should be in the processor access list
 460 * @release: Relinquish control of a physical processor
 461 * @handover: Hand over control of a physical processor to another host
462 * in the permitted list
463 * @set_config: Set base configuration of a processor
464 * @set_control: Setup limited control flags in specific cases
 465 * @get_status: Get the state of a physical processor
 466 *
 467 * NOTE: The following parameters are generic in nature for all these ops:
468 * -handle: Pointer to TI SCI handle as retrieved by *ti_sci_get_handle
469 * -pid: Processor ID
470 * -hid: Host ID
471 */
472struct ti_sci_proc_ops {
473 int (*request)(const struct ti_sci_handle *handle, u8 pid);
474 int (*release)(const struct ti_sci_handle *handle, u8 pid);
475 int (*handover)(const struct ti_sci_handle *handle, u8 pid, u8 hid);
476 int (*set_config)(const struct ti_sci_handle *handle, u8 pid,
477 u64 boot_vector, u32 cfg_set, u32 cfg_clr);
478 int (*set_control)(const struct ti_sci_handle *handle, u8 pid,
479 u32 ctrl_set, u32 ctrl_clr);
480 int (*get_status)(const struct ti_sci_handle *handle, u8 pid,
481 u64 *boot_vector, u32 *cfg_flags, u32 *ctrl_flags,
482 u32 *status_flags);
483};
484
244/** 485/**
245 * struct ti_sci_ops - Function support for TI SCI 486 * struct ti_sci_ops - Function support for TI SCI
246 * @dev_ops: Device specific operations 487 * @dev_ops: Device specific operations
247 * @clk_ops: Clock specific operations 488 * @clk_ops: Clock specific operations
248 * @rm_core_ops: Resource management core operations. 489 * @rm_core_ops: Resource management core operations.
249 * @rm_irq_ops: IRQ management specific operations 490 * @rm_irq_ops: IRQ management specific operations
491 * @proc_ops: Processor Control specific operations
250 */ 492 */
251struct ti_sci_ops { 493struct ti_sci_ops {
252 struct ti_sci_core_ops core_ops; 494 struct ti_sci_core_ops core_ops;
@@ -254,6 +496,10 @@ struct ti_sci_ops {
254 struct ti_sci_clk_ops clk_ops; 496 struct ti_sci_clk_ops clk_ops;
255 struct ti_sci_rm_core_ops rm_core_ops; 497 struct ti_sci_rm_core_ops rm_core_ops;
256 struct ti_sci_rm_irq_ops rm_irq_ops; 498 struct ti_sci_rm_irq_ops rm_irq_ops;
499 struct ti_sci_rm_ringacc_ops rm_ring_ops;
500 struct ti_sci_rm_psil_ops rm_psil_ops;
501 struct ti_sci_rm_udmap_ops rm_udmap_ops;
502 struct ti_sci_proc_ops proc_ops;
257}; 503};
258 504
259/** 505/**
diff --git a/include/soc/fsl/bman.h b/include/soc/fsl/bman.h
index 5b99cb2ea5ef..173e4049d963 100644
--- a/include/soc/fsl/bman.h
+++ b/include/soc/fsl/bman.h
@@ -133,5 +133,13 @@ int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num);
 133 * failed to probe or 0 if the bman driver has not probed yet. 133 * failed to probe or 0 if the bman driver has not probed yet.
134 */ 134 */
135int bman_is_probed(void); 135int bman_is_probed(void);
136/**
137 * bman_portals_probed - Check if all cpu bound bman portals are probed
138 *
 139 * Returns 1 if all the required cpu-bound bman portals probed successfully,
 140 * -1 if probe errors appeared or 0 if the bman portals have not yet finished
 141 * probing.
142 */
143int bman_portals_probed(void);
136 144
137#endif /* __FSL_BMAN_H */ 145#endif /* __FSL_BMAN_H */
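An illustrative consumer pattern for the new helper (the same shape applies to qman_portals_probed() added below):

static int example_wait_for_portals(void)
{
	int ret = bman_portals_probed();

	if (!ret)		/* portals have not finished probing */
		return -EPROBE_DEFER;
	if (ret < 0)		/* a required portal failed to probe */
		return -ENODEV;
	return 0;		/* all cpu-bound portals are up */
}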
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 5cc7af06c1ba..aa31c05a103a 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -1195,6 +1195,15 @@ int qman_release_cgrid(u32 id);
1195int qman_is_probed(void); 1195int qman_is_probed(void);
1196 1196
1197/** 1197/**
1198 * qman_portals_probed - Check if all cpu bound qman portals are probed
1199 *
 1200 * Returns 1 if all the required cpu-bound qman portals probed successfully,
 1201 * -1 if probe errors appeared or 0 if the qman portals have not yet finished
 1202 * probing.
1203 */
1204int qman_portals_probed(void);
1205
1206/**
1198 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold 1207 * qman_dqrr_get_ithresh - Get coalesce interrupt threshold
1199 * @portal: portal to get the value for 1208 * @portal: portal to get the value for
1200 * @ithresh: threshold pointer 1209 * @ithresh: threshold pointer
diff --git a/lib/Kconfig b/lib/Kconfig
index 52a7b2e6fb74..f33d66fc0e86 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -531,14 +531,6 @@ config LRU_CACHE
531config CLZ_TAB 531config CLZ_TAB
532 bool 532 bool
533 533
534config DDR
535 bool "JEDEC DDR data"
536 help
537 Data from JEDEC specs for DDR SDRAM memories,
538 particularly the AC timing parameters and addressing
539 information. This data is useful for drivers handling
540 DDR SDRAM controllers.
541
542config IRQ_POLL 534config IRQ_POLL
543 bool "IRQ polling library" 535 bool "IRQ polling library"
544 help 536 help
diff --git a/lib/Makefile b/lib/Makefile
index 59067f51f3ab..095601ce371d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -209,8 +209,6 @@ obj-$(CONFIG_SIGNATURE) += digsig.o
209 209
210lib-$(CONFIG_CLZ_TAB) += clz_tab.o 210lib-$(CONFIG_CLZ_TAB) += clz_tab.o
211 211
212obj-$(CONFIG_DDR) += jedec_ddr_data.o
213
214obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o 212obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
215obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o 213obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
216 214