-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt    26
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt   137
-rw-r--r--  drivers/soc/qcom/Kconfig                                   27
-rw-r--r--  drivers/soc/qcom/Makefile                                   6
-rw-r--r--  drivers/soc/qcom/llcc-sdm845.c                             94
-rw-r--r--  drivers/soc/qcom/llcc-slice.c                             338
-rw-r--r--  drivers/soc/qcom/rmtfs_mem.c                                3
-rw-r--r--  drivers/soc/qcom/rpmh-internal.h                          114
-rw-r--r--  drivers/soc/qcom/rpmh-rsc.c                               693
-rw-r--r--  drivers/soc/qcom/rpmh.c                                   513
-rw-r--r--  drivers/soc/qcom/smem.c                                    10
-rw-r--r--  drivers/soc/qcom/trace-rpmh.h                              82
-rw-r--r--  include/dt-bindings/soc/qcom,rpmh-rsc.h                    14
-rw-r--r--  include/linux/qcom_scm.h                                    4
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h                        180
-rw-r--r--  include/soc/qcom/rpmh.h                                    51
-rw-r--r--  include/soc/qcom/tcs.h                                     56
17 files changed, 2341 insertions, 7 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
new file mode 100644
index 000000000000..5e85749262ae
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -0,0 +1,26 @@
1== Introduction ==
2
3LLCC (Last Level Cache Controller) provides the last level of cache memory in
4the SoC, which can be shared by multiple clients. Clients here are the
5different cores in the SoC; the idea is to minimize the local caches at the
6clients and migrate to a common pool of memory. Cache memory is divided into
7partitions called slices, which are assigned to clients. Clients can query the
8slice details, and activate and deactivate them.
9
10Properties:
11- compatible:
12 Usage: required
13 Value type: <string>
14 Definition: must be "qcom,sdm845-llcc"
15
16- reg:
17 Usage: required
18	Value type: <prop-encoded-array>
19	Definition: Start address and the size of the register region.
20
21Example:
22
23 cache-controller@1100000 {
24 compatible = "qcom,sdm845-llcc";
25 reg = <0x1100000 0x250000>;
26 };
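
The client-facing kernel API for these slices is added later in this series in
include/linux/soc/qcom/llcc-qcom.h. A minimal sketch of the client flow,
assuming a hypothetical caller (the LLCC_GPU usecase id is taken from the
SDM845 table in this series; the error handling is illustrative):

	#include <linux/err.h>
	#include <linux/soc/qcom/llcc-qcom.h>

	static int example_llcc_client(void)
	{
		struct llcc_slice_desc *desc;
		int ret;

		/* Look up the slice assigned to this usecase id */
		desc = llcc_slice_getd(LLCC_GPU);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		/* Enable the slice before using it */
		ret = llcc_slice_activate(desc);
		if (ret) {
			llcc_slice_putd(desc);
			return ret;
		}

		/* ... query llcc_get_slice_id(desc) and
		 * llcc_get_slice_size(desc) as needed ... */

		llcc_slice_deactivate(desc);
		llcc_slice_putd(desc);
		return 0;
	}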
diff --git a/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt b/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
new file mode 100644
index 000000000000..9b86d1eff219
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/rpmh-rsc.txt
@@ -0,0 +1,137 @@
1RPMH RSC:
2------------
3
4Resource Power Manager Hardened (RPMH) is the mechanism for communicating with
5the hardened resource accelerators on Qualcomm SoCs. Requests to the resources
6are written to the Trigger Command Set (TCS) registers as (addr, val) pairs
7and then triggered. Messages in the TCS are then sent in sequence over an
8internal bus.
9
10The hardware block (Direct Resource Voter or DRV) is a part of the h/w entity
11(Resource State Coordinator a.k.a RSC) that can handle multiple sleep and
12active/wake resource requests. Multiple such DRVs can exist in a SoC and can
13be written to from Linux. The structure of each DRV follows the same template
14with a few variations that are captured by the properties here.
15
16A TCS may be triggered from Linux or triggered by the F/W after all the CPUs
17have powered off to facilitate idle power saving. A TCS can be classified as -
18
19 ACTIVE /* Triggered by Linux */
20 SLEEP /* Triggered by F/W */
21 WAKE /* Triggered by F/W */
22 CONTROL /* Triggered by F/W */
23
24The order in which they are described in the DT should match the hardware
25configuration.
26
27Requests can be made for the state of a resource, when the subsystem is active
28or idle. When all subsystems like Modem, GPU, CPU are idle, the resource state
29will be an aggregate of the sleep votes from each of those subsystems. Clients
30may request a sleep value for their shared resources in addition to the active
31mode requests.
32
33Properties:
34
35- compatible:
36 Usage: required
37 Value type: <string>
38 Definition: Should be "qcom,rpmh-rsc".
39
40- reg:
41 Usage: required
42 Value type: <prop-encoded-array>
43 Definition: The first register specifies the base address of the
44		DRV(s). The number of DRVs is dependent on the RSC.
45 The tcs-offset specifies the start address of the
46 TCS in the DRVs.
47
48- reg-names:
49 Usage: required
50 Value type: <string>
51	Definition: Maps the register regions specified in the reg property.
52		Must be "drv-0", "drv-1", "drv-2", etc.
53
54- interrupts:
55 Usage: required
56 Value type: <prop-encoded-interrupt>
57 Definition: The interrupt that trips when a message complete/response
58 is received for this DRV from the accelerators.
59
60- qcom,drv-id:
61 Usage: required
62 Value type: <u32>
63 Definition: The id of the DRV in the RSC block that will be used by
64 this controller.
65
66- qcom,tcs-config:
67 Usage: required
68 Value type: <prop-encoded-array>
69 Definition: The tuple defining the configuration of TCS.
70 Must have 2 cells which describe each TCS type.
71 <type number_of_tcs>.
72 The order of the TCS must match the hardware
73 configuration.
74 - Cell #1 (TCS Type): TCS types to be specified -
75 ACTIVE_TCS
76 SLEEP_TCS
77 WAKE_TCS
78 CONTROL_TCS
79 - Cell #2 (Number of TCS): <u32>
80
81- label:
82 Usage: optional
83 Value type: <string>
84 Definition: Name for the RSC. The name would be used in trace logs.
85
86Drivers that want to use the RSC to communicate with RPMH must specify their
87bindings as child nodes of the RSC controllers they wish to communicate with.
88
89Example 1:
90
91For a TCS whose RSC base address is 0x179C0000 and is at a DRV id of 2, the
92register offsets for DRV2 start at 0xD00, the register calculations are like
93this -
94DRV0: 0x179C0000
95DRV1: 0x179C0000 + 0x10000 = 0x179D0000
96DRV2: 0x179C0000 + 0x10000 * 2 = 0x179E0000
97TCS-OFFSET: 0xD00
98
99 apps_rsc: rsc@179c0000 {
100 label = "apps_rsc";
101 compatible = "qcom,rpmh-rsc";
102 reg = <0x179c0000 0x10000>,
103 <0x179d0000 0x10000>,
104 <0x179e0000 0x10000>;
105 reg-names = "drv-0", "drv-1", "drv-2";
106 interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
107 <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
108 <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
109 qcom,tcs-offset = <0xd00>;
110 qcom,drv-id = <2>;
111 qcom,tcs-config = <ACTIVE_TCS 2>,
112 <SLEEP_TCS 3>,
113 <WAKE_TCS 3>,
114 <CONTROL_TCS 1>;
115 };
116
117Example 2:
118
119For a TCS whose RSC base address is 0xAF20000 and is at DRV id of 0, the
120register offsets for DRV0 start at 0x1C00, the register calculations are like
121this -
122DRV0: 0xAF20000
123TCS-OFFSET: 0x1C00
124
125 disp_rsc: rsc@af20000 {
126 label = "disp_rsc";
127 compatible = "qcom,rpmh-rsc";
128 reg = <0xaf20000 0x10000>;
129 reg-names = "drv-0";
130 interrupts = <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>;
131 qcom,tcs-offset = <0x1c00>;
132 qcom,drv-id = <0>;
133 qcom,tcs-config = <ACTIVE_TCS 0>,
134 <SLEEP_TCS 1>,
135 <WAKE_TCS 1>,
136 <CONTROL_TCS 0>;
137 };
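
As a kernel-side companion to the sleep/active vote description above, here is
a hedged sketch using the rpmh_write() API added later in this series
(include/soc/qcom/rpmh.h); the resource address and data values are made up:

	#include <soc/qcom/rpmh.h>

	struct tcs_cmd cmd = {
		.addr = 0x30000,	/* illustrative resource address */
		.data = 1,
		.wait = true,
	};
	int ret;

	/* Active request: sent to the accelerator immediately */
	ret = rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);

	/* Sleep vote: cached now, written to the sleep TCS by rpmh_flush()
	 * when the subsystem idles */
	cmd.data = 0;
	ret = rpmh_write(dev, RPMH_SLEEP_STATE, &cmd, 1);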
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9dc02f390ba3..ccbdb398fa63 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -39,6 +39,23 @@ config QCOM_GSBI
39	  functions for connecting the underlying serial UART, SPI, and I2C
40	  devices to the output pins.
41
42config QCOM_LLCC
43 tristate "Qualcomm Technologies, Inc. LLCC driver"
44 depends on ARCH_QCOM
45 help
46 Qualcomm Technologies, Inc. platform specific
47	  Last Level Cache Controller (LLCC) driver. This provides interfaces
48 to clients that use the LLCC. Say yes here to enable LLCC slice
49 driver.
50
51config QCOM_SDM845_LLCC
52 tristate "Qualcomm Technologies, Inc. SDM845 LLCC driver"
53 depends on QCOM_LLCC
54 help
55 Say yes here to enable the LLCC driver for SDM845. This provides
56 data required to configure LLCC so that clients can start using the
57 LLCC slices.
58
59config QCOM_MDT_LOADER
60	tristate
61	select QCOM_SCM
@@ -74,6 +91,16 @@ config QCOM_RMTFS_MEM
91
92	  Say y here if you intend to boot the modem remoteproc.
93
94config QCOM_RPMH
95 bool "Qualcomm RPM-Hardened (RPMH) Communication"
96 depends on ARCH_QCOM && ARM64 && OF || COMPILE_TEST
97 help
98 Support for communication with the hardened-RPM blocks in
99 Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
100 internal bus to transmit state requests for shared resources. A set
101 of hardware components aggregate requests for these resources and
102 help apply the aggregated state on the resource.
103
104config QCOM_SMEM
105	tristate "Qualcomm Shared Memory Manager (SMEM)"
106	depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 19dcf957cb3a..f25b54cd6cf8 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,5 @@
1# SPDX-License-Identifier: GPL-2.0
2CFLAGS_rpmh-rsc.o := -I$(src)
3obj-$(CONFIG_QCOM_GENI_SE) += qcom-geni-se.o
4obj-$(CONFIG_QCOM_COMMAND_DB) += cmd-db.o
5obj-$(CONFIG_QCOM_GLINK_SSR) += glink_ssr.o
@@ -8,6 +9,9 @@ obj-$(CONFIG_QCOM_PM) += spm.o
9obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
10qmi_helpers-y += qmi_encdec.o qmi_interface.o
11obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
12obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
13qcom_rpmh-y += rpmh-rsc.o
14qcom_rpmh-y += rpmh.o
15obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
16obj-$(CONFIG_QCOM_SMEM) += smem.o
17obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
@@ -15,3 +19,5 @@ obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
19obj-$(CONFIG_QCOM_SMSM) += smsm.o
20obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
21obj-$(CONFIG_QCOM_APR) += apr.o
22obj-$(CONFIG_QCOM_LLCC) += llcc-slice.o
23obj-$(CONFIG_QCOM_SDM845_LLCC) += llcc-sdm845.o
diff --git a/drivers/soc/qcom/llcc-sdm845.c b/drivers/soc/qcom/llcc-sdm845.c
new file mode 100644
index 000000000000..2e1e4f0a5db8
--- /dev/null
+++ b/drivers/soc/qcom/llcc-sdm845.c
@@ -0,0 +1,94 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
4 *
5 */
6
7#include <linux/kernel.h>
8#include <linux/module.h>
9#include <linux/of.h>
10#include <linux/of_device.h>
11#include <linux/soc/qcom/llcc-qcom.h>
12
13/*
14 * An SCT (System Cache Table) entry consists of the following members:
15 * usecase_id: Unique id for the client's use case
16 * slice_id: llcc slice id for each client
17 * max_cap: The maximum capacity of the cache slice provided in KB
18 * priority: Priority of the client used to select victim line for replacement
19 * fixed_size: Boolean indicating if the slice has a fixed capacity
20 * bonus_ways: Bonus ways are additional ways to be used for any slice,
21 * if client ends up using more than reserved cache ways. Bonus
22 * ways are allocated only if they are not reserved for some
23 * other client.
24 * res_ways: Reserved ways for the cache slice, the reserved ways cannot
25 * be used by any other client than the one its assigned to.
26 * cache_mode: Each slice operates as a cache, this controls the mode of the
27 *               slice: normal or TCM (Tightly Coupled Memory)
28 * probe_target_ways: Determines what ways to probe for access hit. When
29 * configured to 1 only bonus and reserved ways are probed.
30 * When configured to 0 all ways in llcc are probed.
31 * dis_cap_alloc: Disable capacity based allocation for a client
32 * retain_on_pc: If this bit is set and client has maintained active vote
33 * then the ways assigned to this client are not flushed on power
34 * collapse.
35 * activate_on_init: Activate the slice immediately after the SCT is programmed
36 */
37#define SCT_ENTRY(uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
38 { \
39 .usecase_id = uid, \
40 .slice_id = sid, \
41 .max_cap = mc, \
42 .priority = p, \
43 .fixed_size = fs, \
44 .bonus_ways = bway, \
45 .res_ways = rway, \
46 .cache_mode = cmod, \
47 .probe_target_ways = ptw, \
48 .dis_cap_alloc = dca, \
49 .retain_on_pc = rp, \
50 .activate_on_init = a, \
51 }
52
53static struct llcc_slice_config sdm845_data[] = {
54 SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1),
55 SCT_ENTRY(LLCC_VIDSC0, 2, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
56 SCT_ENTRY(LLCC_VIDSC1, 3, 512, 2, 1, 0x0, 0x0f0, 0, 0, 1, 1, 0),
57 SCT_ENTRY(LLCC_ROTATOR, 4, 563, 2, 1, 0x0, 0x00e, 2, 0, 1, 1, 0),
58 SCT_ENTRY(LLCC_VOICE, 5, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
59 SCT_ENTRY(LLCC_AUDIO, 6, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
60 SCT_ENTRY(LLCC_MDMHPGRW, 7, 1024, 2, 0, 0xfc, 0xf00, 0, 0, 1, 1, 0),
61 SCT_ENTRY(LLCC_MDM, 8, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
62 SCT_ENTRY(LLCC_CMPT, 10, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
63 SCT_ENTRY(LLCC_GPUHTW, 11, 512, 1, 1, 0xc, 0x0, 0, 0, 1, 1, 0),
64 SCT_ENTRY(LLCC_GPU, 12, 2304, 1, 0, 0xff0, 0x2, 0, 0, 1, 1, 0),
65 SCT_ENTRY(LLCC_MMUHWT, 13, 256, 2, 0, 0x0, 0x1, 0, 0, 1, 0, 1),
66 SCT_ENTRY(LLCC_CMPTDMA, 15, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
67 SCT_ENTRY(LLCC_DISP, 16, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
68 SCT_ENTRY(LLCC_VIDFW, 17, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 0),
69 SCT_ENTRY(LLCC_MDMHPFX, 20, 1024, 2, 1, 0x0, 0xf00, 0, 0, 1, 1, 0),
70 SCT_ENTRY(LLCC_MDMPNG, 21, 1024, 0, 1, 0x1e, 0x0, 0, 0, 1, 1, 0),
71 SCT_ENTRY(LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0),
72};
73
74static int sdm845_qcom_llcc_probe(struct platform_device *pdev)
75{
76 return qcom_llcc_probe(pdev, sdm845_data, ARRAY_SIZE(sdm845_data));
77}
78
79static const struct of_device_id sdm845_qcom_llcc_of_match[] = {
80 { .compatible = "qcom,sdm845-llcc", },
81 { }
82};
83
84static struct platform_driver sdm845_qcom_llcc_driver = {
85 .driver = {
86 .name = "sdm845-llcc",
87 .of_match_table = sdm845_qcom_llcc_of_match,
88 },
89 .probe = sdm845_qcom_llcc_probe,
90};
91module_platform_driver(sdm845_qcom_llcc_driver);
92
93MODULE_DESCRIPTION("QCOM sdm845 LLCC driver");
94MODULE_LICENSE("GPL v2");
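
To make the packed table above easier to read, here is its first row unpacked
against the SCT_ENTRY() parameter order (annotation only; the values are
copied verbatim from sdm845_data):

	SCT_ENTRY(LLCC_CPUSS, 1, 2816, 1, 0, 0xffc, 0x2, 0, 0, 1, 1, 1)
	/*
	 * usecase_id        = LLCC_CPUSS
	 * slice_id          = 1
	 * max_cap           = 2816 KB
	 * priority          = 1
	 * fixed_size        = false
	 * bonus_ways        = 0xffc
	 * res_ways          = 0x2
	 * cache_mode        = 0 (normal, not TCM)
	 * probe_target_ways = 0 (probe all ways)
	 * dis_cap_alloc     = 1 (capacity based allocation disabled)
	 * retain_on_pc      = 1 (ways retained across power collapse)
	 * activate_on_init  = 1 (activated when the SCT is programmed)
	 */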
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 000000000000..54063a31132f
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,338 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
4 *
5 */
6
7#include <linux/bitmap.h>
8#include <linux/bitops.h>
9#include <linux/device.h>
10#include <linux/io.h>
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/mutex.h>
14#include <linux/of_device.h>
15#include <linux/regmap.h>
16#include <linux/slab.h>
17#include <linux/soc/qcom/llcc-qcom.h>
18
19#define ACTIVATE BIT(0)
20#define DEACTIVATE BIT(1)
21#define ACT_CTRL_OPCODE_ACTIVATE BIT(0)
22#define ACT_CTRL_OPCODE_DEACTIVATE BIT(1)
23#define ACT_CTRL_ACT_TRIG BIT(0)
24#define ACT_CTRL_OPCODE_SHIFT 0x01
25#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x02
26#define ATTR1_FIXED_SIZE_SHIFT 0x03
27#define ATTR1_PRIORITY_SHIFT 0x04
28#define ATTR1_MAX_CAP_SHIFT 0x10
29#define ATTR0_RES_WAYS_MASK GENMASK(11, 0)
30#define ATTR0_BONUS_WAYS_MASK GENMASK(27, 16)
31#define ATTR0_BONUS_WAYS_SHIFT 0x10
32#define LLCC_STATUS_READ_DELAY 100
33
34#define CACHE_LINE_SIZE_SHIFT 6
35
36#define LLCC_COMMON_STATUS0 0x0003000c
37#define LLCC_LB_CNT_MASK GENMASK(31, 28)
38#define LLCC_LB_CNT_SHIFT 28
39
40#define MAX_CAP_TO_BYTES(n) (n * SZ_1K)
41#define LLCC_TRP_ACT_CTRLn(n) (n * SZ_4K)
42#define LLCC_TRP_STATUSn(n) (4 + n * SZ_4K)
43#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
44#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
45
46#define BANK_OFFSET_STRIDE 0x80000
47
48static struct llcc_drv_data *drv_data;
49
50static const struct regmap_config llcc_regmap_config = {
51 .reg_bits = 32,
52 .reg_stride = 4,
53 .val_bits = 32,
54 .fast_io = true,
55};
56
57/**
58 * llcc_slice_getd - get llcc slice descriptor
59 * @uid: usecase_id for the client
60 *
61 * A pointer to the llcc slice descriptor will be returned on success and
62 * an error pointer is returned on failure
63 */
64struct llcc_slice_desc *llcc_slice_getd(u32 uid)
65{
66 const struct llcc_slice_config *cfg;
67 struct llcc_slice_desc *desc;
68 u32 sz, count;
69
70 cfg = drv_data->cfg;
71 sz = drv_data->cfg_size;
72
73 for (count = 0; cfg && count < sz; count++, cfg++)
74 if (cfg->usecase_id == uid)
75 break;
76
77 if (count == sz || !cfg)
78 return ERR_PTR(-ENODEV);
79
80 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
81 if (!desc)
82 return ERR_PTR(-ENOMEM);
83
84 desc->slice_id = cfg->slice_id;
85 desc->slice_size = cfg->max_cap;
86
87 return desc;
88}
89EXPORT_SYMBOL_GPL(llcc_slice_getd);
90
91/**
92 * llcc_slice_putd - put llcc slice descriptor
93 * @desc: Pointer to llcc slice descriptor
94 */
95void llcc_slice_putd(struct llcc_slice_desc *desc)
96{
97 kfree(desc);
98}
99EXPORT_SYMBOL_GPL(llcc_slice_putd);
100
101static int llcc_update_act_ctrl(u32 sid,
102 u32 act_ctrl_reg_val, u32 status)
103{
104 u32 act_ctrl_reg;
105 u32 status_reg;
106 u32 slice_status;
107 int ret;
108
109 act_ctrl_reg = drv_data->bcast_off + LLCC_TRP_ACT_CTRLn(sid);
110 status_reg = drv_data->bcast_off + LLCC_TRP_STATUSn(sid);
111
112 /* Set the ACTIVE trigger */
113 act_ctrl_reg_val |= ACT_CTRL_ACT_TRIG;
114 ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
115 if (ret)
116 return ret;
117
118 /* Clear the ACTIVE trigger */
119 act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
120 ret = regmap_write(drv_data->regmap, act_ctrl_reg, act_ctrl_reg_val);
121 if (ret)
122 return ret;
123
124 ret = regmap_read_poll_timeout(drv_data->regmap, status_reg,
125 slice_status, !(slice_status & status),
126 0, LLCC_STATUS_READ_DELAY);
127 return ret;
128}
129
130/**
131 * llcc_slice_activate - Activate the llcc slice
132 * @desc: Pointer to llcc slice descriptor
133 *
134 * A value of zero will be returned on success and a negative errno will
135 * be returned in error cases
136 */
137int llcc_slice_activate(struct llcc_slice_desc *desc)
138{
139 int ret;
140 u32 act_ctrl_val;
141
142 mutex_lock(&drv_data->lock);
143 if (test_bit(desc->slice_id, drv_data->bitmap)) {
144 mutex_unlock(&drv_data->lock);
145 return 0;
146 }
147
148 act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
149
150 ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
151 DEACTIVATE);
152 if (ret) {
153 mutex_unlock(&drv_data->lock);
154 return ret;
155 }
156
157 __set_bit(desc->slice_id, drv_data->bitmap);
158 mutex_unlock(&drv_data->lock);
159
160 return ret;
161}
162EXPORT_SYMBOL_GPL(llcc_slice_activate);
163
164/**
165 * llcc_slice_deactivate - Deactivate the llcc slice
166 * @desc: Pointer to llcc slice descriptor
167 *
168 * A value of zero will be returned on success and a negative errno will
169 * be returned in error cases
170 */
171int llcc_slice_deactivate(struct llcc_slice_desc *desc)
172{
173 u32 act_ctrl_val;
174 int ret;
175
176 mutex_lock(&drv_data->lock);
177 if (!test_bit(desc->slice_id, drv_data->bitmap)) {
178 mutex_unlock(&drv_data->lock);
179 return 0;
180 }
181 act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
182
183 ret = llcc_update_act_ctrl(desc->slice_id, act_ctrl_val,
184 ACTIVATE);
185 if (ret) {
186 mutex_unlock(&drv_data->lock);
187 return ret;
188 }
189
190 __clear_bit(desc->slice_id, drv_data->bitmap);
191 mutex_unlock(&drv_data->lock);
192
193 return ret;
194}
195EXPORT_SYMBOL_GPL(llcc_slice_deactivate);
196
197/**
198 * llcc_get_slice_id - return the slice id
199 * @desc: Pointer to llcc slice descriptor
200 */
201int llcc_get_slice_id(struct llcc_slice_desc *desc)
202{
203 return desc->slice_id;
204}
205EXPORT_SYMBOL_GPL(llcc_get_slice_id);
206
207/**
208 * llcc_get_slice_size - return the slice size
209 * @desc: Pointer to llcc slice descriptor
210 */
211size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
212{
213 return desc->slice_size;
214}
215EXPORT_SYMBOL_GPL(llcc_get_slice_size);
216
217static int qcom_llcc_cfg_program(struct platform_device *pdev)
218{
219 int i;
220 u32 attr1_cfg;
221 u32 attr0_cfg;
222 u32 attr1_val;
223 u32 attr0_val;
224 u32 max_cap_cacheline;
225 u32 sz;
226 int ret;
227 const struct llcc_slice_config *llcc_table;
228 struct llcc_slice_desc desc;
229 u32 bcast_off = drv_data->bcast_off;
230
231 sz = drv_data->cfg_size;
232 llcc_table = drv_data->cfg;
233
234 for (i = 0; i < sz; i++) {
235 attr1_cfg = bcast_off +
236 LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
237 attr0_cfg = bcast_off +
238 LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
239
240 attr1_val = llcc_table[i].cache_mode;
241 attr1_val |= llcc_table[i].probe_target_ways <<
242 ATTR1_PROBE_TARGET_WAYS_SHIFT;
243 attr1_val |= llcc_table[i].fixed_size <<
244 ATTR1_FIXED_SIZE_SHIFT;
245 attr1_val |= llcc_table[i].priority <<
246 ATTR1_PRIORITY_SHIFT;
247
248 max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
249
250 /* LLCC instances can vary for each target.
251	 * The SW writes to the broadcast register, which gets propagated
252	 * to each LLCC instance (llcc0..llccN).
253 * Since the size of the memory is divided equally amongst the
254 * llcc instances, we need to configure the max cap accordingly.
255 */
256 max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
257 max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
258 attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
259
260 attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
261 attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
262
263 ret = regmap_write(drv_data->regmap, attr1_cfg, attr1_val);
264 if (ret)
265 return ret;
266 ret = regmap_write(drv_data->regmap, attr0_cfg, attr0_val);
267 if (ret)
268 return ret;
269 if (llcc_table[i].activate_on_init) {
270 desc.slice_id = llcc_table[i].slice_id;
271 ret = llcc_slice_activate(&desc);
272 }
273 }
274 return ret;
275}
276
277int qcom_llcc_probe(struct platform_device *pdev,
278 const struct llcc_slice_config *llcc_cfg, u32 sz)
279{
280 u32 num_banks;
281 struct device *dev = &pdev->dev;
282 struct resource *res;
283 void __iomem *base;
284 int ret, i;
285
286 drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
287 if (!drv_data)
288 return -ENOMEM;
289
290 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
291 base = devm_ioremap_resource(&pdev->dev, res);
292 if (IS_ERR(base))
293 return PTR_ERR(base);
294
295 drv_data->regmap = devm_regmap_init_mmio(dev, base,
296 &llcc_regmap_config);
297 if (IS_ERR(drv_data->regmap))
298 return PTR_ERR(drv_data->regmap);
299
300 ret = regmap_read(drv_data->regmap, LLCC_COMMON_STATUS0,
301 &num_banks);
302 if (ret)
303 return ret;
304
305 num_banks &= LLCC_LB_CNT_MASK;
306 num_banks >>= LLCC_LB_CNT_SHIFT;
307 drv_data->num_banks = num_banks;
308
309 for (i = 0; i < sz; i++)
310 if (llcc_cfg[i].slice_id > drv_data->max_slices)
311 drv_data->max_slices = llcc_cfg[i].slice_id;
312
313 drv_data->offsets = devm_kcalloc(dev, num_banks, sizeof(u32),
314 GFP_KERNEL);
315 if (!drv_data->offsets)
316 return -ENOMEM;
317
318 for (i = 0; i < num_banks; i++)
319 drv_data->offsets[i] = i * BANK_OFFSET_STRIDE;
320
321 drv_data->bcast_off = num_banks * BANK_OFFSET_STRIDE;
322
323 drv_data->bitmap = devm_kcalloc(dev,
324 BITS_TO_LONGS(drv_data->max_slices), sizeof(unsigned long),
325 GFP_KERNEL);
326 if (!drv_data->bitmap)
327 return -ENOMEM;
328
329 drv_data->cfg = llcc_cfg;
330 drv_data->cfg_size = sz;
331 mutex_init(&drv_data->lock);
332 platform_set_drvdata(pdev, drv_data);
333
334 return qcom_llcc_cfg_program(pdev);
335}
336EXPORT_SYMBOL_GPL(qcom_llcc_probe);
337
338MODULE_LICENSE("GPL v2");
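
A worked example of the max-cap arithmetic in qcom_llcc_cfg_program() above,
using the LLCC_CPUSS entry (max_cap = 2816 KB) and assuming
LLCC_COMMON_STATUS0 reports four banks (the bank count here is only an
assumption for illustration):

	u32 bytes    = MAX_CAP_TO_BYTES(2816);	/* 2816 * SZ_1K = 2883584 */
	u32 per_bank = bytes / 4;		/* num_banks = 4 -> 720896 */
	u32 lines    = per_bank >> CACHE_LINE_SIZE_SHIFT; /* / 64 -> 11264 */

	/* 11264 is the value OR'd into ATTR1 at ATTR1_MAX_CAP_SHIFT (bit 16)
	 * for this slice */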
diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c
index c8999e38b005..8a3678c2e83c 100644
--- a/drivers/soc/qcom/rmtfs_mem.c
+++ b/drivers/soc/qcom/rmtfs_mem.c
@@ -184,6 +184,7 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
184	device_initialize(&rmtfs_mem->dev);
185	rmtfs_mem->dev.parent = &pdev->dev;
186	rmtfs_mem->dev.groups = qcom_rmtfs_mem_groups;
187 rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
188
189	rmtfs_mem->base = devm_memremap(&rmtfs_mem->dev, rmtfs_mem->addr,
190					rmtfs_mem->size, MEMREMAP_WC);
@@ -206,8 +207,6 @@ static int qcom_rmtfs_mem_probe(struct platform_device *pdev)
207		goto put_device;
208	}
209
209 rmtfs_mem->dev.release = qcom_rmtfs_mem_release_device;
210
210	ret = of_property_read_u32(node, "qcom,vmid", &vmid);
211	if (ret < 0 && ret != -EINVAL) {
212		dev_err(&pdev->dev, "failed to parse qcom,vmid\n");
diff --git a/drivers/soc/qcom/rpmh-internal.h b/drivers/soc/qcom/rpmh-internal.h
new file mode 100644
index 000000000000..a7bbbb67991c
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-internal.h
@@ -0,0 +1,114 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6
7#ifndef __RPM_INTERNAL_H__
8#define __RPM_INTERNAL_H__
9
10#include <linux/bitmap.h>
11#include <soc/qcom/tcs.h>
12
13#define TCS_TYPE_NR 4
14#define MAX_CMDS_PER_TCS 16
15#define MAX_TCS_PER_TYPE 3
16#define MAX_TCS_NR (MAX_TCS_PER_TYPE * TCS_TYPE_NR)
17#define MAX_TCS_SLOTS (MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
18
19struct rsc_drv;
20
21/**
22 * struct tcs_group: group of Trigger Command Sets (TCS) to send state requests
23 * to the controller
24 *
25 * @drv: the controller
26 * @type: type of the TCS in this group - active, sleep, wake
27 * @mask: mask of the TCSes relative to all the TCSes in the RSC
28 * @offset: start of the TCS group relative to the TCSes in the RSC
29 * @num_tcs: number of TCSes in this type
30 * @ncpt: number of commands in each TCS
31 * @lock:      lock for synchronizing writes to this TCS group
32 * @req: requests that are sent from the TCS
33 * @cmd_cache: flattened cache of cmds in sleep/wake TCS
34 * @slots:     indicates which slots of @cmd_cache are occupied
35 */
36struct tcs_group {
37 struct rsc_drv *drv;
38 int type;
39 u32 mask;
40 u32 offset;
41 int num_tcs;
42 int ncpt;
43 spinlock_t lock;
44 const struct tcs_request *req[MAX_TCS_PER_TYPE];
45 u32 *cmd_cache;
46 DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
47};
48
49/**
50 * struct rpmh_request: the message to be sent to rpmh-rsc
51 *
52 * @msg: the request
53 * @cmd: the payload that will be part of the @msg
54 * @completion: triggered when request is done
55 * @dev: the device making the request
56 * @err: err return from the controller
57 * @needs_free: flag indicating whether the request object must be freed
58 */
59struct rpmh_request {
60 struct tcs_request msg;
61 struct tcs_cmd cmd[MAX_RPMH_PAYLOAD];
62 struct completion *completion;
63 const struct device *dev;
64 int err;
65 bool needs_free;
66};
67
68/**
69 * struct rpmh_ctrlr: our representation of the controller
70 *
71 * @cache: the list of cached requests
72 * @cache_lock: synchronize access to the cache data
73 * @dirty: was the cache updated since flush
74 * @batch_cache: Cache sleep and wake requests sent as batch
75 */
76struct rpmh_ctrlr {
77 struct list_head cache;
78 spinlock_t cache_lock;
79 bool dirty;
80 struct list_head batch_cache;
81};
82
83/**
84 * struct rsc_drv: the Direct Resource Voter (DRV) of the
85 * Resource State Coordinator controller (RSC)
86 *
87 * @name: controller identifier
88 * @tcs_base: start address of the TCS registers in this controller
89 * @id: instance id in the controller (Direct Resource Voter)
90 * @num_tcs: number of TCSes in this DRV
91 * @tcs: TCS groups
92 * @tcs_in_use: s/w state of the TCS
93 * @lock: synchronize state of the controller
94 * @client: handle to the DRV's client.
95 */
96struct rsc_drv {
97 const char *name;
98 void __iomem *tcs_base;
99 int id;
100 int num_tcs;
101 struct tcs_group tcs[TCS_TYPE_NR];
102 DECLARE_BITMAP(tcs_in_use, MAX_TCS_NR);
103 spinlock_t lock;
104 struct rpmh_ctrlr client;
105};
106
107int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg);
108int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv,
109 const struct tcs_request *msg);
110int rpmh_rsc_invalidate(struct rsc_drv *drv);
111
112void rpmh_tx_done(const struct tcs_request *msg, int r);
113
114#endif /* __RPM_INTERNAL_H__ */
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
new file mode 100644
index 000000000000..ee75da66d64b
--- /dev/null
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -0,0 +1,693 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#define pr_fmt(fmt) "%s " fmt, KBUILD_MODNAME
7
8#include <linux/atomic.h>
9#include <linux/delay.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/kernel.h>
13#include <linux/list.h>
14#include <linux/of.h>
15#include <linux/of_irq.h>
16#include <linux/of_platform.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/spinlock.h>
20
21#include <soc/qcom/cmd-db.h>
22#include <soc/qcom/tcs.h>
23#include <dt-bindings/soc/qcom,rpmh-rsc.h>
24
25#include "rpmh-internal.h"
26
27#define CREATE_TRACE_POINTS
28#include "trace-rpmh.h"
29
30#define RSC_DRV_TCS_OFFSET 672
31#define RSC_DRV_CMD_OFFSET 20
32
33/* DRV Configuration Information Register */
34#define DRV_PRNT_CHLD_CONFIG 0x0C
35#define DRV_NUM_TCS_MASK 0x3F
36#define DRV_NUM_TCS_SHIFT 6
37#define DRV_NCPT_MASK 0x1F
38#define DRV_NCPT_SHIFT 27
39
40/* Register offsets */
41#define RSC_DRV_IRQ_ENABLE 0x00
42#define RSC_DRV_IRQ_STATUS 0x04
43#define RSC_DRV_IRQ_CLEAR 0x08
44#define RSC_DRV_CMD_WAIT_FOR_CMPL 0x10
45#define RSC_DRV_CONTROL 0x14
46#define RSC_DRV_STATUS 0x18
47#define RSC_DRV_CMD_ENABLE 0x1C
48#define RSC_DRV_CMD_MSGID 0x30
49#define RSC_DRV_CMD_ADDR 0x34
50#define RSC_DRV_CMD_DATA 0x38
51#define RSC_DRV_CMD_STATUS 0x3C
52#define RSC_DRV_CMD_RESP_DATA 0x40
53
54#define TCS_AMC_MODE_ENABLE BIT(16)
55#define TCS_AMC_MODE_TRIGGER BIT(24)
56
57/* TCS CMD register bit mask */
58#define CMD_MSGID_LEN 8
59#define CMD_MSGID_RESP_REQ BIT(8)
60#define CMD_MSGID_WRITE BIT(16)
61#define CMD_STATUS_ISSUED BIT(8)
62#define CMD_STATUS_COMPL BIT(16)
63
64static u32 read_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id)
65{
66 return readl_relaxed(drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
67 RSC_DRV_CMD_OFFSET * cmd_id);
68}
69
70static void write_tcs_cmd(struct rsc_drv *drv, int reg, int tcs_id, int cmd_id,
71 u32 data)
72{
73 writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id +
74 RSC_DRV_CMD_OFFSET * cmd_id);
75}
76
77static void write_tcs_reg(struct rsc_drv *drv, int reg, int tcs_id, u32 data)
78{
79 writel_relaxed(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
80}
81
82static void write_tcs_reg_sync(struct rsc_drv *drv, int reg, int tcs_id,
83 u32 data)
84{
85 writel(data, drv->tcs_base + reg + RSC_DRV_TCS_OFFSET * tcs_id);
86 for (;;) {
87 if (data == readl(drv->tcs_base + reg +
88 RSC_DRV_TCS_OFFSET * tcs_id))
89 break;
90 udelay(1);
91 }
92}
93
94static bool tcs_is_free(struct rsc_drv *drv, int tcs_id)
95{
96 return !test_bit(tcs_id, drv->tcs_in_use) &&
97 read_tcs_reg(drv, RSC_DRV_STATUS, tcs_id, 0);
98}
99
100static struct tcs_group *get_tcs_of_type(struct rsc_drv *drv, int type)
101{
102 return &drv->tcs[type];
103}
104
105static int tcs_invalidate(struct rsc_drv *drv, int type)
106{
107 int m;
108 struct tcs_group *tcs;
109
110 tcs = get_tcs_of_type(drv, type);
111
112 spin_lock(&tcs->lock);
113 if (bitmap_empty(tcs->slots, MAX_TCS_SLOTS)) {
114 spin_unlock(&tcs->lock);
115 return 0;
116 }
117
118 for (m = tcs->offset; m < tcs->offset + tcs->num_tcs; m++) {
119 if (!tcs_is_free(drv, m)) {
120 spin_unlock(&tcs->lock);
121 return -EAGAIN;
122 }
123 write_tcs_reg_sync(drv, RSC_DRV_CMD_ENABLE, m, 0);
124 }
125 bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
126 spin_unlock(&tcs->lock);
127
128 return 0;
129}
130
131/**
132 * rpmh_rsc_invalidate - Invalidate sleep and wake TCSes
133 *
134 * @drv: the RSC controller
135 */
136int rpmh_rsc_invalidate(struct rsc_drv *drv)
137{
138 int ret;
139
140 ret = tcs_invalidate(drv, SLEEP_TCS);
141 if (!ret)
142 ret = tcs_invalidate(drv, WAKE_TCS);
143
144 return ret;
145}
146
147static struct tcs_group *get_tcs_for_msg(struct rsc_drv *drv,
148 const struct tcs_request *msg)
149{
150 int type, ret;
151 struct tcs_group *tcs;
152
153 switch (msg->state) {
154 case RPMH_ACTIVE_ONLY_STATE:
155 type = ACTIVE_TCS;
156 break;
157 case RPMH_WAKE_ONLY_STATE:
158 type = WAKE_TCS;
159 break;
160 case RPMH_SLEEP_STATE:
161 type = SLEEP_TCS;
162 break;
163 default:
164 return ERR_PTR(-EINVAL);
165 }
166
167 /*
168 * If we are making an active request on a RSC that does not have a
169 * dedicated TCS for active state use, then re-purpose a wake TCS to
170 * send active votes.
171 * NOTE: The driver must be aware that this RSC does not have a
172 * dedicated AMC, and therefore would invalidate the sleep and wake
173 * TCSes before making an active state request.
174 */
175 tcs = get_tcs_of_type(drv, type);
176 if (msg->state == RPMH_ACTIVE_ONLY_STATE && !tcs->num_tcs) {
177 tcs = get_tcs_of_type(drv, WAKE_TCS);
178 if (tcs->num_tcs) {
179 ret = rpmh_rsc_invalidate(drv);
180 if (ret)
181 return ERR_PTR(ret);
182 }
183 }
184
185 return tcs;
186}
187
188static const struct tcs_request *get_req_from_tcs(struct rsc_drv *drv,
189 int tcs_id)
190{
191 struct tcs_group *tcs;
192 int i;
193
194 for (i = 0; i < TCS_TYPE_NR; i++) {
195 tcs = &drv->tcs[i];
196 if (tcs->mask & BIT(tcs_id))
197 return tcs->req[tcs_id - tcs->offset];
198 }
199
200 return NULL;
201}
202
203/**
204 * tcs_tx_done: TX Done interrupt handler
205 */
206static irqreturn_t tcs_tx_done(int irq, void *p)
207{
208 struct rsc_drv *drv = p;
209 int i, j, err = 0;
210 unsigned long irq_status;
211 const struct tcs_request *req;
212 struct tcs_cmd *cmd;
213
214 irq_status = read_tcs_reg(drv, RSC_DRV_IRQ_STATUS, 0, 0);
215
216 for_each_set_bit(i, &irq_status, BITS_PER_LONG) {
217 req = get_req_from_tcs(drv, i);
218 if (!req) {
219 WARN_ON(1);
220 goto skip;
221 }
222
223 err = 0;
224 for (j = 0; j < req->num_cmds; j++) {
225 u32 sts;
226
227 cmd = &req->cmds[j];
228 sts = read_tcs_reg(drv, RSC_DRV_CMD_STATUS, i, j);
229 if (!(sts & CMD_STATUS_ISSUED) ||
230 ((req->wait_for_compl || cmd->wait) &&
231 !(sts & CMD_STATUS_COMPL))) {
232 pr_err("Incomplete request: %s: addr=%#x data=%#x",
233 drv->name, cmd->addr, cmd->data);
234 err = -EIO;
235 }
236 }
237
238 trace_rpmh_tx_done(drv, i, req, err);
239skip:
240 /* Reclaim the TCS */
241 write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, i, 0);
242 write_tcs_reg(drv, RSC_DRV_IRQ_CLEAR, 0, BIT(i));
243 spin_lock(&drv->lock);
244 clear_bit(i, drv->tcs_in_use);
245 spin_unlock(&drv->lock);
246 if (req)
247 rpmh_tx_done(req, err);
248 }
249
250 return IRQ_HANDLED;
251}
252
253static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
254 const struct tcs_request *msg)
255{
256 u32 msgid, cmd_msgid;
257 u32 cmd_enable = 0;
258 u32 cmd_complete;
259 struct tcs_cmd *cmd;
260 int i, j;
261
262 cmd_msgid = CMD_MSGID_LEN;
263 cmd_msgid |= msg->wait_for_compl ? CMD_MSGID_RESP_REQ : 0;
264 cmd_msgid |= CMD_MSGID_WRITE;
265
266 cmd_complete = read_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, 0);
267
268 for (i = 0, j = cmd_id; i < msg->num_cmds; i++, j++) {
269 cmd = &msg->cmds[i];
270 cmd_enable |= BIT(j);
271 cmd_complete |= cmd->wait << j;
272 msgid = cmd_msgid;
273 msgid |= cmd->wait ? CMD_MSGID_RESP_REQ : 0;
274
275 write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
276 write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
277 write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
278 trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
279 }
280
281 write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
282 cmd_enable |= read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
283 write_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, cmd_enable);
284}
285
286static void __tcs_trigger(struct rsc_drv *drv, int tcs_id)
287{
288 u32 enable;
289
290 /*
291 * HW req: Clear the DRV_CONTROL and enable TCS again
292 * While clearing ensure that the AMC mode trigger is cleared
293 * and then the mode enable is cleared.
294 */
295 enable = read_tcs_reg(drv, RSC_DRV_CONTROL, tcs_id, 0);
296 enable &= ~TCS_AMC_MODE_TRIGGER;
297 write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
298 enable &= ~TCS_AMC_MODE_ENABLE;
299 write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
300
301 /* Enable the AMC mode on the TCS and then trigger the TCS */
302 enable = TCS_AMC_MODE_ENABLE;
303 write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
304 enable |= TCS_AMC_MODE_TRIGGER;
305 write_tcs_reg_sync(drv, RSC_DRV_CONTROL, tcs_id, enable);
306}
307
308static int check_for_req_inflight(struct rsc_drv *drv, struct tcs_group *tcs,
309 const struct tcs_request *msg)
310{
311 unsigned long curr_enabled;
312 u32 addr;
313 int i, j, k;
314 int tcs_id = tcs->offset;
315
316 for (i = 0; i < tcs->num_tcs; i++, tcs_id++) {
317 if (tcs_is_free(drv, tcs_id))
318 continue;
319
320 curr_enabled = read_tcs_reg(drv, RSC_DRV_CMD_ENABLE, tcs_id, 0);
321
322 for_each_set_bit(j, &curr_enabled, MAX_CMDS_PER_TCS) {
323 addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, tcs_id, j);
324 for (k = 0; k < msg->num_cmds; k++) {
325 if (addr == msg->cmds[k].addr)
326 return -EBUSY;
327 }
328 }
329 }
330
331 return 0;
332}
333
334static int find_free_tcs(struct tcs_group *tcs)
335{
336 int i;
337
338 for (i = 0; i < tcs->num_tcs; i++) {
339 if (tcs_is_free(tcs->drv, tcs->offset + i))
340 return tcs->offset + i;
341 }
342
343 return -EBUSY;
344}
345
346static int tcs_write(struct rsc_drv *drv, const struct tcs_request *msg)
347{
348 struct tcs_group *tcs;
349 int tcs_id;
350 unsigned long flags;
351 int ret;
352
353 tcs = get_tcs_for_msg(drv, msg);
354 if (IS_ERR(tcs))
355 return PTR_ERR(tcs);
356
357 spin_lock_irqsave(&tcs->lock, flags);
358 spin_lock(&drv->lock);
359 /*
360 * The h/w does not like if we send a request to the same address,
361 * when one is already in-flight or being processed.
362 */
363 ret = check_for_req_inflight(drv, tcs, msg);
364 if (ret) {
365 spin_unlock(&drv->lock);
366 goto done_write;
367 }
368
369 tcs_id = find_free_tcs(tcs);
370 if (tcs_id < 0) {
371 ret = tcs_id;
372 spin_unlock(&drv->lock);
373 goto done_write;
374 }
375
376 tcs->req[tcs_id - tcs->offset] = msg;
377 set_bit(tcs_id, drv->tcs_in_use);
378 spin_unlock(&drv->lock);
379
380 __tcs_buffer_write(drv, tcs_id, 0, msg);
381 __tcs_trigger(drv, tcs_id);
382
383done_write:
384 spin_unlock_irqrestore(&tcs->lock, flags);
385 return ret;
386}
387
388/**
389 * rpmh_rsc_send_data: Validate the incoming message and write to the
390 * appropriate TCS block.
391 *
392 * @drv: the controller
393 * @msg: the data to be sent
394 *
395 * Return: 0 on success, -EINVAL on error.
396 * Note: This call blocks until a valid data is written to the TCS.
397 */
398int rpmh_rsc_send_data(struct rsc_drv *drv, const struct tcs_request *msg)
399{
400 int ret;
401
402 if (!msg || !msg->cmds || !msg->num_cmds ||
403 msg->num_cmds > MAX_RPMH_PAYLOAD) {
404 WARN_ON(1);
405 return -EINVAL;
406 }
407
408 do {
409 ret = tcs_write(drv, msg);
410 if (ret == -EBUSY) {
411 pr_info_ratelimited("TCS Busy, retrying RPMH message send: addr=%#x\n",
412 msg->cmds[0].addr);
413 udelay(10);
414 }
415 } while (ret == -EBUSY);
416
417 return ret;
418}
419
420static int find_match(const struct tcs_group *tcs, const struct tcs_cmd *cmd,
421 int len)
422{
423 int i, j;
424
425 /* Check for already cached commands */
426 for_each_set_bit(i, tcs->slots, MAX_TCS_SLOTS) {
427 if (tcs->cmd_cache[i] != cmd[0].addr)
428 continue;
429 if (i + len >= tcs->num_tcs * tcs->ncpt)
430 goto seq_err;
431 for (j = 0; j < len; j++) {
432 if (tcs->cmd_cache[i + j] != cmd[j].addr)
433 goto seq_err;
434 }
435 return i;
436 }
437
438 return -ENODATA;
439
440seq_err:
441 WARN(1, "Message does not match previous sequence.\n");
442 return -EINVAL;
443}
444
445static int find_slots(struct tcs_group *tcs, const struct tcs_request *msg,
446 int *tcs_id, int *cmd_id)
447{
448 int slot, offset;
449 int i = 0;
450
451 /* Find if we already have the msg in our TCS */
452 slot = find_match(tcs, msg->cmds, msg->num_cmds);
453 if (slot >= 0)
454 goto copy_data;
455
456 /* Do over, until we can fit the full payload in a TCS */
457 do {
458 slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
459 i, msg->num_cmds, 0);
460 if (slot == tcs->num_tcs * tcs->ncpt)
461 return -ENOMEM;
462 i += tcs->ncpt;
463 } while (slot + msg->num_cmds - 1 >= i);
464
465copy_data:
466 bitmap_set(tcs->slots, slot, msg->num_cmds);
467 /* Copy the addresses of the resources over to the slots */
468 for (i = 0; i < msg->num_cmds; i++)
469 tcs->cmd_cache[slot + i] = msg->cmds[i].addr;
470
471 offset = slot / tcs->ncpt;
472 *tcs_id = offset + tcs->offset;
473 *cmd_id = slot % tcs->ncpt;
474
475 return 0;
476}
477
478static int tcs_ctrl_write(struct rsc_drv *drv, const struct tcs_request *msg)
479{
480 struct tcs_group *tcs;
481 int tcs_id = 0, cmd_id = 0;
482 unsigned long flags;
483 int ret;
484
485 tcs = get_tcs_for_msg(drv, msg);
486 if (IS_ERR(tcs))
487 return PTR_ERR(tcs);
488
489 spin_lock_irqsave(&tcs->lock, flags);
490 /* find the TCS id and the command in the TCS to write to */
491 ret = find_slots(tcs, msg, &tcs_id, &cmd_id);
492 if (!ret)
493 __tcs_buffer_write(drv, tcs_id, cmd_id, msg);
494 spin_unlock_irqrestore(&tcs->lock, flags);
495
496 return ret;
497}
498
499/**
500 * rpmh_rsc_write_ctrl_data: Write request to the controller
501 *
502 * @drv: the controller
503 * @msg: the data to be written to the controller
504 *
505 * There is no response returned for writing the request to the controller.
506 */
507int rpmh_rsc_write_ctrl_data(struct rsc_drv *drv, const struct tcs_request *msg)
508{
509 if (!msg || !msg->cmds || !msg->num_cmds ||
510 msg->num_cmds > MAX_RPMH_PAYLOAD) {
511 pr_err("Payload error\n");
512 return -EINVAL;
513 }
514
515 /* Data sent to this API will not be sent immediately */
516 if (msg->state == RPMH_ACTIVE_ONLY_STATE)
517 return -EINVAL;
518
519 return tcs_ctrl_write(drv, msg);
520}
521
522static int rpmh_probe_tcs_config(struct platform_device *pdev,
523 struct rsc_drv *drv)
524{
525 struct tcs_type_config {
526 u32 type;
527 u32 n;
528 } tcs_cfg[TCS_TYPE_NR] = { { 0 } };
529 struct device_node *dn = pdev->dev.of_node;
530 u32 config, max_tcs, ncpt, offset;
531 int i, ret, n, st = 0;
532 struct tcs_group *tcs;
533 struct resource *res;
534 void __iomem *base;
535 char drv_id[10] = {0};
536
537 snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
538 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
539 base = devm_ioremap_resource(&pdev->dev, res);
540 if (IS_ERR(base))
541 return PTR_ERR(base);
542
543 ret = of_property_read_u32(dn, "qcom,tcs-offset", &offset);
544 if (ret)
545 return ret;
546 drv->tcs_base = base + offset;
547
548 config = readl_relaxed(base + DRV_PRNT_CHLD_CONFIG);
549
550 max_tcs = config;
551 max_tcs &= DRV_NUM_TCS_MASK << (DRV_NUM_TCS_SHIFT * drv->id);
552 max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->id);
553
554 ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
555 ncpt = ncpt >> DRV_NCPT_SHIFT;
556
557 n = of_property_count_u32_elems(dn, "qcom,tcs-config");
558 if (n != 2 * TCS_TYPE_NR)
559 return -EINVAL;
560
561 for (i = 0; i < TCS_TYPE_NR; i++) {
562 ret = of_property_read_u32_index(dn, "qcom,tcs-config",
563 i * 2, &tcs_cfg[i].type);
564 if (ret)
565 return ret;
566 if (tcs_cfg[i].type >= TCS_TYPE_NR)
567 return -EINVAL;
568
569 ret = of_property_read_u32_index(dn, "qcom,tcs-config",
570 i * 2 + 1, &tcs_cfg[i].n);
571 if (ret)
572 return ret;
573 if (tcs_cfg[i].n > MAX_TCS_PER_TYPE)
574 return -EINVAL;
575 }
576
577 for (i = 0; i < TCS_TYPE_NR; i++) {
578 tcs = &drv->tcs[tcs_cfg[i].type];
579 if (tcs->drv)
580 return -EINVAL;
581 tcs->drv = drv;
582 tcs->type = tcs_cfg[i].type;
583 tcs->num_tcs = tcs_cfg[i].n;
584 tcs->ncpt = ncpt;
585 spin_lock_init(&tcs->lock);
586
587 if (!tcs->num_tcs || tcs->type == CONTROL_TCS)
588 continue;
589
590 if (st + tcs->num_tcs > max_tcs ||
591 st + tcs->num_tcs >= BITS_PER_BYTE * sizeof(tcs->mask))
592 return -EINVAL;
593
594 tcs->mask = ((1 << tcs->num_tcs) - 1) << st;
595 tcs->offset = st;
596 st += tcs->num_tcs;
597
598 /*
599 * Allocate memory to cache sleep and wake requests to
600 * avoid reading TCS register memory.
601 */
602 if (tcs->type == ACTIVE_TCS)
603 continue;
604
605 tcs->cmd_cache = devm_kcalloc(&pdev->dev,
606 tcs->num_tcs * ncpt, sizeof(u32),
607 GFP_KERNEL);
608 if (!tcs->cmd_cache)
609 return -ENOMEM;
610 }
611
612 drv->num_tcs = st;
613
614 return 0;
615}
616
617static int rpmh_rsc_probe(struct platform_device *pdev)
618{
619 struct device_node *dn = pdev->dev.of_node;
620 struct rsc_drv *drv;
621 int ret, irq;
622
623 /*
624 * Even though RPMh doesn't directly use cmd-db, all of its children
625 * do. To avoid adding this check to our children we'll do it now.
626 */
627 ret = cmd_db_ready();
628 if (ret) {
629 if (ret != -EPROBE_DEFER)
630 dev_err(&pdev->dev, "Command DB not available (%d)\n",
631 ret);
632 return ret;
633 }
634
635 drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
636 if (!drv)
637 return -ENOMEM;
638
639 ret = of_property_read_u32(dn, "qcom,drv-id", &drv->id);
640 if (ret)
641 return ret;
642
643 drv->name = of_get_property(dn, "label", NULL);
644 if (!drv->name)
645 drv->name = dev_name(&pdev->dev);
646
647 ret = rpmh_probe_tcs_config(pdev, drv);
648 if (ret)
649 return ret;
650
651 spin_lock_init(&drv->lock);
652 bitmap_zero(drv->tcs_in_use, MAX_TCS_NR);
653
654 irq = platform_get_irq(pdev, drv->id);
655 if (irq < 0)
656 return irq;
657
658 ret = devm_request_irq(&pdev->dev, irq, tcs_tx_done,
659 IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
660 drv->name, drv);
661 if (ret)
662 return ret;
663
664 /* Enable the active TCS to send requests immediately */
665 write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);
666
667 spin_lock_init(&drv->client.cache_lock);
668 INIT_LIST_HEAD(&drv->client.cache);
669 INIT_LIST_HEAD(&drv->client.batch_cache);
670
671 dev_set_drvdata(&pdev->dev, drv);
672
673 return devm_of_platform_populate(&pdev->dev);
674}
675
676static const struct of_device_id rpmh_drv_match[] = {
677 { .compatible = "qcom,rpmh-rsc", },
678 { }
679};
680
681static struct platform_driver rpmh_driver = {
682 .probe = rpmh_rsc_probe,
683 .driver = {
684 .name = "rpmh",
685 .of_match_table = rpmh_drv_match,
686 },
687};
688
689static int __init rpmh_driver_init(void)
690{
691 return platform_driver_register(&rpmh_driver);
692}
693arch_initcall(rpmh_driver_init);
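
To illustrate the TCS register addressing implemented by read_tcs_reg() and
write_tcs_cmd() above (the tcs_id and cmd_id values are arbitrary):

	/*
	 * Address of RSC_DRV_CMD_ADDR for command 2 of TCS 1:
	 *
	 *   tcs_base + RSC_DRV_CMD_ADDR               0x34  (52)
	 *            + RSC_DRV_TCS_OFFSET * tcs_id    672 * 1
	 *            + RSC_DRV_CMD_OFFSET * cmd_id    20 * 2
	 *   = tcs_base + 764 (0x2fc)
	 */
	u32 addr = read_tcs_reg(drv, RSC_DRV_CMD_ADDR, 1, 2);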
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 000000000000..c7beb6841289
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,513 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#include <linux/atomic.h>
7#include <linux/bug.h>
8#include <linux/interrupt.h>
9#include <linux/jiffies.h>
10#include <linux/kernel.h>
11#include <linux/list.h>
12#include <linux/module.h>
13#include <linux/of.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18#include <linux/wait.h>
19
20#include <soc/qcom/rpmh.h>
21
22#include "rpmh-internal.h"
23
24#define RPMH_TIMEOUT_MS msecs_to_jiffies(10000)
25
26#define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \
27 struct rpmh_request name = { \
28 .msg = { \
29 .state = s, \
30 .cmds = name.cmd, \
31 .num_cmds = 0, \
32 .wait_for_compl = true, \
33 }, \
34 .cmd = { { 0 } }, \
35 .completion = q, \
36 .dev = dev, \
37 .needs_free = false, \
38 }
39
40#define ctrlr_to_drv(ctrlr) container_of(ctrlr, struct rsc_drv, client)
41
42/**
43 * struct cache_req: the request object for caching
44 *
45 * @addr: the address of the resource
46 * @sleep_val: the sleep vote
47 * @wake_val: the wake vote
48 * @list: linked list obj
49 */
50struct cache_req {
51 u32 addr;
52 u32 sleep_val;
53 u32 wake_val;
54 struct list_head list;
55};
56
57/**
58 * struct batch_cache_req - An entry in our batch cache
59 *
60 * @list: linked list obj
61 * @count: number of messages
62 * @rpm_msgs: the messages
63 */
64
65struct batch_cache_req {
66 struct list_head list;
67 int count;
68 struct rpmh_request rpm_msgs[];
69};
70
71static struct rpmh_ctrlr *get_rpmh_ctrlr(const struct device *dev)
72{
73 struct rsc_drv *drv = dev_get_drvdata(dev->parent);
74
75 return &drv->client;
76}
77
78void rpmh_tx_done(const struct tcs_request *msg, int r)
79{
80 struct rpmh_request *rpm_msg = container_of(msg, struct rpmh_request,
81 msg);
82 struct completion *compl = rpm_msg->completion;
83
84 rpm_msg->err = r;
85
86 if (r)
87 dev_err(rpm_msg->dev, "RPMH TX fail in msg addr=%#x, err=%d\n",
88 rpm_msg->msg.cmds[0].addr, r);
89
90 if (!compl)
91 goto exit;
92
93 /* Signal the blocking thread we are done */
94 complete(compl);
95
96exit:
97 if (rpm_msg->needs_free)
98 kfree(rpm_msg);
99}
100
101static struct cache_req *__find_req(struct rpmh_ctrlr *ctrlr, u32 addr)
102{
103 struct cache_req *p, *req = NULL;
104
105 list_for_each_entry(p, &ctrlr->cache, list) {
106 if (p->addr == addr) {
107 req = p;
108 break;
109 }
110 }
111
112 return req;
113}
114
115static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
116 enum rpmh_state state,
117 struct tcs_cmd *cmd)
118{
119 struct cache_req *req;
120 unsigned long flags;
121
122 spin_lock_irqsave(&ctrlr->cache_lock, flags);
123 req = __find_req(ctrlr, cmd->addr);
124 if (req)
125 goto existing;
126
127 req = kzalloc(sizeof(*req), GFP_ATOMIC);
128 if (!req) {
129 req = ERR_PTR(-ENOMEM);
130 goto unlock;
131 }
132
133 req->addr = cmd->addr;
134 req->sleep_val = req->wake_val = UINT_MAX;
135 INIT_LIST_HEAD(&req->list);
136 list_add_tail(&req->list, &ctrlr->cache);
137
138existing:
139 switch (state) {
140 case RPMH_ACTIVE_ONLY_STATE:
141 if (req->sleep_val != UINT_MAX)
142 req->wake_val = cmd->data;
143 break;
144 case RPMH_WAKE_ONLY_STATE:
145 req->wake_val = cmd->data;
146 break;
147 case RPMH_SLEEP_STATE:
148 req->sleep_val = cmd->data;
149 break;
150 default:
151 break;
152 }
153
154 ctrlr->dirty = true;
155unlock:
156 spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
157
158 return req;
159}
160
161/**
162 * __rpmh_write: Cache and send the RPMH request
163 *
164 * @dev: The device making the request
165 * @state: Active/Sleep request type
166 * @rpm_msg: The data that needs to be sent (cmds).
167 *
168 * Cache the RPMH request and send if the state is ACTIVE_ONLY.
169 * SLEEP/WAKE_ONLY requests are not sent to the controller at
170 * this time. Use rpmh_flush() to send them to the controller.
171 */
172static int __rpmh_write(const struct device *dev, enum rpmh_state state,
173 struct rpmh_request *rpm_msg)
174{
175 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
176 int ret = -EINVAL;
177 struct cache_req *req;
178 int i;
179
180 rpm_msg->msg.state = state;
181
182 /* Cache the request in our store and link the payload */
183 for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
184 req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
185 if (IS_ERR(req))
186 return PTR_ERR(req);
187 }
188
189 rpm_msg->msg.state = state;
190
191 if (state == RPMH_ACTIVE_ONLY_STATE) {
192 WARN_ON(irqs_disabled());
193 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
194 } else {
195 ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
196 &rpm_msg->msg);
197 /* Clean up our call by spoofing tx_done */
198 rpmh_tx_done(&rpm_msg->msg, ret);
199 }
200
201 return ret;
202}
203
204static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
205 const struct tcs_cmd *cmd, u32 n)
206{
207 if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
208 return -EINVAL;
209
210 memcpy(req->cmd, cmd, n * sizeof(*cmd));
211
212 req->msg.state = state;
213 req->msg.cmds = req->cmd;
214 req->msg.num_cmds = n;
215
216 return 0;
217}
218
219/**
220 * rpmh_write_async: Write a set of RPMH commands
221 *
222 * @dev: The device making the request
223 * @state: Active/sleep set
224 * @cmd: The payload data
225 * @n: The number of elements in payload
226 *
227 * Write a set of RPMH commands, the order of commands is maintained
228 * and will be sent as a single shot.
229 */
230int rpmh_write_async(const struct device *dev, enum rpmh_state state,
231 const struct tcs_cmd *cmd, u32 n)
232{
233 struct rpmh_request *rpm_msg;
234 int ret;
235
236 rpm_msg = kzalloc(sizeof(*rpm_msg), GFP_ATOMIC);
237 if (!rpm_msg)
238 return -ENOMEM;
239 rpm_msg->needs_free = true;
240
241 ret = __fill_rpmh_msg(rpm_msg, state, cmd, n);
242 if (ret) {
243 kfree(rpm_msg);
244 return ret;
245 }
246
247 return __rpmh_write(dev, state, rpm_msg);
248}
249EXPORT_SYMBOL(rpmh_write_async);
250
251/**
252 * rpmh_write: Write a set of RPMH commands and block until response
253 *
254 * @dev: The device making the request
255 * @state: Active/sleep set
256 * @cmd: The payload data
257 * @n: The number of elements in @cmd
258 *
259 * May sleep. Do not call from atomic contexts.
260 */
261int rpmh_write(const struct device *dev, enum rpmh_state state,
262 const struct tcs_cmd *cmd, u32 n)
263{
264 DECLARE_COMPLETION_ONSTACK(compl);
265 DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
266 int ret;
267
268 if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
269 return -EINVAL;
270
271 memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
272 rpm_msg.msg.num_cmds = n;
273
274 ret = __rpmh_write(dev, state, &rpm_msg);
275 if (ret)
276 return ret;
277
278 ret = wait_for_completion_timeout(&compl, RPMH_TIMEOUT_MS);
279 WARN_ON(!ret);
280 return (ret > 0) ? 0 : -ETIMEDOUT;
281}
282EXPORT_SYMBOL(rpmh_write);
283
284static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
285{
286 unsigned long flags;
287
288 spin_lock_irqsave(&ctrlr->cache_lock, flags);
289 list_add_tail(&req->list, &ctrlr->batch_cache);
290 spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
291}
292
293static int flush_batch(struct rpmh_ctrlr *ctrlr)
294{
295 struct batch_cache_req *req;
296 const struct rpmh_request *rpm_msg;
297 unsigned long flags;
298 int ret = 0;
299 int i;
300
301 /* Send Sleep/Wake requests to the controller, expect no response */
302 spin_lock_irqsave(&ctrlr->cache_lock, flags);
303 list_for_each_entry(req, &ctrlr->batch_cache, list) {
304 for (i = 0; i < req->count; i++) {
305 rpm_msg = req->rpm_msgs + i;
306 ret = rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr),
307 &rpm_msg->msg);
308 if (ret)
309 break;
310 }
311 }
312 spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
313
314 return ret;
315}
316
317static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
318{
319 struct batch_cache_req *req, *tmp;
320 unsigned long flags;
321
322 spin_lock_irqsave(&ctrlr->cache_lock, flags);
323 list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
324 kfree(req);
325 INIT_LIST_HEAD(&ctrlr->batch_cache);
326 spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
327}
328
329/**
330 * rpmh_write_batch: Write multiple sets of RPMH commands and wait for the
331 * batch to finish.
332 *
333 * @dev: the device making the request
334 * @state: Active/sleep set
335 * @cmd: The payload data
336 * @n: The array of counts of elements in each batch, 0 terminated.
337 *
338 * Write a request to the RSC controller without caching. If the request
339 * state is ACTIVE, then the requests are treated as completion request
340 * and sent to the controller immediately. The function waits until all the
341 * commands are complete. If the request was to SLEEP or WAKE_ONLY, then the
342 * request is sent as fire-n-forget and no ack is expected.
343 *
344 * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
345 */
346int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
347 const struct tcs_cmd *cmd, u32 *n)
348{
349 struct batch_cache_req *req;
350 struct rpmh_request *rpm_msgs;
351 DECLARE_COMPLETION_ONSTACK(compl);
352 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
353 unsigned long time_left;
354 int count = 0;
355 int ret, i, j;
356
357 if (!cmd || !n)
358 return -EINVAL;
359
360 while (n[count] > 0)
361 count++;
362 if (!count)
363 return -EINVAL;
364
365 req = kzalloc(sizeof(*req) + count * sizeof(req->rpm_msgs[0]),
366 GFP_ATOMIC);
367 if (!req)
368 return -ENOMEM;
369 req->count = count;
370 rpm_msgs = req->rpm_msgs;
371
372 for (i = 0; i < count; i++) {
373 __fill_rpmh_msg(rpm_msgs + i, state, cmd, n[i]);
374 cmd += n[i];
375 }
376
377 if (state != RPMH_ACTIVE_ONLY_STATE) {
378 cache_batch(ctrlr, req);
379 return 0;
380 }
381
382 for (i = 0; i < count; i++) {
383 rpm_msgs[i].completion = &compl;
384 ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msgs[i].msg);
385 if (ret) {
386 pr_err("Error(%d) sending RPMH message addr=%#x\n",
387 ret, rpm_msgs[i].msg.cmds[0].addr);
388 for (j = i; j < count; j++)
389 rpmh_tx_done(&rpm_msgs[j].msg, ret);
390 break;
391 }
392 }
393
394 time_left = RPMH_TIMEOUT_MS;
395 for (i = 0; i < count; i++) {
396 time_left = wait_for_completion_timeout(&compl, time_left);
397 if (!time_left) {
398 /*
399 * A request that times out may still finish later and
400 * would then signal a completion that lives on our stack
401 * after we have returned from this function; warn loudly.
402 */
403 WARN_ON(1);
404 ret = -ETIMEDOUT;
405 goto exit;
406 }
407 }
408
409exit:
410 kfree(req);
411
412 return ret;
413}
414EXPORT_SYMBOL(rpmh_write_batch);
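
To make the zero-terminated @n convention concrete, here is a hedged sketch of a two-batch active request; addresses and counts are illustrative only.

#include <linux/device.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

static int example_batch_vote(struct device *rpmh_dev)
{
	/* One flat command array, split into batches by the counts in n[]. */
	struct tcs_cmd cmds[] = {
		{ .addr = 0x30000, .data = 1 },	/* batch 0, cmd 0 */
		{ .addr = 0x30004, .data = 2 },	/* batch 0, cmd 1 */
		{ .addr = 0x30010, .data = 3 },	/* batch 1, cmd 0 */
	};
	u32 n[] = { 2, 1, 0 };			/* counts, zero terminated */

	/* ACTIVE_ONLY batches are sent at once; this blocks for both acks. */
	return rpmh_write_batch(rpmh_dev, RPMH_ACTIVE_ONLY_STATE, cmds, n);
}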
415
416static int is_req_valid(struct cache_req *req)
417{
418 return (req->sleep_val != UINT_MAX &&
419 req->wake_val != UINT_MAX &&
420 req->sleep_val != req->wake_val);
421}
422
423static int send_single(const struct device *dev, enum rpmh_state state,
424 u32 addr, u32 data)
425{
426 DEFINE_RPMH_MSG_ONSTACK(dev, state, NULL, rpm_msg);
427 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
428
429 /* Wake sets are always complete and sleep sets are not */
430 rpm_msg.msg.wait_for_compl = (state == RPMH_WAKE_ONLY_STATE);
431 rpm_msg.cmd[0].addr = addr;
432 rpm_msg.cmd[0].data = data;
433 rpm_msg.msg.num_cmds = 1;
434
435 return rpmh_rsc_write_ctrl_data(ctrlr_to_drv(ctrlr), &rpm_msg.msg);
436}
437
438/**
439 * rpmh_flush: Flushes the buffered active and sleep sets to TCS
440 *
441 * @dev: The device making the request
442 *
443 * Return: -EBUSY if the controller is busy, probably waiting on a response
444 * to an RPMH request sent earlier.
445 *
446 * This function is always called from the sleep code of the last CPU
447 * that is powering down the entire system. Since no other RPMH API would be
448 * executing at this time, it is safe to run lockless.
449 */
450int rpmh_flush(const struct device *dev)
451{
452 struct cache_req *p;
453 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
454 int ret;
455
456 if (!ctrlr->dirty) {
457 pr_debug("Skipping flush, TCS has latest data.\n");
458 return 0;
459 }
460
461 /* First flush the cached batch requests */
462 ret = flush_batch(ctrlr);
463 if (ret)
464 return ret;
465
466 /*
467 * Nobody else should be calling this function other than system PM,
468 * hence we can run without locks.
469 */
470 list_for_each_entry(p, &ctrlr->cache, list) {
471 if (!is_req_valid(p)) {
472			pr_debug("%s: skipping RPMH req: a:%#x s:%#x w:%#x\n",
473 __func__, p->addr, p->sleep_val, p->wake_val);
474 continue;
475 }
476 ret = send_single(dev, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
477 if (ret)
478 return ret;
479 ret = send_single(dev, RPMH_WAKE_ONLY_STATE,
480 p->addr, p->wake_val);
481 if (ret)
482 return ret;
483 }
484
485 ctrlr->dirty = false;
486
487 return 0;
488}
489EXPORT_SYMBOL(rpmh_flush);
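
For orientation, a sketch of the sort of system-PM call site the comment above describes; the function name and flow are invented for illustration.

#include <linux/device.h>
#include <soc/qcom/rpmh.h>

/* Hypothetical last-CPU-down path: flush cached sleep/wake votes. */
static int example_enter_system_sleep(struct device *rpmh_dev)
{
	int ret;

	ret = rpmh_flush(rpmh_dev);	/* safe lockless: one CPU remains */
	if (ret)
		return ret;		/* e.g. -EBUSY: abort the sleep attempt */

	/* ... hand control over to firmware / power down ... */
	return 0;
}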
490
491/**
492 * rpmh_invalidate: Invalidate all cached sleep and wake
493 * sets.
494 *
495 * @dev: The device making the request
496 *
497 * Invalidate the sleep and wake values in the TCS blocks.
498 */
499int rpmh_invalidate(const struct device *dev)
500{
501 struct rpmh_ctrlr *ctrlr = get_rpmh_ctrlr(dev);
502 int ret;
503
504 invalidate_batch(ctrlr);
505 ctrlr->dirty = true;
506
507 do {
508 ret = rpmh_rsc_invalidate(ctrlr_to_drv(ctrlr));
509 } while (ret == -EAGAIN);
510
511 return ret;
512}
513EXPORT_SYMBOL(rpmh_invalidate);
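
Putting the pieces together, an aggregating client that re-computes its votes might invalidate the stale sets, cache fresh ones, and flush, roughly as sketched here (all names hypothetical).

#include <linux/device.h>
#include <soc/qcom/rpmh.h>
#include <soc/qcom/tcs.h>

static int example_refresh_votes(struct device *rpmh_dev,
				 const struct tcs_cmd *cmds, u32 *n)
{
	int ret;

	ret = rpmh_invalidate(rpmh_dev);	/* -EAGAIN is retried internally */
	if (ret)
		return ret;

	/* SLEEP/WAKE batches are only cached here ... */
	ret = rpmh_write_batch(rpmh_dev, RPMH_SLEEP_STATE, cmds, n);
	if (ret)
		return ret;

	/* ... and written to the TCSes when system PM flushes. */
	return rpmh_flush(rpmh_dev);
}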
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 70b2ee80d6bd..bf4bd71ab53f 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -364,11 +364,6 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
364 end = phdr_to_last_uncached_entry(phdr); 364 end = phdr_to_last_uncached_entry(phdr);
365 cached = phdr_to_last_cached_entry(phdr); 365 cached = phdr_to_last_cached_entry(phdr);
366 366
367 if (smem->global_partition) {
368 dev_err(smem->dev, "Already found the global partition\n");
369 return -EINVAL;
370 }
371
372 while (hdr < end) { 367 while (hdr < end) {
373 if (hdr->canary != SMEM_PRIVATE_CANARY) 368 if (hdr->canary != SMEM_PRIVATE_CANARY)
374 goto bad_canary; 369 goto bad_canary;
@@ -736,6 +731,11 @@ static int qcom_smem_set_global_partition(struct qcom_smem *smem)
736 bool found = false; 731 bool found = false;
737 int i; 732 int i;
738 733
734 if (smem->global_partition) {
735 dev_err(smem->dev, "Already found the global partition\n");
736 return -EINVAL;
737 }
738
739 ptable = qcom_smem_get_ptable(smem); 739 ptable = qcom_smem_get_ptable(smem);
740 if (IS_ERR(ptable)) 740 if (IS_ERR(ptable))
741 return PTR_ERR(ptable); 741 return PTR_ERR(ptable);
diff --git a/drivers/soc/qcom/trace-rpmh.h b/drivers/soc/qcom/trace-rpmh.h
new file mode 100644
index 000000000000..feb0cb455e37
--- /dev/null
+++ b/drivers/soc/qcom/trace-rpmh.h
@@ -0,0 +1,82 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
7#define _TRACE_RPMH_H
8
9#undef TRACE_SYSTEM
10#define TRACE_SYSTEM rpmh
11
12#include <linux/tracepoint.h>
13#include "rpmh-internal.h"
14
15TRACE_EVENT(rpmh_tx_done,
16
17 TP_PROTO(struct rsc_drv *d, int m, const struct tcs_request *r, int e),
18
19 TP_ARGS(d, m, r, e),
20
21 TP_STRUCT__entry(
22 __string(name, d->name)
23 __field(int, m)
24 __field(u32, addr)
25 __field(u32, data)
26 __field(int, err)
27 ),
28
29 TP_fast_assign(
30 __assign_str(name, d->name);
31 __entry->m = m;
32 __entry->addr = r->cmds[0].addr;
33 __entry->data = r->cmds[0].data;
34 __entry->err = e;
35 ),
36
37 TP_printk("%s: ack: tcs-m: %d addr: %#x data: %#x errno: %d",
38 __get_str(name), __entry->m, __entry->addr, __entry->data,
39 __entry->err)
40);
41
42TRACE_EVENT(rpmh_send_msg,
43
44 TP_PROTO(struct rsc_drv *d, int m, int n, u32 h,
45 const struct tcs_cmd *c),
46
47 TP_ARGS(d, m, n, h, c),
48
49 TP_STRUCT__entry(
50 __string(name, d->name)
51 __field(int, m)
52 __field(int, n)
53 __field(u32, hdr)
54 __field(u32, addr)
55 __field(u32, data)
56 __field(bool, wait)
57 ),
58
59 TP_fast_assign(
60 __assign_str(name, d->name);
61 __entry->m = m;
62 __entry->n = n;
63 __entry->hdr = h;
64 __entry->addr = c->addr;
65 __entry->data = c->data;
66 __entry->wait = c->wait;
67 ),
68
69 TP_printk("%s: send-msg: tcs(m): %d cmd(n): %d msgid: %#x addr: %#x data: %#x complete: %d",
70 __get_str(name), __entry->m, __entry->n, __entry->hdr,
71 __entry->addr, __entry->data, __entry->wait)
72);
73
74#endif /* _TRACE_RPMH_H */
75
76#undef TRACE_INCLUDE_PATH
77#define TRACE_INCLUDE_PATH .
78
79#undef TRACE_INCLUDE_FILE
80#define TRACE_INCLUDE_FILE trace-rpmh
81
82#include <trace/define_trace.h>
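
These tracepoints are intended to be fired from the DRV code; a hedged sketch of what the call sites in rpmh-rsc.c could look like, with the surrounding register programming elided and the function names invented:

/* In rpmh-rsc.c, instantiating and firing the events might look like: */
#define CREATE_TRACE_POINTS
#include "trace-rpmh.h"

static void example_write_tcs_cmd(struct rsc_drv *drv, int m, int n,
				  u32 msgid, const struct tcs_cmd *cmd)
{
	/* ... program the TCS_CMD_MSGID/ADDR/DATA registers ... */
	trace_rpmh_send_msg(drv, m, n, msgid, cmd);
}

static void example_tx_done(struct rsc_drv *drv, int m,
			    const struct tcs_request *req, int err)
{
	trace_rpmh_tx_done(drv, m, req, err);
	/* ... complete the request and release the TCS slot ... */
}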
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 000000000000..868f998ea998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __DT_QCOM_RPMH_RSC_H__
7#define __DT_QCOM_RPMH_RSC_H__
8
9#define SLEEP_TCS 0
10#define WAKE_TCS 1
11#define ACTIVE_TCS 2
12#define CONTROL_TCS 3
13
14#endif /* __DT_QCOM_RPMH_RSC_H__ */
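
For reference, the RSC binding pairs these indices with per-type TCS counts; a sketch of a consumer node under that assumption (the qcom,tcs-config property name is taken from the binding, register values are illustrative):

	#include <dt-bindings/soc/qcom,rpmh-rsc.h>

	apps_rsc: rsc@179c0000 {
		compatible = "qcom,rpmh-rsc";
		/* <type count> pairs; order must match the hardware */
		qcom,tcs-config = <ACTIVE_TCS  2>,
				  <SLEEP_TCS   3>,
				  <WAKE_TCS    3>,
				  <CONTROL_TCS 1>;
	};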
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index b401b962afff..5d65521260b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -87,6 +87,10 @@ static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
87static inline int 87static inline int
88qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; } 88qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
89static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; } 89static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
90static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
91 unsigned int *src,
92 struct qcom_scm_vmperm *newvm,
93 int dest_cnt) { return -ENODEV; }
90static inline void qcom_scm_cpu_power_down(u32 flags) {} 94static inline void qcom_scm_cpu_power_down(u32 flags) {}
91static inline u32 qcom_scm_get_version(void) { return 0; } 95static inline u32 qcom_scm_get_version(void) { return 0; }
92static inline u32 96static inline u32
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 000000000000..7e3b9c605ab2
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,180 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
4 *
5 */
6
7#include <linux/platform_device.h>
8#ifndef __LLCC_QCOM__
9#define __LLCC_QCOM__
10
11#define LLCC_CPUSS 1
12#define LLCC_VIDSC0 2
13#define LLCC_VIDSC1 3
14#define LLCC_ROTATOR 4
15#define LLCC_VOICE 5
16#define LLCC_AUDIO 6
17#define LLCC_MDMHPGRW 7
18#define LLCC_MDM 8
19#define LLCC_CMPT 10
20#define LLCC_GPUHTW 11
21#define LLCC_GPU 12
22#define LLCC_MMUHWT 13
23#define LLCC_CMPTDMA 15
24#define LLCC_DISP 16
25#define LLCC_VIDFW 17
26#define LLCC_MDMHPFX 20
27#define LLCC_MDMPNG 21
28#define LLCC_AUDHW 22
29
30/**
31 * llcc_slice_desc - Cache slice descriptor
32 * @slice_id: llcc slice id
33 * @slice_size: Size allocated for the llcc slice
34 */
35struct llcc_slice_desc {
36 u32 slice_id;
37 size_t slice_size;
38};
39
40/**
41 * llcc_slice_config - Data associated with the llcc slice
42 * @usecase_id: usecase id for which the llcc slice is used
43 * @slice_id: llcc slice id assigned to each slice
44 * @max_cap: maximum capacity of the llcc slice
45 * @priority: priority of the llcc slice
46 * @fixed_size: when true, the llcc slice cannot grow beyond its size
47 * @bonus_ways: bonus ways associated with llcc slice
48 * @res_ways: reserved ways associated with llcc slice
49 * @cache_mode: mode of the llcc slice
50 * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
51 * @dis_cap_alloc: Disable capacity based allocation
52 * @retain_on_pc: Retain through power collapse
53 * @activate_on_init: activate the slice on init
54 */
55struct llcc_slice_config {
56 u32 usecase_id;
57 u32 slice_id;
58 u32 max_cap;
59 u32 priority;
60 bool fixed_size;
61 u32 bonus_ways;
62 u32 res_ways;
63 u32 cache_mode;
64 u32 probe_target_ways;
65 bool dis_cap_alloc;
66 bool retain_on_pc;
67 bool activate_on_init;
68};
69
70/**
71 * llcc_drv_data - Data associated with the llcc driver
72 * @regmap: regmap associated with the llcc device
73 * @cfg: pointer to the data structure for slice configuration
74 * @lock: mutex to serialize slice activation and deactivation
75 * @cfg_size: size of the config data table
76 * @max_slices: max slices as read from device tree
77 * @bcast_off: Offset of the broadcast bank
78 * @num_banks: Number of llcc banks
79 * @bitmap: Bit map to track the active slice ids
80 * @offsets: Pointer to the bank offsets array
81 */
82struct llcc_drv_data {
83 struct regmap *regmap;
84 const struct llcc_slice_config *cfg;
85 struct mutex lock;
86 u32 cfg_size;
87 u32 max_slices;
88 u32 bcast_off;
89 u32 num_banks;
90 unsigned long *bitmap;
91 u32 *offsets;
92};
93
94#if IS_ENABLED(CONFIG_QCOM_LLCC)
95/**
96 * llcc_slice_getd - get llcc slice descriptor
97 * @uid: usecase_id of the client
98 */
99struct llcc_slice_desc *llcc_slice_getd(u32 uid);
100
101/**
102 * llcc_slice_putd - put the llcc slice descriptor
103 * @desc: Pointer to llcc slice descriptor
104 */
105void llcc_slice_putd(struct llcc_slice_desc *desc);
106
107/**
108 * llcc_get_slice_id - get slice id
109 * @desc: Pointer to llcc slice descriptor
110 */
111int llcc_get_slice_id(struct llcc_slice_desc *desc);
112
113/**
114 * llcc_get_slice_size - llcc slice size
115 * @desc: Pointer to llcc slice descriptor
116 */
117size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
118
119/**
120 * llcc_slice_activate - Activate the llcc slice
121 * @desc: Pointer to llcc slice descriptor
122 */
123int llcc_slice_activate(struct llcc_slice_desc *desc);
124
125/**
126 * llcc_slice_deactivate - Deactivate the llcc slice
127 * @desc: Pointer to llcc slice descriptor
128 */
129int llcc_slice_deactivate(struct llcc_slice_desc *desc);
130
131/**
132 * qcom_llcc_probe - program the sct table
133 * @pdev: platform device pointer
134 * @table: soc sct table
135 * @sz: Size of the config table
136 */
137int qcom_llcc_probe(struct platform_device *pdev,
138 const struct llcc_slice_config *table, u32 sz);
139#else
140static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
141{
142 return NULL;
143}
144
145static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
146{
147
148}
149
150static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
151{
152 return -EINVAL;
153}
154
155static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
156{
157 return 0;
158}
159static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
160{
161 return -EINVAL;
162}
163
164static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
165{
166 return -EINVAL;
167}
168static inline int qcom_llcc_probe(struct platform_device *pdev,
169 const struct llcc_slice_config *table, u32 sz)
170{
171 return -ENODEV;
172}
173
174static inline int qcom_llcc_remove(struct platform_device *pdev)
175{
176 return -ENODEV;
177}
178#endif
179
180#endif
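
A minimal sketch of how a client would consume this interface; LLCC_GPU is a real usecase id from the header above, while the function and the error handling around it are illustrative.

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_claim_gpu_slice(void)
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR_OR_NULL(desc))	/* the stubbed variant returns NULL */
		return -ENODEV;

	ret = llcc_slice_activate(desc);
	if (ret) {
		llcc_slice_putd(desc);
		return ret;
	}

	pr_debug("llcc slice id %d, size %zu\n",
		 llcc_get_slice_id(desc), llcc_get_slice_size(desc));
	return 0;
}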
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 000000000000..619e07c75da9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,51 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __SOC_QCOM_RPMH_H__
7#define __SOC_QCOM_RPMH_H__
8
9#include <soc/qcom/tcs.h>
10#include <linux/platform_device.h>
11
12
13#if IS_ENABLED(CONFIG_QCOM_RPMH)
14int rpmh_write(const struct device *dev, enum rpmh_state state,
15 const struct tcs_cmd *cmd, u32 n);
16
17int rpmh_write_async(const struct device *dev, enum rpmh_state state,
18 const struct tcs_cmd *cmd, u32 n);
19
20int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
21 const struct tcs_cmd *cmd, u32 *n);
22
23int rpmh_flush(const struct device *dev);
24
25int rpmh_invalidate(const struct device *dev);
26
27#else
28
29static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
30 const struct tcs_cmd *cmd, u32 n)
31{ return -ENODEV; }
32
33static inline int rpmh_write_async(const struct device *dev,
34 enum rpmh_state state,
35 const struct tcs_cmd *cmd, u32 n)
36{ return -ENODEV; }
37
38static inline int rpmh_write_batch(const struct device *dev,
39 enum rpmh_state state,
40 const struct tcs_cmd *cmd, u32 *n)
41{ return -ENODEV; }
42
43static inline int rpmh_flush(const struct device *dev)
44{ return -ENODEV; }
45
46static inline int rpmh_invalidate(const struct device *dev)
47{ return -ENODEV; }
48
49#endif /* CONFIG_QCOM_RPMH */
50
51#endif /* __SOC_QCOM_RPMH_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 000000000000..262876a59e86
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,56 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
4 */
5
6#ifndef __SOC_QCOM_TCS_H__
7#define __SOC_QCOM_TCS_H__
8
9#define MAX_RPMH_PAYLOAD 16
10
11/**
12 * rpmh_state: state for the request
13 *
14 * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
15 * is powered down. There is no client using the
16 * resource actively.
17 * RPMH_WAKE_ONLY_STATE: Restore the resource state to the value last
18 * requested before the processor was powered down.
19 * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
20 * is aggregated immediately.
21 */
22enum rpmh_state {
23 RPMH_SLEEP_STATE,
24 RPMH_WAKE_ONLY_STATE,
25 RPMH_ACTIVE_ONLY_STATE,
26};
27
28/**
29 * struct tcs_cmd: an individual request to RPMH.
30 *
31 * @addr: the address of the resource: slv_id in bits [18:16], offset in bits [15:0]
32 * @data: the resource state request
33 * @wait: wait for this request to be complete before sending the next
34 */
35struct tcs_cmd {
36 u32 addr;
37 u32 data;
38 u32 wait;
39};
40
41/**
42 * struct tcs_request: A set of tcs_cmds sent together in a TCS
43 *
44 * @state: state for the request.
45 * @wait_for_compl: wait until we get a response from the h/w accelerator
46 * @num_cmds: the number of @cmds in this request
47 * @cmds: an array of tcs_cmds
48 */
49struct tcs_request {
50 enum rpmh_state state;
51 u32 wait_for_compl;
52 u32 num_cmds;
53 struct tcs_cmd *cmds;
54};
55
56#endif /* __SOC_QCOM_TCS_H__ */
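
To close, a sketch of how the two structures compose, using the slv_id/offset split documented for @addr; every value here is invented for illustration.

#include <linux/kernel.h>
#include <soc/qcom/tcs.h>

/*
 * addr packs a hypothetical slv_id 0x3 into bits [18:16] and offsets
 * 0x14/0x18 into bits [15:0]; see the @addr description above.
 */
static struct tcs_cmd example_cmds[] = {
	{ .addr = (0x3 << 16) | 0x14, .data = 0x1, .wait = 1 },
	{ .addr = (0x3 << 16) | 0x18, .data = 0x0, .wait = 0 },
};

static struct tcs_request example_req = {
	.state		= RPMH_ACTIVE_ONLY_STATE,
	.wait_for_compl	= 1,
	.num_cmds	= ARRAY_SIZE(example_cmds),
	.cmds		= example_cmds,
};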