aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/soc
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 18:00:03 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 18:00:03 -0500
commitb44a3d2a85c64208a57362a1728efb58a6556cd6 (patch)
tree293302b3ac918eb75b442fa035eb976850163b1d /drivers/soc
parent56e0464980febfa50432a070261579415c72664e (diff)
parentd13a5c8c4c3dbe299659bcff805f79a2c83e2bbc (diff)
Merge tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
Pull ARM SoC driver updates from Olof Johansson: "As we've enabled multiplatform kernels on ARM, and greatly done away with the contents under arch/arm/mach-*, there's still need for SoC-related drivers to go somewhere. Many of them go in through other driver trees, but we still have drivers/soc to hold some of the "doesn't fit anywhere" lowlevel code that might be shared between ARM and ARM64 (or just in general makes sense to not have under the architecture directory). This branch contains mostly such code: - Drivers for qualcomm SoCs for SMEM, SMD and SMD-RPM, used to communicate with power management blocks on these SoCs for use by clock, regulator and bus frequency drivers. - Allwinner Reduced Serial Bus driver, again used to communicate with PMICs. - Drivers for ARM's SCPI (System Control Processor). Not to be confused with PSCI (Power State Coordination Interface). SCPI is used to communicate with the assistant embedded cores doing power management, and we have yet to see how many of them will implement this for their hardware vs abstracting in other ways (or not at all like in the past). - To make confusion between SCPI and PSCI more likely, this release also includes an update of PSCI to interface version 1.0. - Rockchip support for power domains. 
- A driver to talk to the firmware on Raspberry Pi" * tag 'armsoc-drivers' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (57 commits) soc: qcom: smd-rpm: Correct size of outgoing message bus: sunxi-rsb: Add driver for Allwinner Reduced Serial Bus bus: sunxi-rsb: Add Allwinner Reduced Serial Bus (RSB) controller bindings ARM: bcm2835: add mutual inclusion protection drivers: psci: make PSCI 1.0 functions initialization version dependent dt-bindings: Correct paths in Rockchip power domains binding document soc: rockchip: power-domain: don't try to print the clock name in error case soc: qcom/smem: add HWSPINLOCK dependency clk: berlin: add cpuclk ARM: berlin: dts: add CLKID_CPU for BG2Q ARM: bcm2835: Add the Raspberry Pi firmware driver soc: qcom: smem: Move RPM message ram out of smem DT node soc: qcom: smd-rpm: Correct the active vs sleep state flagging soc: qcom: smd: delete unneeded of_node_put firmware: qcom-scm: build for correct architecture level soc: qcom: smd: Correct SMEM items for upper channels qcom-scm: add missing prototype for qcom_scm_is_available() qcom-scm: fix endianess issue in __qcom_scm_is_call_available soc: qcom: smd: Reject send of too big packets soc: qcom: smd: Handle big endian CPUs ...
Diffstat (limited to 'drivers/soc')
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile1
-rw-r--r--drivers/soc/qcom/Kconfig17
-rw-r--r--drivers/soc/qcom/smd-rpm.c68
-rw-r--r--drivers/soc/qcom/smd.c296
-rw-r--r--drivers/soc/qcom/smem.c368
-rw-r--r--drivers/soc/rockchip/Kconfig18
-rw-r--r--drivers/soc/rockchip/Makefile4
-rw-r--r--drivers/soc/rockchip/pm_domains.c490
9 files changed, 938 insertions, 325 deletions
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index c9c0fcce98a7..4e853ed2c82b 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -3,6 +3,7 @@ menu "SOC (System On Chip) specific Drivers"
3source "drivers/soc/brcmstb/Kconfig" 3source "drivers/soc/brcmstb/Kconfig"
4source "drivers/soc/mediatek/Kconfig" 4source "drivers/soc/mediatek/Kconfig"
5source "drivers/soc/qcom/Kconfig" 5source "drivers/soc/qcom/Kconfig"
6source "drivers/soc/rockchip/Kconfig"
6source "drivers/soc/sunxi/Kconfig" 7source "drivers/soc/sunxi/Kconfig"
7source "drivers/soc/ti/Kconfig" 8source "drivers/soc/ti/Kconfig"
8source "drivers/soc/versatile/Kconfig" 9source "drivers/soc/versatile/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 4e27f10367f0..f2ba2e932ae1 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/
6obj-$(CONFIG_MACH_DOVE) += dove/ 6obj-$(CONFIG_MACH_DOVE) += dove/
7obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ 7obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/
8obj-$(CONFIG_ARCH_QCOM) += qcom/ 8obj-$(CONFIG_ARCH_QCOM) += qcom/
9obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
9obj-$(CONFIG_ARCH_SUNXI) += sunxi/ 10obj-$(CONFIG_ARCH_SUNXI) += sunxi/
10obj-$(CONFIG_ARCH_TEGRA) += tegra/ 11obj-$(CONFIG_ARCH_TEGRA) += tegra/
11obj-$(CONFIG_SOC_TI) += ti/ 12obj-$(CONFIG_SOC_TI) += ti/
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index ba47b70f4d85..eec76141d9b9 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -19,6 +19,15 @@ config QCOM_PM
19 modes. It interface with various system drivers to put the cores in 19 modes. It interface with various system drivers to put the cores in
20 low power modes. 20 low power modes.
21 21
22config QCOM_SMEM
23 tristate "Qualcomm Shared Memory Manager (SMEM)"
24 depends on ARCH_QCOM
25 depends on HWSPINLOCK
26 help
27 Say y here to enable support for the Qualcomm Shared Memory Manager.
28 The driver provides an interface to items in a heap shared among all
29 processors in a Qualcomm platform.
30
22config QCOM_SMD 31config QCOM_SMD
23 tristate "Qualcomm Shared Memory Driver (SMD)" 32 tristate "Qualcomm Shared Memory Driver (SMD)"
24 depends on QCOM_SMEM 33 depends on QCOM_SMEM
@@ -40,11 +49,3 @@ config QCOM_SMD_RPM
40 49
41 Say M here if you want to include support for the Qualcomm RPM as a 50 Say M here if you want to include support for the Qualcomm RPM as a
42 module. This will build a module called "qcom-smd-rpm". 51 module. This will build a module called "qcom-smd-rpm".
43
44config QCOM_SMEM
45 tristate "Qualcomm Shared Memory Manager (SMEM)"
46 depends on ARCH_QCOM
47 help
48 Say y here to enable support for the Qualcomm Shared Memory Manager.
49 The driver provides an interface to items in a heap shared among all
50 processors in a Qualcomm platform.
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index 1392ccf14a20..2969321e1b09 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -17,6 +17,7 @@
17#include <linux/of_platform.h> 17#include <linux/of_platform.h>
18#include <linux/io.h> 18#include <linux/io.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/slab.h>
20 21
21#include <linux/soc/qcom/smd.h> 22#include <linux/soc/qcom/smd.h>
22#include <linux/soc/qcom/smd-rpm.h> 23#include <linux/soc/qcom/smd-rpm.h>
@@ -44,8 +45,8 @@ struct qcom_smd_rpm {
44 * @length: length of the payload 45 * @length: length of the payload
45 */ 46 */
46struct qcom_rpm_header { 47struct qcom_rpm_header {
47 u32 service_type; 48 __le32 service_type;
48 u32 length; 49 __le32 length;
49}; 50};
50 51
51/** 52/**
@@ -57,11 +58,11 @@ struct qcom_rpm_header {
57 * @data_len: length of the payload following this header 58 * @data_len: length of the payload following this header
58 */ 59 */
59struct qcom_rpm_request { 60struct qcom_rpm_request {
60 u32 msg_id; 61 __le32 msg_id;
61 u32 flags; 62 __le32 flags;
62 u32 type; 63 __le32 type;
63 u32 id; 64 __le32 id;
64 u32 data_len; 65 __le32 data_len;
65}; 66};
66 67
67/** 68/**
@@ -74,10 +75,10 @@ struct qcom_rpm_request {
74 * Multiple of these messages can be stacked in an rpm message. 75 * Multiple of these messages can be stacked in an rpm message.
75 */ 76 */
76struct qcom_rpm_message { 77struct qcom_rpm_message {
77 u32 msg_type; 78 __le32 msg_type;
78 u32 length; 79 __le32 length;
79 union { 80 union {
80 u32 msg_id; 81 __le32 msg_id;
81 u8 message[0]; 82 u8 message[0];
82 }; 83 };
83}; 84};
@@ -104,30 +105,34 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
104 static unsigned msg_id = 1; 105 static unsigned msg_id = 1;
105 int left; 106 int left;
106 int ret; 107 int ret;
107
108 struct { 108 struct {
109 struct qcom_rpm_header hdr; 109 struct qcom_rpm_header hdr;
110 struct qcom_rpm_request req; 110 struct qcom_rpm_request req;
111 u8 payload[count]; 111 u8 payload[];
112 } pkt; 112 } *pkt;
113 size_t size = sizeof(*pkt) + count;
113 114
114 /* SMD packets to the RPM may not exceed 256 bytes */ 115 /* SMD packets to the RPM may not exceed 256 bytes */
115 if (WARN_ON(sizeof(pkt) >= 256)) 116 if (WARN_ON(size >= 256))
116 return -EINVAL; 117 return -EINVAL;
117 118
119 pkt = kmalloc(size, GFP_KERNEL);
120 if (!pkt)
121 return -ENOMEM;
122
118 mutex_lock(&rpm->lock); 123 mutex_lock(&rpm->lock);
119 124
120 pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST; 125 pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST);
121 pkt.hdr.length = sizeof(struct qcom_rpm_request) + count; 126 pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count);
122 127
123 pkt.req.msg_id = msg_id++; 128 pkt->req.msg_id = cpu_to_le32(msg_id++);
124 pkt.req.flags = BIT(state); 129 pkt->req.flags = cpu_to_le32(state);
125 pkt.req.type = type; 130 pkt->req.type = cpu_to_le32(type);
126 pkt.req.id = id; 131 pkt->req.id = cpu_to_le32(id);
127 pkt.req.data_len = count; 132 pkt->req.data_len = cpu_to_le32(count);
128 memcpy(pkt.payload, buf, count); 133 memcpy(pkt->payload, buf, count);
129 134
130 ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt)); 135 ret = qcom_smd_send(rpm->rpm_channel, pkt, size);
131 if (ret) 136 if (ret)
132 goto out; 137 goto out;
133 138
@@ -138,6 +143,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
138 ret = rpm->ack_status; 143 ret = rpm->ack_status;
139 144
140out: 145out:
146 kfree(pkt);
141 mutex_unlock(&rpm->lock); 147 mutex_unlock(&rpm->lock);
142 return ret; 148 return ret;
143} 149}
@@ -148,27 +154,29 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
148 size_t count) 154 size_t count)
149{ 155{
150 const struct qcom_rpm_header *hdr = data; 156 const struct qcom_rpm_header *hdr = data;
157 size_t hdr_length = le32_to_cpu(hdr->length);
151 const struct qcom_rpm_message *msg; 158 const struct qcom_rpm_message *msg;
152 struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); 159 struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev);
153 const u8 *buf = data + sizeof(struct qcom_rpm_header); 160 const u8 *buf = data + sizeof(struct qcom_rpm_header);
154 const u8 *end = buf + hdr->length; 161 const u8 *end = buf + hdr_length;
155 char msgbuf[32]; 162 char msgbuf[32];
156 int status = 0; 163 int status = 0;
157 u32 len; 164 u32 len, msg_length;
158 165
159 if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST || 166 if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST ||
160 hdr->length < sizeof(struct qcom_rpm_message)) { 167 hdr_length < sizeof(struct qcom_rpm_message)) {
161 dev_err(&qsdev->dev, "invalid request\n"); 168 dev_err(&qsdev->dev, "invalid request\n");
162 return 0; 169 return 0;
163 } 170 }
164 171
165 while (buf < end) { 172 while (buf < end) {
166 msg = (struct qcom_rpm_message *)buf; 173 msg = (struct qcom_rpm_message *)buf;
167 switch (msg->msg_type) { 174 msg_length = le32_to_cpu(msg->length);
175 switch (le32_to_cpu(msg->msg_type)) {
168 case RPM_MSG_TYPE_MSG_ID: 176 case RPM_MSG_TYPE_MSG_ID:
169 break; 177 break;
170 case RPM_MSG_TYPE_ERR: 178 case RPM_MSG_TYPE_ERR:
171 len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf)); 179 len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf));
172 memcpy_fromio(msgbuf, msg->message, len); 180 memcpy_fromio(msgbuf, msg->message, len);
173 msgbuf[len - 1] = 0; 181 msgbuf[len - 1] = 0;
174 182
@@ -179,7 +187,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev,
179 break; 187 break;
180 } 188 }
181 189
182 buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4); 190 buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4);
183 } 191 }
184 192
185 rpm->ack_status = status; 193 rpm->ack_status = status;
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c
index a6155c917d52..86b598cff91a 100644
--- a/drivers/soc/qcom/smd.c
+++ b/drivers/soc/qcom/smd.c
@@ -65,7 +65,9 @@
65 */ 65 */
66 66
67struct smd_channel_info; 67struct smd_channel_info;
68struct smd_channel_info_pair;
68struct smd_channel_info_word; 69struct smd_channel_info_word;
70struct smd_channel_info_word_pair;
69 71
70#define SMD_ALLOC_TBL_COUNT 2 72#define SMD_ALLOC_TBL_COUNT 2
71#define SMD_ALLOC_TBL_SIZE 64 73#define SMD_ALLOC_TBL_SIZE 64
@@ -85,8 +87,8 @@ static const struct {
85 .fifo_base_id = 338 87 .fifo_base_id = 338
86 }, 88 },
87 { 89 {
88 .alloc_tbl_id = 14, 90 .alloc_tbl_id = 266,
89 .info_base_id = 266, 91 .info_base_id = 138,
90 .fifo_base_id = 202, 92 .fifo_base_id = 202,
91 }, 93 },
92}; 94};
@@ -151,10 +153,8 @@ enum smd_channel_state {
151 * @name: name of the channel 153 * @name: name of the channel
152 * @state: local state of the channel 154 * @state: local state of the channel
153 * @remote_state: remote state of the channel 155 * @remote_state: remote state of the channel
154 * @tx_info: byte aligned outgoing channel info 156 * @info: byte aligned outgoing/incoming channel info
155 * @rx_info: byte aligned incoming channel info 157 * @info_word: word aligned outgoing/incoming channel info
156 * @tx_info_word: word aligned outgoing channel info
157 * @rx_info_word: word aligned incoming channel info
158 * @tx_lock: lock to make writes to the channel mutually exclusive 158 * @tx_lock: lock to make writes to the channel mutually exclusive
159 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR 159 * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR
160 * @tx_fifo: pointer to the outgoing ring buffer 160 * @tx_fifo: pointer to the outgoing ring buffer
@@ -175,11 +175,8 @@ struct qcom_smd_channel {
175 enum smd_channel_state state; 175 enum smd_channel_state state;
176 enum smd_channel_state remote_state; 176 enum smd_channel_state remote_state;
177 177
178 struct smd_channel_info *tx_info; 178 struct smd_channel_info_pair *info;
179 struct smd_channel_info *rx_info; 179 struct smd_channel_info_word_pair *info_word;
180
181 struct smd_channel_info_word *tx_info_word;
182 struct smd_channel_info_word *rx_info_word;
183 180
184 struct mutex tx_lock; 181 struct mutex tx_lock;
185 wait_queue_head_t fblockread_event; 182 wait_queue_head_t fblockread_event;
@@ -215,7 +212,7 @@ struct qcom_smd {
215 * Format of the smd_info smem items, for byte aligned channels. 212 * Format of the smd_info smem items, for byte aligned channels.
216 */ 213 */
217struct smd_channel_info { 214struct smd_channel_info {
218 u32 state; 215 __le32 state;
219 u8 fDSR; 216 u8 fDSR;
220 u8 fCTS; 217 u8 fCTS;
221 u8 fCD; 218 u8 fCD;
@@ -224,46 +221,104 @@ struct smd_channel_info {
224 u8 fTAIL; 221 u8 fTAIL;
225 u8 fSTATE; 222 u8 fSTATE;
226 u8 fBLOCKREADINTR; 223 u8 fBLOCKREADINTR;
227 u32 tail; 224 __le32 tail;
228 u32 head; 225 __le32 head;
226};
227
228struct smd_channel_info_pair {
229 struct smd_channel_info tx;
230 struct smd_channel_info rx;
229}; 231};
230 232
231/* 233/*
232 * Format of the smd_info smem items, for word aligned channels. 234 * Format of the smd_info smem items, for word aligned channels.
233 */ 235 */
234struct smd_channel_info_word { 236struct smd_channel_info_word {
235 u32 state; 237 __le32 state;
236 u32 fDSR; 238 __le32 fDSR;
237 u32 fCTS; 239 __le32 fCTS;
238 u32 fCD; 240 __le32 fCD;
239 u32 fRI; 241 __le32 fRI;
240 u32 fHEAD; 242 __le32 fHEAD;
241 u32 fTAIL; 243 __le32 fTAIL;
242 u32 fSTATE; 244 __le32 fSTATE;
243 u32 fBLOCKREADINTR; 245 __le32 fBLOCKREADINTR;
244 u32 tail; 246 __le32 tail;
245 u32 head; 247 __le32 head;
246}; 248};
247 249
248#define GET_RX_CHANNEL_INFO(channel, param) \ 250struct smd_channel_info_word_pair {
249 (channel->rx_info_word ? \ 251 struct smd_channel_info_word tx;
250 channel->rx_info_word->param : \ 252 struct smd_channel_info_word rx;
251 channel->rx_info->param) 253};
252
253#define SET_RX_CHANNEL_INFO(channel, param, value) \
254 (channel->rx_info_word ? \
255 (channel->rx_info_word->param = value) : \
256 (channel->rx_info->param = value))
257
258#define GET_TX_CHANNEL_INFO(channel, param) \
259 (channel->tx_info_word ? \
260 channel->tx_info_word->param : \
261 channel->tx_info->param)
262 254
263#define SET_TX_CHANNEL_INFO(channel, param, value) \ 255#define GET_RX_CHANNEL_FLAG(channel, param) \
264 (channel->tx_info_word ? \ 256 ({ \
265 (channel->tx_info_word->param = value) : \ 257 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
266 (channel->tx_info->param = value)) 258 channel->info_word ? \
259 le32_to_cpu(channel->info_word->rx.param) : \
260 channel->info->rx.param; \
261 })
262
263#define GET_RX_CHANNEL_INFO(channel, param) \
264 ({ \
265 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
266 le32_to_cpu(channel->info_word ? \
267 channel->info_word->rx.param : \
268 channel->info->rx.param); \
269 })
270
271#define SET_RX_CHANNEL_FLAG(channel, param, value) \
272 ({ \
273 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \
274 if (channel->info_word) \
275 channel->info_word->rx.param = cpu_to_le32(value); \
276 else \
277 channel->info->rx.param = value; \
278 })
279
280#define SET_RX_CHANNEL_INFO(channel, param, value) \
281 ({ \
282 BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \
283 if (channel->info_word) \
284 channel->info_word->rx.param = cpu_to_le32(value); \
285 else \
286 channel->info->rx.param = cpu_to_le32(value); \
287 })
288
289#define GET_TX_CHANNEL_FLAG(channel, param) \
290 ({ \
291 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
292 channel->info_word ? \
293 le32_to_cpu(channel->info_word->tx.param) : \
294 channel->info->tx.param; \
295 })
296
297#define GET_TX_CHANNEL_INFO(channel, param) \
298 ({ \
299 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
300 le32_to_cpu(channel->info_word ? \
301 channel->info_word->tx.param : \
302 channel->info->tx.param); \
303 })
304
305#define SET_TX_CHANNEL_FLAG(channel, param, value) \
306 ({ \
307 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \
308 if (channel->info_word) \
309 channel->info_word->tx.param = cpu_to_le32(value); \
310 else \
311 channel->info->tx.param = value; \
312 })
313
314#define SET_TX_CHANNEL_INFO(channel, param, value) \
315 ({ \
316 BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \
317 if (channel->info_word) \
318 channel->info_word->tx.param = cpu_to_le32(value); \
319 else \
320 channel->info->tx.param = cpu_to_le32(value); \
321 })
267 322
268/** 323/**
269 * struct qcom_smd_alloc_entry - channel allocation entry 324 * struct qcom_smd_alloc_entry - channel allocation entry
@@ -274,9 +329,9 @@ struct smd_channel_info_word {
274 */ 329 */
275struct qcom_smd_alloc_entry { 330struct qcom_smd_alloc_entry {
276 u8 name[20]; 331 u8 name[20];
277 u32 cid; 332 __le32 cid;
278 u32 flags; 333 __le32 flags;
279 u32 ref_count; 334 __le32 ref_count;
280} __packed; 335} __packed;
281 336
282#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff 337#define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff
@@ -305,14 +360,14 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
305static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) 360static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
306{ 361{
307 SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); 362 SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
308 SET_TX_CHANNEL_INFO(channel, fDSR, 0); 363 SET_TX_CHANNEL_FLAG(channel, fDSR, 0);
309 SET_TX_CHANNEL_INFO(channel, fCTS, 0); 364 SET_TX_CHANNEL_FLAG(channel, fCTS, 0);
310 SET_TX_CHANNEL_INFO(channel, fCD, 0); 365 SET_TX_CHANNEL_FLAG(channel, fCD, 0);
311 SET_TX_CHANNEL_INFO(channel, fRI, 0); 366 SET_TX_CHANNEL_FLAG(channel, fRI, 0);
312 SET_TX_CHANNEL_INFO(channel, fHEAD, 0); 367 SET_TX_CHANNEL_FLAG(channel, fHEAD, 0);
313 SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 368 SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
314 SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 369 SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
315 SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); 370 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
316 SET_TX_CHANNEL_INFO(channel, head, 0); 371 SET_TX_CHANNEL_INFO(channel, head, 0);
317 SET_TX_CHANNEL_INFO(channel, tail, 0); 372 SET_TX_CHANNEL_INFO(channel, tail, 0);
318 373
@@ -350,12 +405,12 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
350 405
351 dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); 406 dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state);
352 407
353 SET_TX_CHANNEL_INFO(channel, fDSR, is_open); 408 SET_TX_CHANNEL_FLAG(channel, fDSR, is_open);
354 SET_TX_CHANNEL_INFO(channel, fCTS, is_open); 409 SET_TX_CHANNEL_FLAG(channel, fCTS, is_open);
355 SET_TX_CHANNEL_INFO(channel, fCD, is_open); 410 SET_TX_CHANNEL_FLAG(channel, fCD, is_open);
356 411
357 SET_TX_CHANNEL_INFO(channel, state, state); 412 SET_TX_CHANNEL_INFO(channel, state, state);
358 SET_TX_CHANNEL_INFO(channel, fSTATE, 1); 413 SET_TX_CHANNEL_FLAG(channel, fSTATE, 1);
359 414
360 channel->state = state; 415 channel->state = state;
361 qcom_smd_signal_channel(channel); 416 qcom_smd_signal_channel(channel);
@@ -364,20 +419,15 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel,
364/* 419/*
365 * Copy count bytes of data using 32bit accesses, if that's required. 420 * Copy count bytes of data using 32bit accesses, if that's required.
366 */ 421 */
367static void smd_copy_to_fifo(void __iomem *_dst, 422static void smd_copy_to_fifo(void __iomem *dst,
368 const void *_src, 423 const void *src,
369 size_t count, 424 size_t count,
370 bool word_aligned) 425 bool word_aligned)
371{ 426{
372 u32 *dst = (u32 *)_dst;
373 u32 *src = (u32 *)_src;
374
375 if (word_aligned) { 427 if (word_aligned) {
376 count /= sizeof(u32); 428 __iowrite32_copy(dst, src, count / sizeof(u32));
377 while (count--)
378 writel_relaxed(*src++, dst++);
379 } else { 429 } else {
380 memcpy_toio(_dst, _src, count); 430 memcpy_toio(dst, src, count);
381 } 431 }
382} 432}
383 433
@@ -395,7 +445,7 @@ static void smd_copy_from_fifo(void *_dst,
395 if (word_aligned) { 445 if (word_aligned) {
396 count /= sizeof(u32); 446 count /= sizeof(u32);
397 while (count--) 447 while (count--)
398 *dst++ = readl_relaxed(src++); 448 *dst++ = __raw_readl(src++);
399 } else { 449 } else {
400 memcpy_fromio(_dst, _src, count); 450 memcpy_fromio(_dst, _src, count);
401 } 451 }
@@ -412,7 +462,7 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel,
412 unsigned tail; 462 unsigned tail;
413 size_t len; 463 size_t len;
414 464
415 word_aligned = channel->rx_info_word != NULL; 465 word_aligned = channel->info_word;
416 tail = GET_RX_CHANNEL_INFO(channel, tail); 466 tail = GET_RX_CHANNEL_INFO(channel, tail);
417 467
418 len = min_t(size_t, count, channel->fifo_size - tail); 468 len = min_t(size_t, count, channel->fifo_size - tail);
@@ -491,7 +541,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
491{ 541{
492 bool need_state_scan = false; 542 bool need_state_scan = false;
493 int remote_state; 543 int remote_state;
494 u32 pktlen; 544 __le32 pktlen;
495 int avail; 545 int avail;
496 int ret; 546 int ret;
497 547
@@ -502,10 +552,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
502 need_state_scan = true; 552 need_state_scan = true;
503 } 553 }
504 /* Indicate that we have seen any state change */ 554 /* Indicate that we have seen any state change */
505 SET_RX_CHANNEL_INFO(channel, fSTATE, 0); 555 SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
506 556
507 /* Signal waiting qcom_smd_send() about the interrupt */ 557 /* Signal waiting qcom_smd_send() about the interrupt */
508 if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR)) 558 if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
509 wake_up_interruptible(&channel->fblockread_event); 559 wake_up_interruptible(&channel->fblockread_event);
510 560
511 /* Don't consume any data until we've opened the channel */ 561 /* Don't consume any data until we've opened the channel */
@@ -513,7 +563,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
513 goto out; 563 goto out;
514 564
515 /* Indicate that we've seen the new data */ 565 /* Indicate that we've seen the new data */
516 SET_RX_CHANNEL_INFO(channel, fHEAD, 0); 566 SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
517 567
518 /* Consume data */ 568 /* Consume data */
519 for (;;) { 569 for (;;) {
@@ -522,7 +572,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
522 if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { 572 if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) {
523 qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); 573 qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen));
524 qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); 574 qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN);
525 channel->pkt_size = pktlen; 575 channel->pkt_size = le32_to_cpu(pktlen);
526 } else if (channel->pkt_size && avail >= channel->pkt_size) { 576 } else if (channel->pkt_size && avail >= channel->pkt_size) {
527 ret = qcom_smd_channel_recv_single(channel); 577 ret = qcom_smd_channel_recv_single(channel);
528 if (ret) 578 if (ret)
@@ -533,10 +583,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
533 } 583 }
534 584
535 /* Indicate that we have seen and updated tail */ 585 /* Indicate that we have seen and updated tail */
536 SET_RX_CHANNEL_INFO(channel, fTAIL, 1); 586 SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
537 587
538 /* Signal the remote that we've consumed the data (if requested) */ 588 /* Signal the remote that we've consumed the data (if requested) */
539 if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) { 589 if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) {
540 /* Ensure ordering of channel info updates */ 590 /* Ensure ordering of channel info updates */
541 wmb(); 591 wmb();
542 592
@@ -627,7 +677,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
627 unsigned head; 677 unsigned head;
628 size_t len; 678 size_t len;
629 679
630 word_aligned = channel->tx_info_word != NULL; 680 word_aligned = channel->info_word;
631 head = GET_TX_CHANNEL_INFO(channel, head); 681 head = GET_TX_CHANNEL_INFO(channel, head);
632 682
633 len = min_t(size_t, count, channel->fifo_size - head); 683 len = min_t(size_t, count, channel->fifo_size - head);
@@ -665,12 +715,16 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
665 */ 715 */
666int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) 716int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
667{ 717{
668 u32 hdr[5] = {len,}; 718 __le32 hdr[5] = { cpu_to_le32(len), };
669 int tlen = sizeof(hdr) + len; 719 int tlen = sizeof(hdr) + len;
670 int ret; 720 int ret;
671 721
672 /* Word aligned channels only accept word size aligned data */ 722 /* Word aligned channels only accept word size aligned data */
673 if (channel->rx_info_word != NULL && len % 4) 723 if (channel->info_word && len % 4)
724 return -EINVAL;
725
726 /* Reject packets that are too big */
727 if (tlen >= channel->fifo_size)
674 return -EINVAL; 728 return -EINVAL;
675 729
676 ret = mutex_lock_interruptible(&channel->tx_lock); 730 ret = mutex_lock_interruptible(&channel->tx_lock);
@@ -683,7 +737,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
683 goto out; 737 goto out;
684 } 738 }
685 739
686 SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); 740 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
687 741
688 ret = wait_event_interruptible(channel->fblockread_event, 742 ret = wait_event_interruptible(channel->fblockread_event,
689 qcom_smd_get_tx_avail(channel) >= tlen || 743 qcom_smd_get_tx_avail(channel) >= tlen ||
@@ -691,15 +745,15 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
691 if (ret) 745 if (ret)
692 goto out; 746 goto out;
693 747
694 SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); 748 SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
695 } 749 }
696 750
697 SET_TX_CHANNEL_INFO(channel, fTAIL, 0); 751 SET_TX_CHANNEL_FLAG(channel, fTAIL, 0);
698 752
699 qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); 753 qcom_smd_write_fifo(channel, hdr, sizeof(hdr));
700 qcom_smd_write_fifo(channel, data, len); 754 qcom_smd_write_fifo(channel, data, len);
701 755
702 SET_TX_CHANNEL_INFO(channel, fHEAD, 1); 756 SET_TX_CHANNEL_FLAG(channel, fHEAD, 1);
703 757
704 /* Ensure ordering of channel info updates */ 758 /* Ensure ordering of channel info updates */
705 wmb(); 759 wmb();
@@ -727,6 +781,19 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev)
727 781
728static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) 782static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv)
729{ 783{
784 struct qcom_smd_device *qsdev = to_smd_device(dev);
785 struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver);
786 const struct qcom_smd_id *match = qsdrv->smd_match_table;
787 const char *name = qsdev->channel->name;
788
789 if (match) {
790 while (match->name[0]) {
791 if (!strcmp(match->name, name))
792 return 1;
793 match++;
794 }
795 }
796
730 return of_driver_match_device(dev, drv); 797 return of_driver_match_device(dev, drv);
731} 798}
732 799
@@ -854,10 +921,8 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
854 for_each_available_child_of_node(edge_node, child) { 921 for_each_available_child_of_node(edge_node, child) {
855 key = "qcom,smd-channels"; 922 key = "qcom,smd-channels";
856 ret = of_property_read_string(child, key, &name); 923 ret = of_property_read_string(child, key, &name);
857 if (ret) { 924 if (ret)
858 of_node_put(child);
859 continue; 925 continue;
860 }
861 926
862 if (strcmp(name, channel) == 0) 927 if (strcmp(name, channel) == 0)
863 return child; 928 return child;
@@ -880,19 +945,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel)
880 if (channel->qsdev) 945 if (channel->qsdev)
881 return -EEXIST; 946 return -EEXIST;
882 947
883 node = qcom_smd_match_channel(edge->of_node, channel->name);
884 if (!node) {
885 dev_dbg(smd->dev, "no match for '%s'\n", channel->name);
886 return -ENXIO;
887 }
888
889 dev_dbg(smd->dev, "registering '%s'\n", channel->name); 948 dev_dbg(smd->dev, "registering '%s'\n", channel->name);
890 949
891 qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); 950 qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL);
892 if (!qsdev) 951 if (!qsdev)
893 return -ENOMEM; 952 return -ENOMEM;
894 953
895 dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name); 954 node = qcom_smd_match_channel(edge->of_node, channel->name);
955 dev_set_name(&qsdev->dev, "%s.%s",
956 edge->of_node->name,
957 node ? node->name : channel->name);
958
896 qsdev->dev.parent = smd->dev; 959 qsdev->dev.parent = smd->dev;
897 qsdev->dev.bus = &qcom_smd_bus; 960 qsdev->dev.bus = &qcom_smd_bus;
898 qsdev->dev.release = qcom_smd_release_device; 961 qsdev->dev.release = qcom_smd_release_device;
@@ -978,21 +1041,20 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
978 spin_lock_init(&channel->recv_lock); 1041 spin_lock_init(&channel->recv_lock);
979 init_waitqueue_head(&channel->fblockread_event); 1042 init_waitqueue_head(&channel->fblockread_event);
980 1043
981 ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info, 1044 info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size);
982 &info_size); 1045 if (IS_ERR(info)) {
983 if (ret) 1046 ret = PTR_ERR(info);
984 goto free_name_and_channel; 1047 goto free_name_and_channel;
1048 }
985 1049
986 /* 1050 /*
987 * Use the size of the item to figure out which channel info struct to 1051 * Use the size of the item to figure out which channel info struct to
988 * use. 1052 * use.
989 */ 1053 */
990 if (info_size == 2 * sizeof(struct smd_channel_info_word)) { 1054 if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
991 channel->tx_info_word = info; 1055 channel->info_word = info;
992 channel->rx_info_word = info + sizeof(struct smd_channel_info_word);
993 } else if (info_size == 2 * sizeof(struct smd_channel_info)) { 1056 } else if (info_size == 2 * sizeof(struct smd_channel_info)) {
994 channel->tx_info = info; 1057 channel->info = info;
995 channel->rx_info = info + sizeof(struct smd_channel_info);
996 } else { 1058 } else {
997 dev_err(smd->dev, 1059 dev_err(smd->dev,
998 "channel info of size %zu not supported\n", info_size); 1060 "channel info of size %zu not supported\n", info_size);
@@ -1000,10 +1062,11 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
1000 goto free_name_and_channel; 1062 goto free_name_and_channel;
1001 } 1063 }
1002 1064
1003 ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base, 1065 fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size);
1004 &fifo_size); 1066 if (IS_ERR(fifo_base)) {
1005 if (ret) 1067 ret = PTR_ERR(fifo_base);
1006 goto free_name_and_channel; 1068 goto free_name_and_channel;
1069 }
1007 1070
1008 /* The channel consists of an rx and tx fifo of equal size */ 1071 /* The channel consists of an rx and tx fifo of equal size */
1009 fifo_size /= 2; 1072 fifo_size /= 2;
@@ -1040,20 +1103,19 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
1040 unsigned long flags; 1103 unsigned long flags;
1041 unsigned fifo_id; 1104 unsigned fifo_id;
1042 unsigned info_id; 1105 unsigned info_id;
1043 int ret;
1044 int tbl; 1106 int tbl;
1045 int i; 1107 int i;
1108 u32 eflags, cid;
1046 1109
1047 for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { 1110 for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
1048 ret = qcom_smem_get(edge->remote_pid, 1111 alloc_tbl = qcom_smem_get(edge->remote_pid,
1049 smem_items[tbl].alloc_tbl_id, 1112 smem_items[tbl].alloc_tbl_id, NULL);
1050 (void **)&alloc_tbl, 1113 if (IS_ERR(alloc_tbl))
1051 NULL);
1052 if (ret < 0)
1053 continue; 1114 continue;
1054 1115
1055 for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { 1116 for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
1056 entry = &alloc_tbl[i]; 1117 entry = &alloc_tbl[i];
1118 eflags = le32_to_cpu(entry->flags);
1057 if (test_bit(i, edge->allocated[tbl])) 1119 if (test_bit(i, edge->allocated[tbl]))
1058 continue; 1120 continue;
1059 1121
@@ -1063,14 +1125,15 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
1063 if (!entry->name[0]) 1125 if (!entry->name[0])
1064 continue; 1126 continue;
1065 1127
1066 if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET)) 1128 if (!(eflags & SMD_CHANNEL_FLAGS_PACKET))
1067 continue; 1129 continue;
1068 1130
1069 if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) 1131 if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id)
1070 continue; 1132 continue;
1071 1133
1072 info_id = smem_items[tbl].info_base_id + entry->cid; 1134 cid = le32_to_cpu(entry->cid);
1073 fifo_id = smem_items[tbl].fifo_base_id + entry->cid; 1135 info_id = smem_items[tbl].info_base_id + cid;
1136 fifo_id = smem_items[tbl].fifo_base_id + cid;
1074 1137
1075 channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); 1138 channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name);
1076 if (IS_ERR(channel)) 1139 if (IS_ERR(channel))
@@ -1227,11 +1290,12 @@ static int qcom_smd_probe(struct platform_device *pdev)
1227 int num_edges; 1290 int num_edges;
1228 int ret; 1291 int ret;
1229 int i = 0; 1292 int i = 0;
1293 void *p;
1230 1294
1231 /* Wait for smem */ 1295 /* Wait for smem */
1232 ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); 1296 p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL);
1233 if (ret == -EPROBE_DEFER) 1297 if (PTR_ERR(p) == -EPROBE_DEFER)
1234 return ret; 1298 return PTR_ERR(p);
1235 1299
1236 num_edges = of_get_available_child_count(pdev->dev.of_node); 1300 num_edges = of_get_available_child_count(pdev->dev.of_node);
1237 array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); 1301 array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge);
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 52365188a1c2..19019aa092e8 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -92,9 +92,9 @@
92 * @params: parameters to the command 92 * @params: parameters to the command
93 */ 93 */
94struct smem_proc_comm { 94struct smem_proc_comm {
95 u32 command; 95 __le32 command;
96 u32 status; 96 __le32 status;
97 u32 params[2]; 97 __le32 params[2];
98}; 98};
99 99
100/** 100/**
@@ -106,10 +106,10 @@ struct smem_proc_comm {
106 * the default region. bits 0,1 are reserved 106 * the default region. bits 0,1 are reserved
107 */ 107 */
108struct smem_global_entry { 108struct smem_global_entry {
109 u32 allocated; 109 __le32 allocated;
110 u32 offset; 110 __le32 offset;
111 u32 size; 111 __le32 size;
112 u32 aux_base; /* bits 1:0 reserved */ 112 __le32 aux_base; /* bits 1:0 reserved */
113}; 113};
114#define AUX_BASE_MASK 0xfffffffc 114#define AUX_BASE_MASK 0xfffffffc
115 115
@@ -125,11 +125,11 @@ struct smem_global_entry {
125 */ 125 */
126struct smem_header { 126struct smem_header {
127 struct smem_proc_comm proc_comm[4]; 127 struct smem_proc_comm proc_comm[4];
128 u32 version[32]; 128 __le32 version[32];
129 u32 initialized; 129 __le32 initialized;
130 u32 free_offset; 130 __le32 free_offset;
131 u32 available; 131 __le32 available;
132 u32 reserved; 132 __le32 reserved;
133 struct smem_global_entry toc[SMEM_ITEM_COUNT]; 133 struct smem_global_entry toc[SMEM_ITEM_COUNT];
134}; 134};
135 135
@@ -143,12 +143,12 @@ struct smem_header {
143 * @reserved: reserved entries for later use 143 * @reserved: reserved entries for later use
144 */ 144 */
145struct smem_ptable_entry { 145struct smem_ptable_entry {
146 u32 offset; 146 __le32 offset;
147 u32 size; 147 __le32 size;
148 u32 flags; 148 __le32 flags;
149 u16 host0; 149 __le16 host0;
150 u16 host1; 150 __le16 host1;
151 u32 reserved[8]; 151 __le32 reserved[8];
152}; 152};
153 153
154/** 154/**
@@ -160,13 +160,14 @@ struct smem_ptable_entry {
160 * @entry: list of @smem_ptable_entry for the @num_entries partitions 160 * @entry: list of @smem_ptable_entry for the @num_entries partitions
161 */ 161 */
162struct smem_ptable { 162struct smem_ptable {
163 u32 magic; 163 u8 magic[4];
164 u32 version; 164 __le32 version;
165 u32 num_entries; 165 __le32 num_entries;
166 u32 reserved[5]; 166 __le32 reserved[5];
167 struct smem_ptable_entry entry[]; 167 struct smem_ptable_entry entry[];
168}; 168};
169#define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */ 169
170static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */
170 171
171/** 172/**
172 * struct smem_partition_header - header of the partitions 173 * struct smem_partition_header - header of the partitions
@@ -181,15 +182,16 @@ struct smem_ptable {
181 * @reserved: for now reserved entries 182 * @reserved: for now reserved entries
182 */ 183 */
183struct smem_partition_header { 184struct smem_partition_header {
184 u32 magic; 185 u8 magic[4];
185 u16 host0; 186 __le16 host0;
186 u16 host1; 187 __le16 host1;
187 u32 size; 188 __le32 size;
188 u32 offset_free_uncached; 189 __le32 offset_free_uncached;
189 u32 offset_free_cached; 190 __le32 offset_free_cached;
190 u32 reserved[3]; 191 __le32 reserved[3];
191}; 192};
192#define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */ 193
194static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 };
193 195
194/** 196/**
195 * struct smem_private_entry - header of each item in the private partition 197 * struct smem_private_entry - header of each item in the private partition
@@ -201,12 +203,12 @@ struct smem_partition_header {
201 * @reserved: for now reserved entry 203 * @reserved: for now reserved entry
202 */ 204 */
203struct smem_private_entry { 205struct smem_private_entry {
204 u16 canary; 206 u16 canary; /* bytes are the same so no swapping needed */
205 u16 item; 207 __le16 item;
206 u32 size; /* includes padding bytes */ 208 __le32 size; /* includes padding bytes */
207 u16 padding_data; 209 __le16 padding_data;
208 u16 padding_hdr; 210 __le16 padding_hdr;
209 u32 reserved; 211 __le32 reserved;
210}; 212};
211#define SMEM_PRIVATE_CANARY 0xa5a5 213#define SMEM_PRIVATE_CANARY 0xa5a5
212 214
@@ -242,6 +244,45 @@ struct qcom_smem {
242 struct smem_region regions[0]; 244 struct smem_region regions[0];
243}; 245};
244 246
247static struct smem_private_entry *
248phdr_to_last_private_entry(struct smem_partition_header *phdr)
249{
250 void *p = phdr;
251
252 return p + le32_to_cpu(phdr->offset_free_uncached);
253}
254
255static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr)
256{
257 void *p = phdr;
258
259 return p + le32_to_cpu(phdr->offset_free_cached);
260}
261
262static struct smem_private_entry *
263phdr_to_first_private_entry(struct smem_partition_header *phdr)
264{
265 void *p = phdr;
266
267 return p + sizeof(*phdr);
268}
269
270static struct smem_private_entry *
271private_entry_next(struct smem_private_entry *e)
272{
273 void *p = e;
274
275 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) +
276 le32_to_cpu(e->size);
277}
278
279static void *entry_to_item(struct smem_private_entry *e)
280{
281 void *p = e;
282
283 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr);
284}
285
245/* Pointer to the one and only smem handle */ 286/* Pointer to the one and only smem handle */
246static struct qcom_smem *__smem; 287static struct qcom_smem *__smem;
247 288
@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
254 size_t size) 295 size_t size)
255{ 296{
256 struct smem_partition_header *phdr; 297 struct smem_partition_header *phdr;
257 struct smem_private_entry *hdr; 298 struct smem_private_entry *hdr, *end;
258 size_t alloc_size; 299 size_t alloc_size;
259 void *p; 300 void *cached;
260 301
261 phdr = smem->partitions[host]; 302 phdr = smem->partitions[host];
303 hdr = phdr_to_first_private_entry(phdr);
304 end = phdr_to_last_private_entry(phdr);
305 cached = phdr_to_first_cached_entry(phdr);
262 306
263 p = (void *)phdr + sizeof(*phdr); 307 while (hdr < end) {
264 while (p < (void *)phdr + phdr->offset_free_uncached) {
265 hdr = p;
266
267 if (hdr->canary != SMEM_PRIVATE_CANARY) { 308 if (hdr->canary != SMEM_PRIVATE_CANARY) {
268 dev_err(smem->dev, 309 dev_err(smem->dev,
269 "Found invalid canary in host %d partition\n", 310 "Found invalid canary in host %d partition\n",
@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
271 return -EINVAL; 312 return -EINVAL;
272 } 313 }
273 314
274 if (hdr->item == item) 315 if (le16_to_cpu(hdr->item) == item)
275 return -EEXIST; 316 return -EEXIST;
276 317
277 p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 318 hdr = private_entry_next(hdr);
278 } 319 }
279 320
280 /* Check that we don't grow into the cached region */ 321 /* Check that we don't grow into the cached region */
281 alloc_size = sizeof(*hdr) + ALIGN(size, 8); 322 alloc_size = sizeof(*hdr) + ALIGN(size, 8);
282 if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { 323 if ((void *)hdr + alloc_size >= cached) {
283 dev_err(smem->dev, "Out of memory\n"); 324 dev_err(smem->dev, "Out of memory\n");
284 return -ENOSPC; 325 return -ENOSPC;
285 } 326 }
286 327
287 hdr = p;
288 hdr->canary = SMEM_PRIVATE_CANARY; 328 hdr->canary = SMEM_PRIVATE_CANARY;
289 hdr->item = item; 329 hdr->item = cpu_to_le16(item);
290 hdr->size = ALIGN(size, 8); 330 hdr->size = cpu_to_le32(ALIGN(size, 8));
291 hdr->padding_data = hdr->size - size; 331 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size);
292 hdr->padding_hdr = 0; 332 hdr->padding_hdr = 0;
293 333
294 /* 334 /*
@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
297 * gets a consistent view of the linked list. 337 * gets a consistent view of the linked list.
298 */ 338 */
299 wmb(); 339 wmb();
300 phdr->offset_free_uncached += alloc_size; 340 le32_add_cpu(&phdr->offset_free_uncached, alloc_size);
301 341
302 return 0; 342 return 0;
303} 343}
@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
318 return -EEXIST; 358 return -EEXIST;
319 359
320 size = ALIGN(size, 8); 360 size = ALIGN(size, 8);
321 if (WARN_ON(size > header->available)) 361 if (WARN_ON(size > le32_to_cpu(header->available)))
322 return -ENOMEM; 362 return -ENOMEM;
323 363
324 entry->offset = header->free_offset; 364 entry->offset = header->free_offset;
325 entry->size = size; 365 entry->size = cpu_to_le32(size);
326 366
327 /* 367 /*
328 * Ensure the header is consistent before we mark the item allocated, 368 * Ensure the header is consistent before we mark the item allocated,
@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem,
330 * even though they do not take the spinlock on read. 370 * even though they do not take the spinlock on read.
331 */ 371 */
332 wmb(); 372 wmb();
333 entry->allocated = 1; 373 entry->allocated = cpu_to_le32(1);
334 374
335 header->free_offset += size; 375 le32_add_cpu(&header->free_offset, size);
336 header->available -= size; 376 le32_add_cpu(&header->available, -size);
337 377
338 return 0; 378 return 0;
339} 379}
@@ -378,10 +418,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
378} 418}
379EXPORT_SYMBOL(qcom_smem_alloc); 419EXPORT_SYMBOL(qcom_smem_alloc);
380 420
381static int qcom_smem_get_global(struct qcom_smem *smem, 421static void *qcom_smem_get_global(struct qcom_smem *smem,
382 unsigned item, 422 unsigned item,
383 void **ptr, 423 size_t *size)
384 size_t *size)
385{ 424{
386 struct smem_header *header; 425 struct smem_header *header;
387 struct smem_region *area; 426 struct smem_region *area;
@@ -390,100 +429,94 @@ static int qcom_smem_get_global(struct qcom_smem *smem,
390 unsigned i; 429 unsigned i;
391 430
392 if (WARN_ON(item >= SMEM_ITEM_COUNT)) 431 if (WARN_ON(item >= SMEM_ITEM_COUNT))
393 return -EINVAL; 432 return ERR_PTR(-EINVAL);
394 433
395 header = smem->regions[0].virt_base; 434 header = smem->regions[0].virt_base;
396 entry = &header->toc[item]; 435 entry = &header->toc[item];
397 if (!entry->allocated) 436 if (!entry->allocated)
398 return -ENXIO; 437 return ERR_PTR(-ENXIO);
399 438
400 if (ptr != NULL) { 439 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK;
401 aux_base = entry->aux_base & AUX_BASE_MASK;
402 440
403 for (i = 0; i < smem->num_regions; i++) { 441 for (i = 0; i < smem->num_regions; i++) {
404 area = &smem->regions[i]; 442 area = &smem->regions[i];
405 443
406 if (area->aux_base == aux_base || !aux_base) { 444 if (area->aux_base == aux_base || !aux_base) {
407 *ptr = area->virt_base + entry->offset; 445 if (size != NULL)
408 break; 446 *size = le32_to_cpu(entry->size);
409 } 447 return area->virt_base + le32_to_cpu(entry->offset);
410 } 448 }
411 } 449 }
412 if (size != NULL)
413 *size = entry->size;
414 450
415 return 0; 451 return ERR_PTR(-ENOENT);
416} 452}
417 453
418static int qcom_smem_get_private(struct qcom_smem *smem, 454static void *qcom_smem_get_private(struct qcom_smem *smem,
419 unsigned host, 455 unsigned host,
420 unsigned item, 456 unsigned item,
421 void **ptr, 457 size_t *size)
422 size_t *size)
423{ 458{
424 struct smem_partition_header *phdr; 459 struct smem_partition_header *phdr;
425 struct smem_private_entry *hdr; 460 struct smem_private_entry *e, *end;
426 void *p;
427 461
428 phdr = smem->partitions[host]; 462 phdr = smem->partitions[host];
463 e = phdr_to_first_private_entry(phdr);
464 end = phdr_to_last_private_entry(phdr);
429 465
430 p = (void *)phdr + sizeof(*phdr); 466 while (e < end) {
431 while (p < (void *)phdr + phdr->offset_free_uncached) { 467 if (e->canary != SMEM_PRIVATE_CANARY) {
432 hdr = p;
433
434 if (hdr->canary != SMEM_PRIVATE_CANARY) {
435 dev_err(smem->dev, 468 dev_err(smem->dev,
436 "Found invalid canary in host %d partition\n", 469 "Found invalid canary in host %d partition\n",
437 host); 470 host);
438 return -EINVAL; 471 return ERR_PTR(-EINVAL);
439 } 472 }
440 473
441 if (hdr->item == item) { 474 if (le16_to_cpu(e->item) == item) {
442 if (ptr != NULL)
443 *ptr = p + sizeof(*hdr) + hdr->padding_hdr;
444
445 if (size != NULL) 475 if (size != NULL)
446 *size = hdr->size - hdr->padding_data; 476 *size = le32_to_cpu(e->size) -
477 le16_to_cpu(e->padding_data);
447 478
448 return 0; 479 return entry_to_item(e);
449 } 480 }
450 481
451 p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; 482 e = private_entry_next(e);
452 } 483 }
453 484
454 return -ENOENT; 485 return ERR_PTR(-ENOENT);
455} 486}
456 487
457/** 488/**
458 * qcom_smem_get() - resolve ptr and size of a smem item 489 * qcom_smem_get() - resolve ptr and size of a smem item
459 * @host: the remote processor, or -1 490 * @host: the remote processor, or -1
460 * @item: smem item handle 491 * @item: smem item handle
461 * @ptr: pointer to be filled out with address of the item
462 * @size: pointer to be filled out with size of the item 492 * @size: pointer to be filled out with size of the item
463 * 493 *
464 * Looks up pointer and size of a smem item. 494 * Looks up smem item and returns pointer to it. Size of smem
495 * item is returned in @size.
465 */ 496 */
466int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) 497void *qcom_smem_get(unsigned host, unsigned item, size_t *size)
467{ 498{
468 unsigned long flags; 499 unsigned long flags;
469 int ret; 500 int ret;
501 void *ptr = ERR_PTR(-EPROBE_DEFER);
470 502
471 if (!__smem) 503 if (!__smem)
472 return -EPROBE_DEFER; 504 return ptr;
473 505
474 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, 506 ret = hwspin_lock_timeout_irqsave(__smem->hwlock,
475 HWSPINLOCK_TIMEOUT, 507 HWSPINLOCK_TIMEOUT,
476 &flags); 508 &flags);
477 if (ret) 509 if (ret)
478 return ret; 510 return ERR_PTR(ret);
479 511
480 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) 512 if (host < SMEM_HOST_COUNT && __smem->partitions[host])
481 ret = qcom_smem_get_private(__smem, host, item, ptr, size); 513 ptr = qcom_smem_get_private(__smem, host, item, size);
482 else 514 else
483 ret = qcom_smem_get_global(__smem, item, ptr, size); 515 ptr = qcom_smem_get_global(__smem, item, size);
484 516
485 hwspin_unlock_irqrestore(__smem->hwlock, &flags); 517 hwspin_unlock_irqrestore(__smem->hwlock, &flags);
486 return ret; 518
519 return ptr;
487 520
488} 521}
489EXPORT_SYMBOL(qcom_smem_get); 522EXPORT_SYMBOL(qcom_smem_get);
@@ -506,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host)
506 539
507 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { 540 if (host < SMEM_HOST_COUNT && __smem->partitions[host]) {
508 phdr = __smem->partitions[host]; 541 phdr = __smem->partitions[host];
509 ret = phdr->offset_free_cached - phdr->offset_free_uncached; 542 ret = le32_to_cpu(phdr->offset_free_cached) -
543 le32_to_cpu(phdr->offset_free_uncached);
510 } else { 544 } else {
511 header = __smem->regions[0].virt_base; 545 header = __smem->regions[0].virt_base;
512 ret = header->available; 546 ret = le32_to_cpu(header->available);
513 } 547 }
514 548
515 return ret; 549 return ret;
@@ -518,13 +552,11 @@ EXPORT_SYMBOL(qcom_smem_get_free_space);
518 552
519static int qcom_smem_get_sbl_version(struct qcom_smem *smem) 553static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
520{ 554{
521 unsigned *versions; 555 __le32 *versions;
522 size_t size; 556 size_t size;
523 int ret;
524 557
525 ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, 558 versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size);
526 (void **)&versions, &size); 559 if (IS_ERR(versions)) {
527 if (ret < 0) {
528 dev_err(smem->dev, "Unable to read the version item\n"); 560 dev_err(smem->dev, "Unable to read the version item\n");
529 return -ENOENT; 561 return -ENOENT;
530 } 562 }
@@ -534,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem)
534 return -EINVAL; 566 return -EINVAL;
535 } 567 }
536 568
537 return versions[SMEM_MASTER_SBL_VERSION_INDEX]; 569 return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]);
538} 570}
539 571
540static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, 572static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
@@ -544,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
544 struct smem_ptable_entry *entry; 576 struct smem_ptable_entry *entry;
545 struct smem_ptable *ptable; 577 struct smem_ptable *ptable;
546 unsigned remote_host; 578 unsigned remote_host;
579 u32 version, host0, host1;
547 int i; 580 int i;
548 581
549 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; 582 ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K;
550 if (ptable->magic != SMEM_PTABLE_MAGIC) 583 if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic)))
551 return 0; 584 return 0;
552 585
553 if (ptable->version != 1) { 586 version = le32_to_cpu(ptable->version);
587 if (version != 1) {
554 dev_err(smem->dev, 588 dev_err(smem->dev,
555 "Unsupported partition header version %d\n", 589 "Unsupported partition header version %d\n", version);
556 ptable->version);
557 return -EINVAL; 590 return -EINVAL;
558 } 591 }
559 592
560 for (i = 0; i < ptable->num_entries; i++) { 593 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) {
561 entry = &ptable->entry[i]; 594 entry = &ptable->entry[i];
595 host0 = le16_to_cpu(entry->host0);
596 host1 = le16_to_cpu(entry->host1);
562 597
563 if (entry->host0 != local_host && entry->host1 != local_host) 598 if (host0 != local_host && host1 != local_host)
564 continue; 599 continue;
565 600
566 if (!entry->offset) 601 if (!le32_to_cpu(entry->offset))
567 continue; 602 continue;
568 603
569 if (!entry->size) 604 if (!le32_to_cpu(entry->size))
570 continue; 605 continue;
571 606
572 if (entry->host0 == local_host) 607 if (host0 == local_host)
573 remote_host = entry->host1; 608 remote_host = host1;
574 else 609 else
575 remote_host = entry->host0; 610 remote_host = host0;
576 611
577 if (remote_host >= SMEM_HOST_COUNT) { 612 if (remote_host >= SMEM_HOST_COUNT) {
578 dev_err(smem->dev, 613 dev_err(smem->dev,
@@ -588,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
588 return -EINVAL; 623 return -EINVAL;
589 } 624 }
590 625
591 header = smem->regions[0].virt_base + entry->offset; 626 header = smem->regions[0].virt_base + le32_to_cpu(entry->offset);
627 host0 = le16_to_cpu(header->host0);
628 host1 = le16_to_cpu(header->host1);
592 629
593 if (header->magic != SMEM_PART_MAGIC) { 630 if (memcmp(header->magic, SMEM_PART_MAGIC,
631 sizeof(header->magic))) {
594 dev_err(smem->dev, 632 dev_err(smem->dev,
595 "Partition %d has invalid magic\n", i); 633 "Partition %d has invalid magic\n", i);
596 return -EINVAL; 634 return -EINVAL;
597 } 635 }
598 636
599 if (header->host0 != local_host && header->host1 != local_host) { 637 if (host0 != local_host && host1 != local_host) {
600 dev_err(smem->dev, 638 dev_err(smem->dev,
601 "Partition %d hosts are invalid\n", i); 639 "Partition %d hosts are invalid\n", i);
602 return -EINVAL; 640 return -EINVAL;
603 } 641 }
604 642
605 if (header->host0 != remote_host && header->host1 != remote_host) { 643 if (host0 != remote_host && host1 != remote_host) {
606 dev_err(smem->dev, 644 dev_err(smem->dev,
607 "Partition %d hosts are invalid\n", i); 645 "Partition %d hosts are invalid\n", i);
608 return -EINVAL; 646 return -EINVAL;
@@ -614,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
614 return -EINVAL; 652 return -EINVAL;
615 } 653 }
616 654
617 if (header->offset_free_uncached > header->size) { 655 if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) {
618 dev_err(smem->dev, 656 dev_err(smem->dev,
619 "Partition %d has invalid free pointer\n", i); 657 "Partition %d has invalid free pointer\n", i);
620 return -EINVAL; 658 return -EINVAL;
@@ -626,37 +664,47 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem,
626 return 0; 664 return 0;
627} 665}
628 666
629static int qcom_smem_count_mem_regions(struct platform_device *pdev) 667static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
668 const char *name, int i)
630{ 669{
631 struct resource *res; 670 struct device_node *np;
632 int num_regions = 0; 671 struct resource r;
633 int i; 672 int ret;
634
635 for (i = 0; i < pdev->num_resources; i++) {
636 res = &pdev->resource[i];
637 673
638 if (resource_type(res) == IORESOURCE_MEM) 674 np = of_parse_phandle(dev->of_node, name, 0);
639 num_regions++; 675 if (!np) {
676 dev_err(dev, "No %s specified\n", name);
677 return -EINVAL;
640 } 678 }
641 679
642 return num_regions; 680 ret = of_address_to_resource(np, 0, &r);
681 of_node_put(np);
682 if (ret)
683 return ret;
684
685 smem->regions[i].aux_base = (u32)r.start;
686 smem->regions[i].size = resource_size(&r);
687 smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start,
688 resource_size(&r));
689 if (!smem->regions[i].virt_base)
690 return -ENOMEM;
691
692 return 0;
643} 693}
644 694
645static int qcom_smem_probe(struct platform_device *pdev) 695static int qcom_smem_probe(struct platform_device *pdev)
646{ 696{
647 struct smem_header *header; 697 struct smem_header *header;
648 struct device_node *np;
649 struct qcom_smem *smem; 698 struct qcom_smem *smem;
650 struct resource *res;
651 struct resource r;
652 size_t array_size; 699 size_t array_size;
653 int num_regions = 0; 700 int num_regions;
654 int hwlock_id; 701 int hwlock_id;
655 u32 version; 702 u32 version;
656 int ret; 703 int ret;
657 int i;
658 704
659 num_regions = qcom_smem_count_mem_regions(pdev) + 1; 705 num_regions = 1;
706 if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
707 num_regions++;
660 708
661 array_size = num_regions * sizeof(struct smem_region); 709 array_size = num_regions * sizeof(struct smem_region);
662 smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); 710 smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL);
@@ -666,39 +714,17 @@ static int qcom_smem_probe(struct platform_device *pdev)
666 smem->dev = &pdev->dev; 714 smem->dev = &pdev->dev;
667 smem->num_regions = num_regions; 715 smem->num_regions = num_regions;
668 716
669 np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); 717 ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
670 if (!np) {
671 dev_err(&pdev->dev, "No memory-region specified\n");
672 return -EINVAL;
673 }
674
675 ret = of_address_to_resource(np, 0, &r);
676 of_node_put(np);
677 if (ret) 718 if (ret)
678 return ret; 719 return ret;
679 720
680 smem->regions[0].aux_base = (u32)r.start; 721 if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
681 smem->regions[0].size = resource_size(&r); 722 "qcom,rpm-msg-ram", 1)))
682 smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev, 723 return ret;
683 r.start,
684 resource_size(&r));
685 if (!smem->regions[0].virt_base)
686 return -ENOMEM;
687
688 for (i = 1; i < num_regions; i++) {
689 res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1);
690
691 smem->regions[i].aux_base = (u32)res->start;
692 smem->regions[i].size = resource_size(res);
693 smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev,
694 res->start,
695 resource_size(res));
696 if (!smem->regions[i].virt_base)
697 return -ENOMEM;
698 }
699 724
700 header = smem->regions[0].virt_base; 725 header = smem->regions[0].virt_base;
701 if (header->initialized != 1 || header->reserved) { 726 if (le32_to_cpu(header->initialized) != 1 ||
727 le32_to_cpu(header->reserved)) {
702 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); 728 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n");
703 return -EINVAL; 729 return -EINVAL;
704 } 730 }
@@ -730,8 +756,8 @@ static int qcom_smem_probe(struct platform_device *pdev)
730 756
731static int qcom_smem_remove(struct platform_device *pdev) 757static int qcom_smem_remove(struct platform_device *pdev)
732{ 758{
733 __smem = NULL;
734 hwspin_lock_free(__smem->hwlock); 759 hwspin_lock_free(__smem->hwlock);
760 __smem = NULL;
735 761
736 return 0; 762 return 0;
737} 763}
diff --git a/drivers/soc/rockchip/Kconfig b/drivers/soc/rockchip/Kconfig
new file mode 100644
index 000000000000..7140ff825598
--- /dev/null
+++ b/drivers/soc/rockchip/Kconfig
@@ -0,0 +1,18 @@
1if ARCH_ROCKCHIP || COMPILE_TEST
2
3#
4# Rockchip SoC drivers
5#
6config ROCKCHIP_PM_DOMAINS
7 bool "Rockchip generic power domain"
8 depends on PM
9 select PM_GENERIC_DOMAINS
10 help
11 Say y here to enable power domain support.
12 In order to meet high performance and low power requirements, a power
13 management unit is designed for saving power when the RK3288 is in low power
14 mode. The RK3288 PMU is dedicated to managing the power of the whole chip.
15
16 If unsure, say N.
17
18endif
diff --git a/drivers/soc/rockchip/Makefile b/drivers/soc/rockchip/Makefile
new file mode 100644
index 000000000000..3d73d0672d22
--- /dev/null
+++ b/drivers/soc/rockchip/Makefile
@@ -0,0 +1,4 @@
1#
2# Rockchip SoC drivers
3#
4obj-$(CONFIG_ROCKCHIP_PM_DOMAINS) += pm_domains.o
diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
new file mode 100644
index 000000000000..534c58937a56
--- /dev/null
+++ b/drivers/soc/rockchip/pm_domains.c
@@ -0,0 +1,490 @@
1/*
2 * Rockchip Generic power domain support.
3 *
4 * Copyright (c) 2015 ROCKCHIP, Co. Ltd.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/io.h>
12#include <linux/err.h>
13#include <linux/pm_clock.h>
14#include <linux/pm_domain.h>
15#include <linux/of_address.h>
16#include <linux/of_platform.h>
17#include <linux/clk.h>
18#include <linux/regmap.h>
19#include <linux/mfd/syscon.h>
20#include <dt-bindings/power/rk3288-power.h>
21
/*
 * Per-domain bit masks within the shared PMU registers.  Which register each
 * mask applies to is given by the offsets in struct rockchip_pmu_info.
 */
struct rockchip_domain_info {
	int pwr_mask;		/* power request: bit set = power the domain off */
	int status_mask;	/* power state: bit set = domain is powered off */
	int req_mask;		/* bus idle request: bit set = request idle */
	int idle_mask;		/* bus idle status */
	int ack_mask;		/* acknowledge of the idle request */
};
29
/* SoC-specific PMU register layout plus the table of its power domains. */
struct rockchip_pmu_info {
	u32 pwr_offset;		/* power-off request register */
	u32 status_offset;	/* power state status register */
	u32 req_offset;		/* bus idle request register */
	u32 idle_offset;	/* bus idle status register */
	u32 ack_offset;		/* bus idle request acknowledge register */

	/* transition-count register pairs (down count, then up count at +4) */
	u32 core_pwrcnt_offset;
	u32 gpu_pwrcnt_offset;

	/* values programmed into the count registers above;
	 * presumably PMU clock ticks (24 == 1us on rk3288) -- confirm */
	unsigned int core_power_transition_time;
	unsigned int gpu_power_transition_time;

	int num_domains;
	const struct rockchip_domain_info *domain_info;
};
46
/*
 * One power domain instance.  clks[] is a flexible array member sized at
 * allocation time with the number of clocks listed in the domain's DT node;
 * the clocks are kept prepared and are enabled around state transitions.
 */
struct rockchip_pm_domain {
	struct generic_pm_domain genpd;
	const struct rockchip_domain_info *info;
	struct rockchip_pmu *pmu;
	int num_clks;
	struct clk *clks[];
};
54
/*
 * Driver instance state: regmap over the parent syscon's PMU registers and
 * the genpd provider data.  domains[] is a flexible array member with
 * info->num_domains slots; unused slots stay NULL.
 */
struct rockchip_pmu {
	struct device *dev;
	struct regmap *regmap;
	const struct rockchip_pmu_info *info;
	struct mutex mutex; /* mutex lock for pmu */
	struct genpd_onecell_data genpd_data;
	struct generic_pm_domain *domains[];
};
63
/* Convert a genpd pointer back to its containing rockchip_pm_domain. */
#define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd)

/* Build a rockchip_domain_info entry from individual bit numbers. */
#define DOMAIN(pwr, status, req, idle, ack)		\
{							\
	.pwr_mask = BIT(pwr),				\
	.status_mask = BIT(status),			\
	.req_mask = BIT(req),				\
	.idle_mask = BIT(idle),				\
	.ack_mask = BIT(ack),				\
}

/*
 * On RK3288 the idle status bit is the same as the request bit and the
 * acknowledge bit sits 16 positions above the request bit (idle_offset ==
 * ack_offset in rk3288_pmu below).
 */
#define DOMAIN_RK3288(pwr, status, req)			\
	DOMAIN(pwr, status, req, req, (req) + 16)
77
78static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd)
79{
80 struct rockchip_pmu *pmu = pd->pmu;
81 const struct rockchip_domain_info *pd_info = pd->info;
82 unsigned int val;
83
84 regmap_read(pmu->regmap, pmu->info->idle_offset, &val);
85 return (val & pd_info->idle_mask) == pd_info->idle_mask;
86}
87
/*
 * Request the PMU to idle (or un-idle) the domain's bus interface and
 * busy-wait until both the acknowledge bit and the idle status agree with
 * the requested state.  Always returns 0.
 *
 * NOTE(review): both wait loops are unbounded -- if the hardware never
 * responds this spins forever.  Callers hold pd->pmu->mutex.
 */
static int rockchip_pmu_set_idle_request(struct rockchip_pm_domain *pd,
					 bool idle)
{
	const struct rockchip_domain_info *pd_info = pd->info;
	struct rockchip_pmu *pmu = pd->pmu;
	unsigned int val;

	/* -1U sets every bit of the masked field; 0 clears it. */
	regmap_update_bits(pmu->regmap, pmu->info->req_offset,
			   pd_info->req_mask, idle ? -1U : 0);

	/* Ensure the request write has reached the PMU before polling. */
	dsb(sy);

	/* First wait for the PMU to acknowledge the request ... */
	do {
		regmap_read(pmu->regmap, pmu->info->ack_offset, &val);
	} while ((val & pd_info->ack_mask) != (idle ? pd_info->ack_mask : 0));

	/* ... then for the idle status itself to match. */
	while (rockchip_pmu_domain_is_idle(pd) != idle)
		cpu_relax();

	return 0;
}
109
110static bool rockchip_pmu_domain_is_on(struct rockchip_pm_domain *pd)
111{
112 struct rockchip_pmu *pmu = pd->pmu;
113 unsigned int val;
114
115 regmap_read(pmu->regmap, pmu->info->status_offset, &val);
116
117 /* 1'b0: power on, 1'b1: power off */
118 return !(val & pd->info->status_mask);
119}
120
/*
 * Flip the domain's power request bit (0 = power on, set = power off) and
 * busy-wait until the status register reflects the requested state.
 *
 * NOTE(review): the wait loop is unbounded.  Callers hold pd->pmu->mutex.
 */
static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
					     bool on)
{
	struct rockchip_pmu *pmu = pd->pmu;

	regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
			   pd->info->pwr_mask, on ? 0 : -1U);

	/* Ensure the write has reached the PMU before polling the status. */
	dsb(sy);

	while (rockchip_pmu_domain_is_on(pd) != on)
		cpu_relax();
}
134
/*
 * Transition @pd to the requested power state under the PMU mutex.
 *
 * The domain's clocks must be running during a state transition, so they are
 * enabled first (they were prepared at add time) and disabled again after.
 * When powering down, the bus interface (NIU) is idled before cutting power;
 * when powering up, it is un-idled after power returns.
 * No-op if the domain is already in the requested state.  Always returns 0.
 */
static int rockchip_pd_power(struct rockchip_pm_domain *pd, bool power_on)
{
	int i;

	mutex_lock(&pd->pmu->mutex);

	if (rockchip_pmu_domain_is_on(pd) != power_on) {
		for (i = 0; i < pd->num_clks; i++)
			clk_enable(pd->clks[i]);

		if (!power_on) {
			/* FIXME: add code to save AXI_QOS */

			/* if powering down, idle request to NIU first */
			rockchip_pmu_set_idle_request(pd, true);
		}

		rockchip_do_pmu_set_power_domain(pd, power_on);

		if (power_on) {
			/* if powering up, leave idle mode */
			rockchip_pmu_set_idle_request(pd, false);

			/* FIXME: add code to restore AXI_QOS */
		}

		/* disable in reverse order of the enables above */
		for (i = pd->num_clks - 1; i >= 0; i--)
			clk_disable(pd->clks[i]);
	}

	mutex_unlock(&pd->pmu->mutex);
	return 0;
}
168
169static int rockchip_pd_power_on(struct generic_pm_domain *domain)
170{
171 struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
172
173 return rockchip_pd_power(pd, true);
174}
175
176static int rockchip_pd_power_off(struct generic_pm_domain *domain)
177{
178 struct rockchip_pm_domain *pd = to_rockchip_pd(domain);
179
180 return rockchip_pd_power(pd, false);
181}
182
/*
 * genpd callback: collect all of @dev's DT clocks into its PM clock list so
 * the genpd core (GENPD_FLAG_PM_CLK) can gate them across runtime PM.
 *
 * NOTE(review): of_clk_get() returns an ERR_PTR on failure, never NULL, so
 * the "clk &&" half of the loop condition appears redundant -- confirm.
 * A clk-getting error simply terminates the loop and is not reported.
 * pm_clk_add_clk() takes over the clock reference on success.
 */
static int rockchip_pd_attach_dev(struct generic_pm_domain *genpd,
				  struct device *dev)
{
	struct clk *clk;
	int i;
	int error;

	dev_dbg(dev, "attaching to power domain '%s'\n", genpd->name);

	error = pm_clk_create(dev);
	if (error) {
		dev_err(dev, "pm_clk_create failed %d\n", error);
		return error;
	}

	i = 0;
	while ((clk = of_clk_get(dev->of_node, i++)) && !IS_ERR(clk)) {
		dev_dbg(dev, "adding clock '%pC' to list of PM clocks\n", clk);
		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n", error);
			clk_put(clk);
			pm_clk_destroy(dev);
			return error;
		}
	}

	return 0;
}
212
213static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd,
214 struct device *dev)
215{
216 dev_dbg(dev, "detaching from power domain '%s'\n", genpd->name);
217
218 pm_clk_destroy(dev);
219}
220
221static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu,
222 struct device_node *node)
223{
224 const struct rockchip_domain_info *pd_info;
225 struct rockchip_pm_domain *pd;
226 struct clk *clk;
227 int clk_cnt;
228 int i;
229 u32 id;
230 int error;
231
232 error = of_property_read_u32(node, "reg", &id);
233 if (error) {
234 dev_err(pmu->dev,
235 "%s: failed to retrieve domain id (reg): %d\n",
236 node->name, error);
237 return -EINVAL;
238 }
239
240 if (id >= pmu->info->num_domains) {
241 dev_err(pmu->dev, "%s: invalid domain id %d\n",
242 node->name, id);
243 return -EINVAL;
244 }
245
246 pd_info = &pmu->info->domain_info[id];
247 if (!pd_info) {
248 dev_err(pmu->dev, "%s: undefined domain id %d\n",
249 node->name, id);
250 return -EINVAL;
251 }
252
253 clk_cnt = of_count_phandle_with_args(node, "clocks", "#clock-cells");
254 pd = devm_kzalloc(pmu->dev,
255 sizeof(*pd) + clk_cnt * sizeof(pd->clks[0]),
256 GFP_KERNEL);
257 if (!pd)
258 return -ENOMEM;
259
260 pd->info = pd_info;
261 pd->pmu = pmu;
262
263 for (i = 0; i < clk_cnt; i++) {
264 clk = of_clk_get(node, i);
265 if (IS_ERR(clk)) {
266 error = PTR_ERR(clk);
267 dev_err(pmu->dev,
268 "%s: failed to get clk at index %d: %d\n",
269 node->name, i, error);
270 goto err_out;
271 }
272
273 error = clk_prepare(clk);
274 if (error) {
275 dev_err(pmu->dev,
276 "%s: failed to prepare clk %pC (index %d): %d\n",
277 node->name, clk, i, error);
278 clk_put(clk);
279 goto err_out;
280 }
281
282 pd->clks[pd->num_clks++] = clk;
283
284 dev_dbg(pmu->dev, "added clock '%pC' to domain '%s'\n",
285 clk, node->name);
286 }
287
288 error = rockchip_pd_power(pd, true);
289 if (error) {
290 dev_err(pmu->dev,
291 "failed to power on domain '%s': %d\n",
292 node->name, error);
293 goto err_out;
294 }
295
296 pd->genpd.name = node->name;
297 pd->genpd.power_off = rockchip_pd_power_off;
298 pd->genpd.power_on = rockchip_pd_power_on;
299 pd->genpd.attach_dev = rockchip_pd_attach_dev;
300 pd->genpd.detach_dev = rockchip_pd_detach_dev;
301 pd->genpd.flags = GENPD_FLAG_PM_CLK;
302 pm_genpd_init(&pd->genpd, NULL, false);
303
304 pmu->genpd_data.domains[id] = &pd->genpd;
305 return 0;
306
307err_out:
308 while (--i >= 0) {
309 clk_unprepare(pd->clks[i]);
310 clk_put(pd->clks[i]);
311 }
312 return error;
313}
314
315static void rockchip_pm_remove_one_domain(struct rockchip_pm_domain *pd)
316{
317 int i;
318
319 for (i = 0; i < pd->num_clks; i++) {
320 clk_unprepare(pd->clks[i]);
321 clk_put(pd->clks[i]);
322 }
323
324 /* protect the zeroing of pm->num_clks */
325 mutex_lock(&pd->pmu->mutex);
326 pd->num_clks = 0;
327 mutex_unlock(&pd->pmu->mutex);
328
329 /* devm will free our memory */
330}
331
332static void rockchip_pm_domain_cleanup(struct rockchip_pmu *pmu)
333{
334 struct generic_pm_domain *genpd;
335 struct rockchip_pm_domain *pd;
336 int i;
337
338 for (i = 0; i < pmu->genpd_data.num_domains; i++) {
339 genpd = pmu->genpd_data.domains[i];
340 if (genpd) {
341 pd = to_rockchip_pd(genpd);
342 rockchip_pm_remove_one_domain(pd);
343 }
344 }
345
346 /* devm will free our memory */
347}
348
349static void rockchip_configure_pd_cnt(struct rockchip_pmu *pmu,
350 u32 domain_reg_offset,
351 unsigned int count)
352{
353 /* First configure domain power down transition count ... */
354 regmap_write(pmu->regmap, domain_reg_offset, count);
355 /* ... and then power up count. */
356 regmap_write(pmu->regmap, domain_reg_offset + 4, count);
357}
358
359static int rockchip_pm_domain_probe(struct platform_device *pdev)
360{
361 struct device *dev = &pdev->dev;
362 struct device_node *np = dev->of_node;
363 struct device_node *node;
364 struct device *parent;
365 struct rockchip_pmu *pmu;
366 const struct of_device_id *match;
367 const struct rockchip_pmu_info *pmu_info;
368 int error;
369
370 if (!np) {
371 dev_err(dev, "device tree node not found\n");
372 return -ENODEV;
373 }
374
375 match = of_match_device(dev->driver->of_match_table, dev);
376 if (!match || !match->data) {
377 dev_err(dev, "missing pmu data\n");
378 return -EINVAL;
379 }
380
381 pmu_info = match->data;
382
383 pmu = devm_kzalloc(dev,
384 sizeof(*pmu) +
385 pmu_info->num_domains * sizeof(pmu->domains[0]),
386 GFP_KERNEL);
387 if (!pmu)
388 return -ENOMEM;
389
390 pmu->dev = &pdev->dev;
391 mutex_init(&pmu->mutex);
392
393 pmu->info = pmu_info;
394
395 pmu->genpd_data.domains = pmu->domains;
396 pmu->genpd_data.num_domains = pmu_info->num_domains;
397
398 parent = dev->parent;
399 if (!parent) {
400 dev_err(dev, "no parent for syscon devices\n");
401 return -ENODEV;
402 }
403
404 pmu->regmap = syscon_node_to_regmap(parent->of_node);
405
406 /*
407 * Configure power up and down transition delays for CORE
408 * and GPU domains.
409 */
410 rockchip_configure_pd_cnt(pmu, pmu_info->core_pwrcnt_offset,
411 pmu_info->core_power_transition_time);
412 rockchip_configure_pd_cnt(pmu, pmu_info->gpu_pwrcnt_offset,
413 pmu_info->gpu_power_transition_time);
414
415 error = -ENODEV;
416
417 for_each_available_child_of_node(np, node) {
418 error = rockchip_pm_add_one_domain(pmu, node);
419 if (error) {
420 dev_err(dev, "failed to handle node %s: %d\n",
421 node->name, error);
422 goto err_out;
423 }
424 }
425
426 if (error) {
427 dev_dbg(dev, "no power domains defined\n");
428 goto err_out;
429 }
430
431 of_genpd_add_provider_onecell(np, &pmu->genpd_data);
432
433 return 0;
434
435err_out:
436 rockchip_pm_domain_cleanup(pmu);
437 return error;
438}
439
/* RK3288 domains: DOMAIN_RK3288(pwr bit, status bit, req bit). */
static const struct rockchip_domain_info rk3288_pm_domains[] = {
	[RK3288_PD_VIO]		= DOMAIN_RK3288(7, 7, 4),
	[RK3288_PD_HEVC]	= DOMAIN_RK3288(14, 10, 9),
	[RK3288_PD_VIDEO]	= DOMAIN_RK3288(8, 8, 3),
	[RK3288_PD_GPU]		= DOMAIN_RK3288(9, 9, 2),
};
446
/* RK3288 PMU register layout (offsets relative to the parent syscon). */
static const struct rockchip_pmu_info rk3288_pmu = {
	.pwr_offset = 0x08,
	.status_offset = 0x0c,
	.req_offset = 0x10,
	.idle_offset = 0x14,
	.ack_offset = 0x14,	/* ack bits share the idle register (bits 16+) */

	.core_pwrcnt_offset = 0x34,
	.gpu_pwrcnt_offset = 0x3c,

	.core_power_transition_time = 24, /* 1us */
	.gpu_power_transition_time = 24, /* 1us */

	.num_domains = ARRAY_SIZE(rk3288_pm_domains),
	.domain_info = rk3288_pm_domains,
};
463
/* DT match table; .data selects the SoC-specific rockchip_pmu_info. */
static const struct of_device_id rockchip_pm_domain_dt_match[] = {
	{
		.compatible = "rockchip,rk3288-power-controller",
		.data = (void *)&rk3288_pmu,
	},
	{ /* sentinel */ },
};
471
static struct platform_driver rockchip_pm_domain_driver = {
	.probe = rockchip_pm_domain_probe,
	.driver = {
		.name   = "rockchip-pm-domain",
		.of_match_table = rockchip_pm_domain_dt_match,
		/*
		 * We can't forcibly eject devices form power domain,
		 * so we can't really remove power domains once they
		 * were added.
		 */
		.suppress_bind_attrs = true,
	},
};
485
/*
 * Registered at postcore level -- presumably so the power domains exist
 * before their consumer drivers probe (confirm against init ordering).
 */
static int __init rockchip_pm_domain_drv_register(void)
{
	return platform_driver_register(&rockchip_pm_domain_driver);
}
postcore_initcall(rockchip_pm_domain_drv_register);