diff options
author | Arnd Bergmann <arnd@arndb.de> | 2015-10-15 17:03:24 -0400 |
---|---|---|
committer | Arnd Bergmann <arnd@arndb.de> | 2015-10-15 17:03:24 -0400 |
commit | ead67421a981961aa2f7dd98d9187185dd782389 (patch) | |
tree | 315415057298adeaad9c14bc435bd159f33873f8 | |
parent | 41e602e8af1300471e79e298a7276226344071db (diff) | |
parent | d0bfd7c9b162612de55ca2d204403b90dc278db6 (diff) |
Merge tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm into next/drivers
Pull "Qualcomm ARM Based SoC Updates for 4.4" from Andy Gross:
* Implement id_table driver matching in SMD
* Avoid NULL pointer exception on remove of SMEM
* Reorder SMEM/SMD configs
* Make qcom_smem_get() return a pointer
* Handle big endian CPUs correctly in SMEM
* Represent SMD channel layout in structures
* Use __iowrite32_copy() in SMD
* Remove use of VLAIs in SMD
* Handle big endian CPUs correctly in SMD/RPM
* Handle big endian CPUs correctly in SMD
* Reject sending SMD packets that are too large
* Fix endianness issue in SCM __qcom_scm_is_call_available
* Add missing prototype for qcom_scm_is_available()
* Correct SMEM items for upper channels
* Use architecture level to build SCM correctly
* Delete unneeded of_node_put in SMD
* Correct active/sleep state flagging in SMD/RPM
* Move RPM message ram out of SMEM DT node
* tag 'qcom-soc-for-4.4' of git://codeaurora.org/quic/kernel/agross-msm:
soc: qcom: smem: Move RPM message ram out of smem DT node
soc: qcom: smd-rpm: Correct the active vs sleep state flagging
soc: qcom: smd: delete unneeded of_node_put
firmware: qcom-scm: build for correct architecture level
soc: qcom: smd: Correct SMEM items for upper channels
qcom-scm: add missing prototype for qcom_scm_is_available()
qcom-scm: fix endianess issue in __qcom_scm_is_call_available
soc: qcom: smd: Reject send of too big packets
soc: qcom: smd: Handle big endian CPUs
soc: qcom: smd_rpm: Handle big endian CPUs
soc: qcom: smd: Remove use of VLAIS
soc: qcom: smd: Use __iowrite32_copy() instead of open-coding it
soc: qcom: smd: Represent channel layout in structures
soc: qcom: smem: Handle big endian CPUs
soc: qcom: Make qcom_smem_get() return a pointer
soc: qcom: Reorder SMEM/SMD configs
soc: qcom: smem: Avoid NULL pointer exception on remove
soc: qcom: smd: Implement id_table driver matching
-rw-r--r-- | arch/arm/boot/dts/qcom-msm8974.dtsi | 17 | ||||
-rw-r--r-- | drivers/firmware/Makefile | 2 | ||||
-rw-r--r-- | drivers/firmware/qcom_scm-32.c | 6 | ||||
-rw-r--r-- | drivers/soc/qcom/Kconfig | 16 | ||||
-rw-r--r-- | drivers/soc/qcom/smd-rpm.c | 68 | ||||
-rw-r--r-- | drivers/soc/qcom/smd.c | 296 | ||||
-rw-r--r-- | drivers/soc/qcom/smem.c | 368 | ||||
-rw-r--r-- | include/linux/qcom_scm.h | 2 | ||||
-rw-r--r-- | include/linux/soc/qcom/smd.h | 11 | ||||
-rw-r--r-- | include/linux/soc/qcom/smem.h | 2 |
10 files changed, 452 insertions, 336 deletions
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi index ab8e57250468..93e315053bdd 100644 --- a/arch/arm/boot/dts/qcom-msm8974.dtsi +++ b/arch/arm/boot/dts/qcom-msm8974.dtsi | |||
@@ -100,6 +100,15 @@ | |||
100 | clock-frequency = <19200000>; | 100 | clock-frequency = <19200000>; |
101 | }; | 101 | }; |
102 | 102 | ||
103 | smem { | ||
104 | compatible = "qcom,smem"; | ||
105 | |||
106 | memory-region = <&smem_region>; | ||
107 | qcom,rpm-msg-ram = <&rpm_msg_ram>; | ||
108 | |||
109 | hwlocks = <&tcsr_mutex 3>; | ||
110 | }; | ||
111 | |||
103 | soc: soc { | 112 | soc: soc { |
104 | #address-cells = <1>; | 113 | #address-cells = <1>; |
105 | #size-cells = <1>; | 114 | #size-cells = <1>; |
@@ -250,13 +259,9 @@ | |||
250 | #hwlock-cells = <1>; | 259 | #hwlock-cells = <1>; |
251 | }; | 260 | }; |
252 | 261 | ||
253 | smem@fa00000 { | 262 | rpm_msg_ram: memory@fc428000 { |
254 | compatible = "qcom,smem"; | 263 | compatible = "qcom,rpm-msg-ram"; |
255 | |||
256 | memory-region = <&smem_region>; | ||
257 | reg = <0xfc428000 0x4000>; | 264 | reg = <0xfc428000 0x4000>; |
258 | |||
259 | hwlocks = <&tcsr_mutex 3>; | ||
260 | }; | 265 | }; |
261 | 266 | ||
262 | blsp1_uart2: serial@f991e000 { | 267 | blsp1_uart2: serial@f991e000 { |
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index b8a521741418..b984dd7d9ccb 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile | |||
@@ -16,7 +16,7 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o | |||
16 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o | 16 | obj-$(CONFIG_QCOM_SCM) += qcom_scm.o |
17 | obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o | 17 | obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o |
18 | obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o | 18 | obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o |
19 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1) | 19 | CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a |
20 | 20 | ||
21 | obj-y += broadcom/ | 21 | obj-y += broadcom/ |
22 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ | 22 | obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ |
diff --git a/drivers/firmware/qcom_scm-32.c b/drivers/firmware/qcom_scm-32.c index 29e6850665eb..0883292f640f 100644 --- a/drivers/firmware/qcom_scm-32.c +++ b/drivers/firmware/qcom_scm-32.c | |||
@@ -480,15 +480,15 @@ void __qcom_scm_cpu_power_down(u32 flags) | |||
480 | int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) | 480 | int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id) |
481 | { | 481 | { |
482 | int ret; | 482 | int ret; |
483 | u32 svc_cmd = (svc_id << 10) | cmd_id; | 483 | __le32 svc_cmd = cpu_to_le32((svc_id << 10) | cmd_id); |
484 | u32 ret_val = 0; | 484 | __le32 ret_val = 0; |
485 | 485 | ||
486 | ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, | 486 | ret = qcom_scm_call(QCOM_SCM_SVC_INFO, QCOM_IS_CALL_AVAIL_CMD, &svc_cmd, |
487 | sizeof(svc_cmd), &ret_val, sizeof(ret_val)); | 487 | sizeof(svc_cmd), &ret_val, sizeof(ret_val)); |
488 | if (ret) | 488 | if (ret) |
489 | return ret; | 489 | return ret; |
490 | 490 | ||
491 | return ret_val; | 491 | return le32_to_cpu(ret_val); |
492 | } | 492 | } |
493 | 493 | ||
494 | int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) | 494 | int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp) |
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index ba47b70f4d85..3e4d2133c3d2 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig | |||
@@ -19,6 +19,14 @@ config QCOM_PM | |||
19 | modes. It interface with various system drivers to put the cores in | 19 | modes. It interface with various system drivers to put the cores in |
20 | low power modes. | 20 | low power modes. |
21 | 21 | ||
22 | config QCOM_SMEM | ||
23 | tristate "Qualcomm Shared Memory Manager (SMEM)" | ||
24 | depends on ARCH_QCOM | ||
25 | help | ||
26 | Say y here to enable support for the Qualcomm Shared Memory Manager. | ||
27 | The driver provides an interface to items in a heap shared among all | ||
28 | processors in a Qualcomm platform. | ||
29 | |||
22 | config QCOM_SMD | 30 | config QCOM_SMD |
23 | tristate "Qualcomm Shared Memory Driver (SMD)" | 31 | tristate "Qualcomm Shared Memory Driver (SMD)" |
24 | depends on QCOM_SMEM | 32 | depends on QCOM_SMEM |
@@ -40,11 +48,3 @@ config QCOM_SMD_RPM | |||
40 | 48 | ||
41 | Say M here if you want to include support for the Qualcomm RPM as a | 49 | Say M here if you want to include support for the Qualcomm RPM as a |
42 | module. This will build a module called "qcom-smd-rpm". | 50 | module. This will build a module called "qcom-smd-rpm". |
43 | |||
44 | config QCOM_SMEM | ||
45 | tristate "Qualcomm Shared Memory Manager (SMEM)" | ||
46 | depends on ARCH_QCOM | ||
47 | help | ||
48 | Say y here to enable support for the Qualcomm Shared Memory Manager. | ||
49 | The driver provides an interface to items in a heap shared among all | ||
50 | processors in a Qualcomm platform. | ||
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c index 1392ccf14a20..1ee02d2587b2 100644 --- a/drivers/soc/qcom/smd-rpm.c +++ b/drivers/soc/qcom/smd-rpm.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/of_platform.h> | 17 | #include <linux/of_platform.h> |
18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/slab.h> | ||
20 | 21 | ||
21 | #include <linux/soc/qcom/smd.h> | 22 | #include <linux/soc/qcom/smd.h> |
22 | #include <linux/soc/qcom/smd-rpm.h> | 23 | #include <linux/soc/qcom/smd-rpm.h> |
@@ -44,8 +45,8 @@ struct qcom_smd_rpm { | |||
44 | * @length: length of the payload | 45 | * @length: length of the payload |
45 | */ | 46 | */ |
46 | struct qcom_rpm_header { | 47 | struct qcom_rpm_header { |
47 | u32 service_type; | 48 | __le32 service_type; |
48 | u32 length; | 49 | __le32 length; |
49 | }; | 50 | }; |
50 | 51 | ||
51 | /** | 52 | /** |
@@ -57,11 +58,11 @@ struct qcom_rpm_header { | |||
57 | * @data_len: length of the payload following this header | 58 | * @data_len: length of the payload following this header |
58 | */ | 59 | */ |
59 | struct qcom_rpm_request { | 60 | struct qcom_rpm_request { |
60 | u32 msg_id; | 61 | __le32 msg_id; |
61 | u32 flags; | 62 | __le32 flags; |
62 | u32 type; | 63 | __le32 type; |
63 | u32 id; | 64 | __le32 id; |
64 | u32 data_len; | 65 | __le32 data_len; |
65 | }; | 66 | }; |
66 | 67 | ||
67 | /** | 68 | /** |
@@ -74,10 +75,10 @@ struct qcom_rpm_request { | |||
74 | * Multiple of these messages can be stacked in an rpm message. | 75 | * Multiple of these messages can be stacked in an rpm message. |
75 | */ | 76 | */ |
76 | struct qcom_rpm_message { | 77 | struct qcom_rpm_message { |
77 | u32 msg_type; | 78 | __le32 msg_type; |
78 | u32 length; | 79 | __le32 length; |
79 | union { | 80 | union { |
80 | u32 msg_id; | 81 | __le32 msg_id; |
81 | u8 message[0]; | 82 | u8 message[0]; |
82 | }; | 83 | }; |
83 | }; | 84 | }; |
@@ -104,30 +105,34 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, | |||
104 | static unsigned msg_id = 1; | 105 | static unsigned msg_id = 1; |
105 | int left; | 106 | int left; |
106 | int ret; | 107 | int ret; |
107 | |||
108 | struct { | 108 | struct { |
109 | struct qcom_rpm_header hdr; | 109 | struct qcom_rpm_header hdr; |
110 | struct qcom_rpm_request req; | 110 | struct qcom_rpm_request req; |
111 | u8 payload[count]; | 111 | u8 payload[]; |
112 | } pkt; | 112 | } *pkt; |
113 | size_t size = sizeof(*pkt) + count; | ||
113 | 114 | ||
114 | /* SMD packets to the RPM may not exceed 256 bytes */ | 115 | /* SMD packets to the RPM may not exceed 256 bytes */ |
115 | if (WARN_ON(sizeof(pkt) >= 256)) | 116 | if (WARN_ON(size >= 256)) |
116 | return -EINVAL; | 117 | return -EINVAL; |
117 | 118 | ||
119 | pkt = kmalloc(size, GFP_KERNEL); | ||
120 | if (!pkt) | ||
121 | return -ENOMEM; | ||
122 | |||
118 | mutex_lock(&rpm->lock); | 123 | mutex_lock(&rpm->lock); |
119 | 124 | ||
120 | pkt.hdr.service_type = RPM_SERVICE_TYPE_REQUEST; | 125 | pkt->hdr.service_type = cpu_to_le32(RPM_SERVICE_TYPE_REQUEST); |
121 | pkt.hdr.length = sizeof(struct qcom_rpm_request) + count; | 126 | pkt->hdr.length = cpu_to_le32(sizeof(struct qcom_rpm_request) + count); |
122 | 127 | ||
123 | pkt.req.msg_id = msg_id++; | 128 | pkt->req.msg_id = cpu_to_le32(msg_id++); |
124 | pkt.req.flags = BIT(state); | 129 | pkt->req.flags = cpu_to_le32(state); |
125 | pkt.req.type = type; | 130 | pkt->req.type = cpu_to_le32(type); |
126 | pkt.req.id = id; | 131 | pkt->req.id = cpu_to_le32(id); |
127 | pkt.req.data_len = count; | 132 | pkt->req.data_len = cpu_to_le32(count); |
128 | memcpy(pkt.payload, buf, count); | 133 | memcpy(pkt->payload, buf, count); |
129 | 134 | ||
130 | ret = qcom_smd_send(rpm->rpm_channel, &pkt, sizeof(pkt)); | 135 | ret = qcom_smd_send(rpm->rpm_channel, pkt, sizeof(*pkt)); |
131 | if (ret) | 136 | if (ret) |
132 | goto out; | 137 | goto out; |
133 | 138 | ||
@@ -138,6 +143,7 @@ int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm, | |||
138 | ret = rpm->ack_status; | 143 | ret = rpm->ack_status; |
139 | 144 | ||
140 | out: | 145 | out: |
146 | kfree(pkt); | ||
141 | mutex_unlock(&rpm->lock); | 147 | mutex_unlock(&rpm->lock); |
142 | return ret; | 148 | return ret; |
143 | } | 149 | } |
@@ -148,27 +154,29 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, | |||
148 | size_t count) | 154 | size_t count) |
149 | { | 155 | { |
150 | const struct qcom_rpm_header *hdr = data; | 156 | const struct qcom_rpm_header *hdr = data; |
157 | size_t hdr_length = le32_to_cpu(hdr->length); | ||
151 | const struct qcom_rpm_message *msg; | 158 | const struct qcom_rpm_message *msg; |
152 | struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); | 159 | struct qcom_smd_rpm *rpm = dev_get_drvdata(&qsdev->dev); |
153 | const u8 *buf = data + sizeof(struct qcom_rpm_header); | 160 | const u8 *buf = data + sizeof(struct qcom_rpm_header); |
154 | const u8 *end = buf + hdr->length; | 161 | const u8 *end = buf + hdr_length; |
155 | char msgbuf[32]; | 162 | char msgbuf[32]; |
156 | int status = 0; | 163 | int status = 0; |
157 | u32 len; | 164 | u32 len, msg_length; |
158 | 165 | ||
159 | if (hdr->service_type != RPM_SERVICE_TYPE_REQUEST || | 166 | if (le32_to_cpu(hdr->service_type) != RPM_SERVICE_TYPE_REQUEST || |
160 | hdr->length < sizeof(struct qcom_rpm_message)) { | 167 | hdr_length < sizeof(struct qcom_rpm_message)) { |
161 | dev_err(&qsdev->dev, "invalid request\n"); | 168 | dev_err(&qsdev->dev, "invalid request\n"); |
162 | return 0; | 169 | return 0; |
163 | } | 170 | } |
164 | 171 | ||
165 | while (buf < end) { | 172 | while (buf < end) { |
166 | msg = (struct qcom_rpm_message *)buf; | 173 | msg = (struct qcom_rpm_message *)buf; |
167 | switch (msg->msg_type) { | 174 | msg_length = le32_to_cpu(msg->length); |
175 | switch (le32_to_cpu(msg->msg_type)) { | ||
168 | case RPM_MSG_TYPE_MSG_ID: | 176 | case RPM_MSG_TYPE_MSG_ID: |
169 | break; | 177 | break; |
170 | case RPM_MSG_TYPE_ERR: | 178 | case RPM_MSG_TYPE_ERR: |
171 | len = min_t(u32, ALIGN(msg->length, 4), sizeof(msgbuf)); | 179 | len = min_t(u32, ALIGN(msg_length, 4), sizeof(msgbuf)); |
172 | memcpy_fromio(msgbuf, msg->message, len); | 180 | memcpy_fromio(msgbuf, msg->message, len); |
173 | msgbuf[len - 1] = 0; | 181 | msgbuf[len - 1] = 0; |
174 | 182 | ||
@@ -179,7 +187,7 @@ static int qcom_smd_rpm_callback(struct qcom_smd_device *qsdev, | |||
179 | break; | 187 | break; |
180 | } | 188 | } |
181 | 189 | ||
182 | buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg->length, 4); | 190 | buf = PTR_ALIGN(buf + 2 * sizeof(u32) + msg_length, 4); |
183 | } | 191 | } |
184 | 192 | ||
185 | rpm->ack_status = status; | 193 | rpm->ack_status = status; |
diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index a6155c917d52..86b598cff91a 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c | |||
@@ -65,7 +65,9 @@ | |||
65 | */ | 65 | */ |
66 | 66 | ||
67 | struct smd_channel_info; | 67 | struct smd_channel_info; |
68 | struct smd_channel_info_pair; | ||
68 | struct smd_channel_info_word; | 69 | struct smd_channel_info_word; |
70 | struct smd_channel_info_word_pair; | ||
69 | 71 | ||
70 | #define SMD_ALLOC_TBL_COUNT 2 | 72 | #define SMD_ALLOC_TBL_COUNT 2 |
71 | #define SMD_ALLOC_TBL_SIZE 64 | 73 | #define SMD_ALLOC_TBL_SIZE 64 |
@@ -85,8 +87,8 @@ static const struct { | |||
85 | .fifo_base_id = 338 | 87 | .fifo_base_id = 338 |
86 | }, | 88 | }, |
87 | { | 89 | { |
88 | .alloc_tbl_id = 14, | 90 | .alloc_tbl_id = 266, |
89 | .info_base_id = 266, | 91 | .info_base_id = 138, |
90 | .fifo_base_id = 202, | 92 | .fifo_base_id = 202, |
91 | }, | 93 | }, |
92 | }; | 94 | }; |
@@ -151,10 +153,8 @@ enum smd_channel_state { | |||
151 | * @name: name of the channel | 153 | * @name: name of the channel |
152 | * @state: local state of the channel | 154 | * @state: local state of the channel |
153 | * @remote_state: remote state of the channel | 155 | * @remote_state: remote state of the channel |
154 | * @tx_info: byte aligned outgoing channel info | 156 | * @info: byte aligned outgoing/incoming channel info |
155 | * @rx_info: byte aligned incoming channel info | 157 | * @info_word: word aligned outgoing/incoming channel info |
156 | * @tx_info_word: word aligned outgoing channel info | ||
157 | * @rx_info_word: word aligned incoming channel info | ||
158 | * @tx_lock: lock to make writes to the channel mutually exclusive | 158 | * @tx_lock: lock to make writes to the channel mutually exclusive |
159 | * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR | 159 | * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR |
160 | * @tx_fifo: pointer to the outgoing ring buffer | 160 | * @tx_fifo: pointer to the outgoing ring buffer |
@@ -175,11 +175,8 @@ struct qcom_smd_channel { | |||
175 | enum smd_channel_state state; | 175 | enum smd_channel_state state; |
176 | enum smd_channel_state remote_state; | 176 | enum smd_channel_state remote_state; |
177 | 177 | ||
178 | struct smd_channel_info *tx_info; | 178 | struct smd_channel_info_pair *info; |
179 | struct smd_channel_info *rx_info; | 179 | struct smd_channel_info_word_pair *info_word; |
180 | |||
181 | struct smd_channel_info_word *tx_info_word; | ||
182 | struct smd_channel_info_word *rx_info_word; | ||
183 | 180 | ||
184 | struct mutex tx_lock; | 181 | struct mutex tx_lock; |
185 | wait_queue_head_t fblockread_event; | 182 | wait_queue_head_t fblockread_event; |
@@ -215,7 +212,7 @@ struct qcom_smd { | |||
215 | * Format of the smd_info smem items, for byte aligned channels. | 212 | * Format of the smd_info smem items, for byte aligned channels. |
216 | */ | 213 | */ |
217 | struct smd_channel_info { | 214 | struct smd_channel_info { |
218 | u32 state; | 215 | __le32 state; |
219 | u8 fDSR; | 216 | u8 fDSR; |
220 | u8 fCTS; | 217 | u8 fCTS; |
221 | u8 fCD; | 218 | u8 fCD; |
@@ -224,46 +221,104 @@ struct smd_channel_info { | |||
224 | u8 fTAIL; | 221 | u8 fTAIL; |
225 | u8 fSTATE; | 222 | u8 fSTATE; |
226 | u8 fBLOCKREADINTR; | 223 | u8 fBLOCKREADINTR; |
227 | u32 tail; | 224 | __le32 tail; |
228 | u32 head; | 225 | __le32 head; |
226 | }; | ||
227 | |||
228 | struct smd_channel_info_pair { | ||
229 | struct smd_channel_info tx; | ||
230 | struct smd_channel_info rx; | ||
229 | }; | 231 | }; |
230 | 232 | ||
231 | /* | 233 | /* |
232 | * Format of the smd_info smem items, for word aligned channels. | 234 | * Format of the smd_info smem items, for word aligned channels. |
233 | */ | 235 | */ |
234 | struct smd_channel_info_word { | 236 | struct smd_channel_info_word { |
235 | u32 state; | 237 | __le32 state; |
236 | u32 fDSR; | 238 | __le32 fDSR; |
237 | u32 fCTS; | 239 | __le32 fCTS; |
238 | u32 fCD; | 240 | __le32 fCD; |
239 | u32 fRI; | 241 | __le32 fRI; |
240 | u32 fHEAD; | 242 | __le32 fHEAD; |
241 | u32 fTAIL; | 243 | __le32 fTAIL; |
242 | u32 fSTATE; | 244 | __le32 fSTATE; |
243 | u32 fBLOCKREADINTR; | 245 | __le32 fBLOCKREADINTR; |
244 | u32 tail; | 246 | __le32 tail; |
245 | u32 head; | 247 | __le32 head; |
246 | }; | 248 | }; |
247 | 249 | ||
248 | #define GET_RX_CHANNEL_INFO(channel, param) \ | 250 | struct smd_channel_info_word_pair { |
249 | (channel->rx_info_word ? \ | 251 | struct smd_channel_info_word tx; |
250 | channel->rx_info_word->param : \ | 252 | struct smd_channel_info_word rx; |
251 | channel->rx_info->param) | 253 | }; |
252 | |||
253 | #define SET_RX_CHANNEL_INFO(channel, param, value) \ | ||
254 | (channel->rx_info_word ? \ | ||
255 | (channel->rx_info_word->param = value) : \ | ||
256 | (channel->rx_info->param = value)) | ||
257 | |||
258 | #define GET_TX_CHANNEL_INFO(channel, param) \ | ||
259 | (channel->tx_info_word ? \ | ||
260 | channel->tx_info_word->param : \ | ||
261 | channel->tx_info->param) | ||
262 | 254 | ||
263 | #define SET_TX_CHANNEL_INFO(channel, param, value) \ | 255 | #define GET_RX_CHANNEL_FLAG(channel, param) \ |
264 | (channel->tx_info_word ? \ | 256 | ({ \ |
265 | (channel->tx_info_word->param = value) : \ | 257 | BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ |
266 | (channel->tx_info->param = value)) | 258 | channel->info_word ? \ |
259 | le32_to_cpu(channel->info_word->rx.param) : \ | ||
260 | channel->info->rx.param; \ | ||
261 | }) | ||
262 | |||
263 | #define GET_RX_CHANNEL_INFO(channel, param) \ | ||
264 | ({ \ | ||
265 | BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ | ||
266 | le32_to_cpu(channel->info_word ? \ | ||
267 | channel->info_word->rx.param : \ | ||
268 | channel->info->rx.param); \ | ||
269 | }) | ||
270 | |||
271 | #define SET_RX_CHANNEL_FLAG(channel, param, value) \ | ||
272 | ({ \ | ||
273 | BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u8)); \ | ||
274 | if (channel->info_word) \ | ||
275 | channel->info_word->rx.param = cpu_to_le32(value); \ | ||
276 | else \ | ||
277 | channel->info->rx.param = value; \ | ||
278 | }) | ||
279 | |||
280 | #define SET_RX_CHANNEL_INFO(channel, param, value) \ | ||
281 | ({ \ | ||
282 | BUILD_BUG_ON(sizeof(channel->info->rx.param) != sizeof(u32)); \ | ||
283 | if (channel->info_word) \ | ||
284 | channel->info_word->rx.param = cpu_to_le32(value); \ | ||
285 | else \ | ||
286 | channel->info->rx.param = cpu_to_le32(value); \ | ||
287 | }) | ||
288 | |||
289 | #define GET_TX_CHANNEL_FLAG(channel, param) \ | ||
290 | ({ \ | ||
291 | BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ | ||
292 | channel->info_word ? \ | ||
293 | le32_to_cpu(channel->info_word->tx.param) : \ | ||
294 | channel->info->tx.param; \ | ||
295 | }) | ||
296 | |||
297 | #define GET_TX_CHANNEL_INFO(channel, param) \ | ||
298 | ({ \ | ||
299 | BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ | ||
300 | le32_to_cpu(channel->info_word ? \ | ||
301 | channel->info_word->tx.param : \ | ||
302 | channel->info->tx.param); \ | ||
303 | }) | ||
304 | |||
305 | #define SET_TX_CHANNEL_FLAG(channel, param, value) \ | ||
306 | ({ \ | ||
307 | BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u8)); \ | ||
308 | if (channel->info_word) \ | ||
309 | channel->info_word->tx.param = cpu_to_le32(value); \ | ||
310 | else \ | ||
311 | channel->info->tx.param = value; \ | ||
312 | }) | ||
313 | |||
314 | #define SET_TX_CHANNEL_INFO(channel, param, value) \ | ||
315 | ({ \ | ||
316 | BUILD_BUG_ON(sizeof(channel->info->tx.param) != sizeof(u32)); \ | ||
317 | if (channel->info_word) \ | ||
318 | channel->info_word->tx.param = cpu_to_le32(value); \ | ||
319 | else \ | ||
320 | channel->info->tx.param = cpu_to_le32(value); \ | ||
321 | }) | ||
267 | 322 | ||
268 | /** | 323 | /** |
269 | * struct qcom_smd_alloc_entry - channel allocation entry | 324 | * struct qcom_smd_alloc_entry - channel allocation entry |
@@ -274,9 +329,9 @@ struct smd_channel_info_word { | |||
274 | */ | 329 | */ |
275 | struct qcom_smd_alloc_entry { | 330 | struct qcom_smd_alloc_entry { |
276 | u8 name[20]; | 331 | u8 name[20]; |
277 | u32 cid; | 332 | __le32 cid; |
278 | u32 flags; | 333 | __le32 flags; |
279 | u32 ref_count; | 334 | __le32 ref_count; |
280 | } __packed; | 335 | } __packed; |
281 | 336 | ||
282 | #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff | 337 | #define SMD_CHANNEL_FLAGS_EDGE_MASK 0xff |
@@ -305,14 +360,14 @@ static void qcom_smd_signal_channel(struct qcom_smd_channel *channel) | |||
305 | static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) | 360 | static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) |
306 | { | 361 | { |
307 | SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); | 362 | SET_TX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); |
308 | SET_TX_CHANNEL_INFO(channel, fDSR, 0); | 363 | SET_TX_CHANNEL_FLAG(channel, fDSR, 0); |
309 | SET_TX_CHANNEL_INFO(channel, fCTS, 0); | 364 | SET_TX_CHANNEL_FLAG(channel, fCTS, 0); |
310 | SET_TX_CHANNEL_INFO(channel, fCD, 0); | 365 | SET_TX_CHANNEL_FLAG(channel, fCD, 0); |
311 | SET_TX_CHANNEL_INFO(channel, fRI, 0); | 366 | SET_TX_CHANNEL_FLAG(channel, fRI, 0); |
312 | SET_TX_CHANNEL_INFO(channel, fHEAD, 0); | 367 | SET_TX_CHANNEL_FLAG(channel, fHEAD, 0); |
313 | SET_TX_CHANNEL_INFO(channel, fTAIL, 0); | 368 | SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); |
314 | SET_TX_CHANNEL_INFO(channel, fSTATE, 1); | 369 | SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); |
315 | SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); | 370 | SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); |
316 | SET_TX_CHANNEL_INFO(channel, head, 0); | 371 | SET_TX_CHANNEL_INFO(channel, head, 0); |
317 | SET_TX_CHANNEL_INFO(channel, tail, 0); | 372 | SET_TX_CHANNEL_INFO(channel, tail, 0); |
318 | 373 | ||
@@ -350,12 +405,12 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, | |||
350 | 405 | ||
351 | dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); | 406 | dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); |
352 | 407 | ||
353 | SET_TX_CHANNEL_INFO(channel, fDSR, is_open); | 408 | SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); |
354 | SET_TX_CHANNEL_INFO(channel, fCTS, is_open); | 409 | SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); |
355 | SET_TX_CHANNEL_INFO(channel, fCD, is_open); | 410 | SET_TX_CHANNEL_FLAG(channel, fCD, is_open); |
356 | 411 | ||
357 | SET_TX_CHANNEL_INFO(channel, state, state); | 412 | SET_TX_CHANNEL_INFO(channel, state, state); |
358 | SET_TX_CHANNEL_INFO(channel, fSTATE, 1); | 413 | SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); |
359 | 414 | ||
360 | channel->state = state; | 415 | channel->state = state; |
361 | qcom_smd_signal_channel(channel); | 416 | qcom_smd_signal_channel(channel); |
@@ -364,20 +419,15 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, | |||
364 | /* | 419 | /* |
365 | * Copy count bytes of data using 32bit accesses, if that's required. | 420 | * Copy count bytes of data using 32bit accesses, if that's required. |
366 | */ | 421 | */ |
367 | static void smd_copy_to_fifo(void __iomem *_dst, | 422 | static void smd_copy_to_fifo(void __iomem *dst, |
368 | const void *_src, | 423 | const void *src, |
369 | size_t count, | 424 | size_t count, |
370 | bool word_aligned) | 425 | bool word_aligned) |
371 | { | 426 | { |
372 | u32 *dst = (u32 *)_dst; | ||
373 | u32 *src = (u32 *)_src; | ||
374 | |||
375 | if (word_aligned) { | 427 | if (word_aligned) { |
376 | count /= sizeof(u32); | 428 | __iowrite32_copy(dst, src, count / sizeof(u32)); |
377 | while (count--) | ||
378 | writel_relaxed(*src++, dst++); | ||
379 | } else { | 429 | } else { |
380 | memcpy_toio(_dst, _src, count); | 430 | memcpy_toio(dst, src, count); |
381 | } | 431 | } |
382 | } | 432 | } |
383 | 433 | ||
@@ -395,7 +445,7 @@ static void smd_copy_from_fifo(void *_dst, | |||
395 | if (word_aligned) { | 445 | if (word_aligned) { |
396 | count /= sizeof(u32); | 446 | count /= sizeof(u32); |
397 | while (count--) | 447 | while (count--) |
398 | *dst++ = readl_relaxed(src++); | 448 | *dst++ = __raw_readl(src++); |
399 | } else { | 449 | } else { |
400 | memcpy_fromio(_dst, _src, count); | 450 | memcpy_fromio(_dst, _src, count); |
401 | } | 451 | } |
@@ -412,7 +462,7 @@ static size_t qcom_smd_channel_peek(struct qcom_smd_channel *channel, | |||
412 | unsigned tail; | 462 | unsigned tail; |
413 | size_t len; | 463 | size_t len; |
414 | 464 | ||
415 | word_aligned = channel->rx_info_word != NULL; | 465 | word_aligned = channel->info_word; |
416 | tail = GET_RX_CHANNEL_INFO(channel, tail); | 466 | tail = GET_RX_CHANNEL_INFO(channel, tail); |
417 | 467 | ||
418 | len = min_t(size_t, count, channel->fifo_size - tail); | 468 | len = min_t(size_t, count, channel->fifo_size - tail); |
@@ -491,7 +541,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) | |||
491 | { | 541 | { |
492 | bool need_state_scan = false; | 542 | bool need_state_scan = false; |
493 | int remote_state; | 543 | int remote_state; |
494 | u32 pktlen; | 544 | __le32 pktlen; |
495 | int avail; | 545 | int avail; |
496 | int ret; | 546 | int ret; |
497 | 547 | ||
@@ -502,10 +552,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) | |||
502 | need_state_scan = true; | 552 | need_state_scan = true; |
503 | } | 553 | } |
504 | /* Indicate that we have seen any state change */ | 554 | /* Indicate that we have seen any state change */ |
505 | SET_RX_CHANNEL_INFO(channel, fSTATE, 0); | 555 | SET_RX_CHANNEL_FLAG(channel, fSTATE, 0); |
506 | 556 | ||
507 | /* Signal waiting qcom_smd_send() about the interrupt */ | 557 | /* Signal waiting qcom_smd_send() about the interrupt */ |
508 | if (!GET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR)) | 558 | if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) |
509 | wake_up_interruptible(&channel->fblockread_event); | 559 | wake_up_interruptible(&channel->fblockread_event); |
510 | 560 | ||
511 | /* Don't consume any data until we've opened the channel */ | 561 | /* Don't consume any data until we've opened the channel */ |
@@ -513,7 +563,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) | |||
513 | goto out; | 563 | goto out; |
514 | 564 | ||
515 | /* Indicate that we've seen the new data */ | 565 | /* Indicate that we've seen the new data */ |
516 | SET_RX_CHANNEL_INFO(channel, fHEAD, 0); | 566 | SET_RX_CHANNEL_FLAG(channel, fHEAD, 0); |
517 | 567 | ||
518 | /* Consume data */ | 568 | /* Consume data */ |
519 | for (;;) { | 569 | for (;;) { |
@@ -522,7 +572,7 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) | |||
522 | if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { | 572 | if (!channel->pkt_size && avail >= SMD_PACKET_HEADER_LEN) { |
523 | qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); | 573 | qcom_smd_channel_peek(channel, &pktlen, sizeof(pktlen)); |
524 | qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); | 574 | qcom_smd_channel_advance(channel, SMD_PACKET_HEADER_LEN); |
525 | channel->pkt_size = pktlen; | 575 | channel->pkt_size = le32_to_cpu(pktlen); |
526 | } else if (channel->pkt_size && avail >= channel->pkt_size) { | 576 | } else if (channel->pkt_size && avail >= channel->pkt_size) { |
527 | ret = qcom_smd_channel_recv_single(channel); | 577 | ret = qcom_smd_channel_recv_single(channel); |
528 | if (ret) | 578 | if (ret) |
@@ -533,10 +583,10 @@ static bool qcom_smd_channel_intr(struct qcom_smd_channel *channel) | |||
533 | } | 583 | } |
534 | 584 | ||
535 | /* Indicate that we have seen and updated tail */ | 585 | /* Indicate that we have seen and updated tail */ |
536 | SET_RX_CHANNEL_INFO(channel, fTAIL, 1); | 586 | SET_RX_CHANNEL_FLAG(channel, fTAIL, 1); |
537 | 587 | ||
538 | /* Signal the remote that we've consumed the data (if requested) */ | 588 | /* Signal the remote that we've consumed the data (if requested) */ |
539 | if (!GET_RX_CHANNEL_INFO(channel, fBLOCKREADINTR)) { | 589 | if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { |
540 | /* Ensure ordering of channel info updates */ | 590 | /* Ensure ordering of channel info updates */ |
541 | wmb(); | 591 | wmb(); |
542 | 592 | ||
@@ -627,7 +677,7 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, | |||
627 | unsigned head; | 677 | unsigned head; |
628 | size_t len; | 678 | size_t len; |
629 | 679 | ||
630 | word_aligned = channel->tx_info_word != NULL; | 680 | word_aligned = channel->info_word; |
631 | head = GET_TX_CHANNEL_INFO(channel, head); | 681 | head = GET_TX_CHANNEL_INFO(channel, head); |
632 | 682 | ||
633 | len = min_t(size_t, count, channel->fifo_size - head); | 683 | len = min_t(size_t, count, channel->fifo_size - head); |
@@ -665,12 +715,16 @@ static int qcom_smd_write_fifo(struct qcom_smd_channel *channel, | |||
665 | */ | 715 | */ |
666 | int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) | 716 | int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) |
667 | { | 717 | { |
668 | u32 hdr[5] = {len,}; | 718 | __le32 hdr[5] = { cpu_to_le32(len), }; |
669 | int tlen = sizeof(hdr) + len; | 719 | int tlen = sizeof(hdr) + len; |
670 | int ret; | 720 | int ret; |
671 | 721 | ||
672 | /* Word aligned channels only accept word size aligned data */ | 722 | /* Word aligned channels only accept word size aligned data */ |
673 | if (channel->rx_info_word != NULL && len % 4) | 723 | if (channel->info_word && len % 4) |
724 | return -EINVAL; | ||
725 | |||
726 | /* Reject packets that are too big */ | ||
727 | if (tlen >= channel->fifo_size) | ||
674 | return -EINVAL; | 728 | return -EINVAL; |
675 | 729 | ||
676 | ret = mutex_lock_interruptible(&channel->tx_lock); | 730 | ret = mutex_lock_interruptible(&channel->tx_lock); |
@@ -683,7 +737,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) | |||
683 | goto out; | 737 | goto out; |
684 | } | 738 | } |
685 | 739 | ||
686 | SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0); | 740 | SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0); |
687 | 741 | ||
688 | ret = wait_event_interruptible(channel->fblockread_event, | 742 | ret = wait_event_interruptible(channel->fblockread_event, |
689 | qcom_smd_get_tx_avail(channel) >= tlen || | 743 | qcom_smd_get_tx_avail(channel) >= tlen || |
@@ -691,15 +745,15 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len) | |||
691 | if (ret) | 745 | if (ret) |
692 | goto out; | 746 | goto out; |
693 | 747 | ||
694 | SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1); | 748 | SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); |
695 | } | 749 | } |
696 | 750 | ||
697 | SET_TX_CHANNEL_INFO(channel, fTAIL, 0); | 751 | SET_TX_CHANNEL_FLAG(channel, fTAIL, 0); |
698 | 752 | ||
699 | qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); | 753 | qcom_smd_write_fifo(channel, hdr, sizeof(hdr)); |
700 | qcom_smd_write_fifo(channel, data, len); | 754 | qcom_smd_write_fifo(channel, data, len); |
701 | 755 | ||
702 | SET_TX_CHANNEL_INFO(channel, fHEAD, 1); | 756 | SET_TX_CHANNEL_FLAG(channel, fHEAD, 1); |
703 | 757 | ||
704 | /* Ensure ordering of channel info updates */ | 758 | /* Ensure ordering of channel info updates */ |
705 | wmb(); | 759 | wmb(); |
@@ -727,6 +781,19 @@ static struct qcom_smd_driver *to_smd_driver(struct device *dev) | |||
727 | 781 | ||
728 | static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) | 782 | static int qcom_smd_dev_match(struct device *dev, struct device_driver *drv) |
729 | { | 783 | { |
784 | struct qcom_smd_device *qsdev = to_smd_device(dev); | ||
785 | struct qcom_smd_driver *qsdrv = container_of(drv, struct qcom_smd_driver, driver); | ||
786 | const struct qcom_smd_id *match = qsdrv->smd_match_table; | ||
787 | const char *name = qsdev->channel->name; | ||
788 | |||
789 | if (match) { | ||
790 | while (match->name[0]) { | ||
791 | if (!strcmp(match->name, name)) | ||
792 | return 1; | ||
793 | match++; | ||
794 | } | ||
795 | } | ||
796 | |||
730 | return of_driver_match_device(dev, drv); | 797 | return of_driver_match_device(dev, drv); |
731 | } | 798 | } |
732 | 799 | ||
@@ -854,10 +921,8 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node, | |||
854 | for_each_available_child_of_node(edge_node, child) { | 921 | for_each_available_child_of_node(edge_node, child) { |
855 | key = "qcom,smd-channels"; | 922 | key = "qcom,smd-channels"; |
856 | ret = of_property_read_string(child, key, &name); | 923 | ret = of_property_read_string(child, key, &name); |
857 | if (ret) { | 924 | if (ret) |
858 | of_node_put(child); | ||
859 | continue; | 925 | continue; |
860 | } | ||
861 | 926 | ||
862 | if (strcmp(name, channel) == 0) | 927 | if (strcmp(name, channel) == 0) |
863 | return child; | 928 | return child; |
@@ -880,19 +945,17 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) | |||
880 | if (channel->qsdev) | 945 | if (channel->qsdev) |
881 | return -EEXIST; | 946 | return -EEXIST; |
882 | 947 | ||
883 | node = qcom_smd_match_channel(edge->of_node, channel->name); | ||
884 | if (!node) { | ||
885 | dev_dbg(smd->dev, "no match for '%s'\n", channel->name); | ||
886 | return -ENXIO; | ||
887 | } | ||
888 | |||
889 | dev_dbg(smd->dev, "registering '%s'\n", channel->name); | 948 | dev_dbg(smd->dev, "registering '%s'\n", channel->name); |
890 | 949 | ||
891 | qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); | 950 | qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); |
892 | if (!qsdev) | 951 | if (!qsdev) |
893 | return -ENOMEM; | 952 | return -ENOMEM; |
894 | 953 | ||
895 | dev_set_name(&qsdev->dev, "%s.%s", edge->of_node->name, node->name); | 954 | node = qcom_smd_match_channel(edge->of_node, channel->name); |
955 | dev_set_name(&qsdev->dev, "%s.%s", | ||
956 | edge->of_node->name, | ||
957 | node ? node->name : channel->name); | ||
958 | |||
896 | qsdev->dev.parent = smd->dev; | 959 | qsdev->dev.parent = smd->dev; |
897 | qsdev->dev.bus = &qcom_smd_bus; | 960 | qsdev->dev.bus = &qcom_smd_bus; |
898 | qsdev->dev.release = qcom_smd_release_device; | 961 | qsdev->dev.release = qcom_smd_release_device; |
@@ -978,21 +1041,20 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed | |||
978 | spin_lock_init(&channel->recv_lock); | 1041 | spin_lock_init(&channel->recv_lock); |
979 | init_waitqueue_head(&channel->fblockread_event); | 1042 | init_waitqueue_head(&channel->fblockread_event); |
980 | 1043 | ||
981 | ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info, | 1044 | info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); |
982 | &info_size); | 1045 | if (IS_ERR(info)) { |
983 | if (ret) | 1046 | ret = PTR_ERR(info); |
984 | goto free_name_and_channel; | 1047 | goto free_name_and_channel; |
1048 | } | ||
985 | 1049 | ||
986 | /* | 1050 | /* |
987 | * Use the size of the item to figure out which channel info struct to | 1051 | * Use the size of the item to figure out which channel info struct to |
988 | * use. | 1052 | * use. |
989 | */ | 1053 | */ |
990 | if (info_size == 2 * sizeof(struct smd_channel_info_word)) { | 1054 | if (info_size == 2 * sizeof(struct smd_channel_info_word)) { |
991 | channel->tx_info_word = info; | 1055 | channel->info_word = info; |
992 | channel->rx_info_word = info + sizeof(struct smd_channel_info_word); | ||
993 | } else if (info_size == 2 * sizeof(struct smd_channel_info)) { | 1056 | } else if (info_size == 2 * sizeof(struct smd_channel_info)) { |
994 | channel->tx_info = info; | 1057 | channel->info = info; |
995 | channel->rx_info = info + sizeof(struct smd_channel_info); | ||
996 | } else { | 1058 | } else { |
997 | dev_err(smd->dev, | 1059 | dev_err(smd->dev, |
998 | "channel info of size %zu not supported\n", info_size); | 1060 | "channel info of size %zu not supported\n", info_size); |
@@ -1000,10 +1062,11 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed | |||
1000 | goto free_name_and_channel; | 1062 | goto free_name_and_channel; |
1001 | } | 1063 | } |
1002 | 1064 | ||
1003 | ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base, | 1065 | fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); |
1004 | &fifo_size); | 1066 | if (IS_ERR(fifo_base)) { |
1005 | if (ret) | 1067 | ret = PTR_ERR(fifo_base); |
1006 | goto free_name_and_channel; | 1068 | goto free_name_and_channel; |
1069 | } | ||
1007 | 1070 | ||
1008 | /* The channel consist of a rx and tx fifo of equal size */ | 1071 | /* The channel consist of a rx and tx fifo of equal size */ |
1009 | fifo_size /= 2; | 1072 | fifo_size /= 2; |
@@ -1040,20 +1103,19 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) | |||
1040 | unsigned long flags; | 1103 | unsigned long flags; |
1041 | unsigned fifo_id; | 1104 | unsigned fifo_id; |
1042 | unsigned info_id; | 1105 | unsigned info_id; |
1043 | int ret; | ||
1044 | int tbl; | 1106 | int tbl; |
1045 | int i; | 1107 | int i; |
1108 | u32 eflags, cid; | ||
1046 | 1109 | ||
1047 | for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { | 1110 | for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) { |
1048 | ret = qcom_smem_get(edge->remote_pid, | 1111 | alloc_tbl = qcom_smem_get(edge->remote_pid, |
1049 | smem_items[tbl].alloc_tbl_id, | 1112 | smem_items[tbl].alloc_tbl_id, NULL); |
1050 | (void **)&alloc_tbl, | 1113 | if (IS_ERR(alloc_tbl)) |
1051 | NULL); | ||
1052 | if (ret < 0) | ||
1053 | continue; | 1114 | continue; |
1054 | 1115 | ||
1055 | for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { | 1116 | for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) { |
1056 | entry = &alloc_tbl[i]; | 1117 | entry = &alloc_tbl[i]; |
1118 | eflags = le32_to_cpu(entry->flags); | ||
1057 | if (test_bit(i, edge->allocated[tbl])) | 1119 | if (test_bit(i, edge->allocated[tbl])) |
1058 | continue; | 1120 | continue; |
1059 | 1121 | ||
@@ -1063,14 +1125,15 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge) | |||
1063 | if (!entry->name[0]) | 1125 | if (!entry->name[0]) |
1064 | continue; | 1126 | continue; |
1065 | 1127 | ||
1066 | if (!(entry->flags & SMD_CHANNEL_FLAGS_PACKET)) | 1128 | if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) |
1067 | continue; | 1129 | continue; |
1068 | 1130 | ||
1069 | if ((entry->flags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) | 1131 | if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) |
1070 | continue; | 1132 | continue; |
1071 | 1133 | ||
1072 | info_id = smem_items[tbl].info_base_id + entry->cid; | 1134 | cid = le32_to_cpu(entry->cid); |
1073 | fifo_id = smem_items[tbl].fifo_base_id + entry->cid; | 1135 | info_id = smem_items[tbl].info_base_id + cid; |
1136 | fifo_id = smem_items[tbl].fifo_base_id + cid; | ||
1074 | 1137 | ||
1075 | channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); | 1138 | channel = qcom_smd_create_channel(edge, info_id, fifo_id, entry->name); |
1076 | if (IS_ERR(channel)) | 1139 | if (IS_ERR(channel)) |
@@ -1227,11 +1290,12 @@ static int qcom_smd_probe(struct platform_device *pdev) | |||
1227 | int num_edges; | 1290 | int num_edges; |
1228 | int ret; | 1291 | int ret; |
1229 | int i = 0; | 1292 | int i = 0; |
1293 | void *p; | ||
1230 | 1294 | ||
1231 | /* Wait for smem */ | 1295 | /* Wait for smem */ |
1232 | ret = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL, NULL); | 1296 | p = qcom_smem_get(QCOM_SMEM_HOST_ANY, smem_items[0].alloc_tbl_id, NULL); |
1233 | if (ret == -EPROBE_DEFER) | 1297 | if (PTR_ERR(p) == -EPROBE_DEFER) |
1234 | return ret; | 1298 | return PTR_ERR(p); |
1235 | 1299 | ||
1236 | num_edges = of_get_available_child_count(pdev->dev.of_node); | 1300 | num_edges = of_get_available_child_count(pdev->dev.of_node); |
1237 | array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); | 1301 | array_size = sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); |
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 52365188a1c2..19019aa092e8 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c | |||
@@ -92,9 +92,9 @@ | |||
92 | * @params: parameters to the command | 92 | * @params: parameters to the command |
93 | */ | 93 | */ |
94 | struct smem_proc_comm { | 94 | struct smem_proc_comm { |
95 | u32 command; | 95 | __le32 command; |
96 | u32 status; | 96 | __le32 status; |
97 | u32 params[2]; | 97 | __le32 params[2]; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | /** | 100 | /** |
@@ -106,10 +106,10 @@ struct smem_proc_comm { | |||
106 | * the default region. bits 0,1 are reserved | 106 | * the default region. bits 0,1 are reserved |
107 | */ | 107 | */ |
108 | struct smem_global_entry { | 108 | struct smem_global_entry { |
109 | u32 allocated; | 109 | __le32 allocated; |
110 | u32 offset; | 110 | __le32 offset; |
111 | u32 size; | 111 | __le32 size; |
112 | u32 aux_base; /* bits 1:0 reserved */ | 112 | __le32 aux_base; /* bits 1:0 reserved */ |
113 | }; | 113 | }; |
114 | #define AUX_BASE_MASK 0xfffffffc | 114 | #define AUX_BASE_MASK 0xfffffffc |
115 | 115 | ||
@@ -125,11 +125,11 @@ struct smem_global_entry { | |||
125 | */ | 125 | */ |
126 | struct smem_header { | 126 | struct smem_header { |
127 | struct smem_proc_comm proc_comm[4]; | 127 | struct smem_proc_comm proc_comm[4]; |
128 | u32 version[32]; | 128 | __le32 version[32]; |
129 | u32 initialized; | 129 | __le32 initialized; |
130 | u32 free_offset; | 130 | __le32 free_offset; |
131 | u32 available; | 131 | __le32 available; |
132 | u32 reserved; | 132 | __le32 reserved; |
133 | struct smem_global_entry toc[SMEM_ITEM_COUNT]; | 133 | struct smem_global_entry toc[SMEM_ITEM_COUNT]; |
134 | }; | 134 | }; |
135 | 135 | ||
@@ -143,12 +143,12 @@ struct smem_header { | |||
143 | * @reserved: reserved entries for later use | 143 | * @reserved: reserved entries for later use |
144 | */ | 144 | */ |
145 | struct smem_ptable_entry { | 145 | struct smem_ptable_entry { |
146 | u32 offset; | 146 | __le32 offset; |
147 | u32 size; | 147 | __le32 size; |
148 | u32 flags; | 148 | __le32 flags; |
149 | u16 host0; | 149 | __le16 host0; |
150 | u16 host1; | 150 | __le16 host1; |
151 | u32 reserved[8]; | 151 | __le32 reserved[8]; |
152 | }; | 152 | }; |
153 | 153 | ||
154 | /** | 154 | /** |
@@ -160,13 +160,14 @@ struct smem_ptable_entry { | |||
160 | * @entry: list of @smem_ptable_entry for the @num_entries partitions | 160 | * @entry: list of @smem_ptable_entry for the @num_entries partitions |
161 | */ | 161 | */ |
162 | struct smem_ptable { | 162 | struct smem_ptable { |
163 | u32 magic; | 163 | u8 magic[4]; |
164 | u32 version; | 164 | __le32 version; |
165 | u32 num_entries; | 165 | __le32 num_entries; |
166 | u32 reserved[5]; | 166 | __le32 reserved[5]; |
167 | struct smem_ptable_entry entry[]; | 167 | struct smem_ptable_entry entry[]; |
168 | }; | 168 | }; |
169 | #define SMEM_PTABLE_MAGIC 0x434f5424 /* "$TOC" */ | 169 | |
170 | static const u8 SMEM_PTABLE_MAGIC[] = { 0x24, 0x54, 0x4f, 0x43 }; /* "$TOC" */ | ||
170 | 171 | ||
171 | /** | 172 | /** |
172 | * struct smem_partition_header - header of the partitions | 173 | * struct smem_partition_header - header of the partitions |
@@ -181,15 +182,16 @@ struct smem_ptable { | |||
181 | * @reserved: for now reserved entries | 182 | * @reserved: for now reserved entries |
182 | */ | 183 | */ |
183 | struct smem_partition_header { | 184 | struct smem_partition_header { |
184 | u32 magic; | 185 | u8 magic[4]; |
185 | u16 host0; | 186 | __le16 host0; |
186 | u16 host1; | 187 | __le16 host1; |
187 | u32 size; | 188 | __le32 size; |
188 | u32 offset_free_uncached; | 189 | __le32 offset_free_uncached; |
189 | u32 offset_free_cached; | 190 | __le32 offset_free_cached; |
190 | u32 reserved[3]; | 191 | __le32 reserved[3]; |
191 | }; | 192 | }; |
192 | #define SMEM_PART_MAGIC 0x54525024 /* "$PRT" */ | 193 | |
194 | static const u8 SMEM_PART_MAGIC[] = { 0x24, 0x50, 0x52, 0x54 }; | ||
193 | 195 | ||
194 | /** | 196 | /** |
195 | * struct smem_private_entry - header of each item in the private partition | 197 | * struct smem_private_entry - header of each item in the private partition |
@@ -201,12 +203,12 @@ struct smem_partition_header { | |||
201 | * @reserved: for now reserved entry | 203 | * @reserved: for now reserved entry |
202 | */ | 204 | */ |
203 | struct smem_private_entry { | 205 | struct smem_private_entry { |
204 | u16 canary; | 206 | u16 canary; /* bytes are the same so no swapping needed */ |
205 | u16 item; | 207 | __le16 item; |
206 | u32 size; /* includes padding bytes */ | 208 | __le32 size; /* includes padding bytes */ |
207 | u16 padding_data; | 209 | __le16 padding_data; |
208 | u16 padding_hdr; | 210 | __le16 padding_hdr; |
209 | u32 reserved; | 211 | __le32 reserved; |
210 | }; | 212 | }; |
211 | #define SMEM_PRIVATE_CANARY 0xa5a5 | 213 | #define SMEM_PRIVATE_CANARY 0xa5a5 |
212 | 214 | ||
@@ -242,6 +244,45 @@ struct qcom_smem { | |||
242 | struct smem_region regions[0]; | 244 | struct smem_region regions[0]; |
243 | }; | 245 | }; |
244 | 246 | ||
247 | static struct smem_private_entry * | ||
248 | phdr_to_last_private_entry(struct smem_partition_header *phdr) | ||
249 | { | ||
250 | void *p = phdr; | ||
251 | |||
252 | return p + le32_to_cpu(phdr->offset_free_uncached); | ||
253 | } | ||
254 | |||
255 | static void *phdr_to_first_cached_entry(struct smem_partition_header *phdr) | ||
256 | { | ||
257 | void *p = phdr; | ||
258 | |||
259 | return p + le32_to_cpu(phdr->offset_free_cached); | ||
260 | } | ||
261 | |||
262 | static struct smem_private_entry * | ||
263 | phdr_to_first_private_entry(struct smem_partition_header *phdr) | ||
264 | { | ||
265 | void *p = phdr; | ||
266 | |||
267 | return p + sizeof(*phdr); | ||
268 | } | ||
269 | |||
270 | static struct smem_private_entry * | ||
271 | private_entry_next(struct smem_private_entry *e) | ||
272 | { | ||
273 | void *p = e; | ||
274 | |||
275 | return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + | ||
276 | le32_to_cpu(e->size); | ||
277 | } | ||
278 | |||
279 | static void *entry_to_item(struct smem_private_entry *e) | ||
280 | { | ||
281 | void *p = e; | ||
282 | |||
283 | return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); | ||
284 | } | ||
285 | |||
245 | /* Pointer to the one and only smem handle */ | 286 | /* Pointer to the one and only smem handle */ |
246 | static struct qcom_smem *__smem; | 287 | static struct qcom_smem *__smem; |
247 | 288 | ||
@@ -254,16 +295,16 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, | |||
254 | size_t size) | 295 | size_t size) |
255 | { | 296 | { |
256 | struct smem_partition_header *phdr; | 297 | struct smem_partition_header *phdr; |
257 | struct smem_private_entry *hdr; | 298 | struct smem_private_entry *hdr, *end; |
258 | size_t alloc_size; | 299 | size_t alloc_size; |
259 | void *p; | 300 | void *cached; |
260 | 301 | ||
261 | phdr = smem->partitions[host]; | 302 | phdr = smem->partitions[host]; |
303 | hdr = phdr_to_first_private_entry(phdr); | ||
304 | end = phdr_to_last_private_entry(phdr); | ||
305 | cached = phdr_to_first_cached_entry(phdr); | ||
262 | 306 | ||
263 | p = (void *)phdr + sizeof(*phdr); | 307 | while (hdr < end) { |
264 | while (p < (void *)phdr + phdr->offset_free_uncached) { | ||
265 | hdr = p; | ||
266 | |||
267 | if (hdr->canary != SMEM_PRIVATE_CANARY) { | 308 | if (hdr->canary != SMEM_PRIVATE_CANARY) { |
268 | dev_err(smem->dev, | 309 | dev_err(smem->dev, |
269 | "Found invalid canary in host %d partition\n", | 310 | "Found invalid canary in host %d partition\n", |
@@ -271,24 +312,23 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, | |||
271 | return -EINVAL; | 312 | return -EINVAL; |
272 | } | 313 | } |
273 | 314 | ||
274 | if (hdr->item == item) | 315 | if (le16_to_cpu(hdr->item) == item) |
275 | return -EEXIST; | 316 | return -EEXIST; |
276 | 317 | ||
277 | p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; | 318 | hdr = private_entry_next(hdr); |
278 | } | 319 | } |
279 | 320 | ||
280 | /* Check that we don't grow into the cached region */ | 321 | /* Check that we don't grow into the cached region */ |
281 | alloc_size = sizeof(*hdr) + ALIGN(size, 8); | 322 | alloc_size = sizeof(*hdr) + ALIGN(size, 8); |
282 | if (p + alloc_size >= (void *)phdr + phdr->offset_free_cached) { | 323 | if ((void *)hdr + alloc_size >= cached) { |
283 | dev_err(smem->dev, "Out of memory\n"); | 324 | dev_err(smem->dev, "Out of memory\n"); |
284 | return -ENOSPC; | 325 | return -ENOSPC; |
285 | } | 326 | } |
286 | 327 | ||
287 | hdr = p; | ||
288 | hdr->canary = SMEM_PRIVATE_CANARY; | 328 | hdr->canary = SMEM_PRIVATE_CANARY; |
289 | hdr->item = item; | 329 | hdr->item = cpu_to_le16(item); |
290 | hdr->size = ALIGN(size, 8); | 330 | hdr->size = cpu_to_le32(ALIGN(size, 8)); |
291 | hdr->padding_data = hdr->size - size; | 331 | hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); |
292 | hdr->padding_hdr = 0; | 332 | hdr->padding_hdr = 0; |
293 | 333 | ||
294 | /* | 334 | /* |
@@ -297,7 +337,7 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem, | |||
297 | * gets a consistent view of the linked list. | 337 | * gets a consistent view of the linked list. |
298 | */ | 338 | */ |
299 | wmb(); | 339 | wmb(); |
300 | phdr->offset_free_uncached += alloc_size; | 340 | le32_add_cpu(&phdr->offset_free_uncached, alloc_size); |
301 | 341 | ||
302 | return 0; | 342 | return 0; |
303 | } | 343 | } |
@@ -318,11 +358,11 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, | |||
318 | return -EEXIST; | 358 | return -EEXIST; |
319 | 359 | ||
320 | size = ALIGN(size, 8); | 360 | size = ALIGN(size, 8); |
321 | if (WARN_ON(size > header->available)) | 361 | if (WARN_ON(size > le32_to_cpu(header->available))) |
322 | return -ENOMEM; | 362 | return -ENOMEM; |
323 | 363 | ||
324 | entry->offset = header->free_offset; | 364 | entry->offset = header->free_offset; |
325 | entry->size = size; | 365 | entry->size = cpu_to_le32(size); |
326 | 366 | ||
327 | /* | 367 | /* |
328 | * Ensure the header is consistent before we mark the item allocated, | 368 | * Ensure the header is consistent before we mark the item allocated, |
@@ -330,10 +370,10 @@ static int qcom_smem_alloc_global(struct qcom_smem *smem, | |||
330 | * even though they do not take the spinlock on read. | 370 | * even though they do not take the spinlock on read. |
331 | */ | 371 | */ |
332 | wmb(); | 372 | wmb(); |
333 | entry->allocated = 1; | 373 | entry->allocated = cpu_to_le32(1); |
334 | 374 | ||
335 | header->free_offset += size; | 375 | le32_add_cpu(&header->free_offset, size); |
336 | header->available -= size; | 376 | le32_add_cpu(&header->available, -size); |
337 | 377 | ||
338 | return 0; | 378 | return 0; |
339 | } | 379 | } |
@@ -378,10 +418,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size) | |||
378 | } | 418 | } |
379 | EXPORT_SYMBOL(qcom_smem_alloc); | 419 | EXPORT_SYMBOL(qcom_smem_alloc); |
380 | 420 | ||
381 | static int qcom_smem_get_global(struct qcom_smem *smem, | 421 | static void *qcom_smem_get_global(struct qcom_smem *smem, |
382 | unsigned item, | 422 | unsigned item, |
383 | void **ptr, | 423 | size_t *size) |
384 | size_t *size) | ||
385 | { | 424 | { |
386 | struct smem_header *header; | 425 | struct smem_header *header; |
387 | struct smem_region *area; | 426 | struct smem_region *area; |
@@ -390,100 +429,94 @@ static int qcom_smem_get_global(struct qcom_smem *smem, | |||
390 | unsigned i; | 429 | unsigned i; |
391 | 430 | ||
392 | if (WARN_ON(item >= SMEM_ITEM_COUNT)) | 431 | if (WARN_ON(item >= SMEM_ITEM_COUNT)) |
393 | return -EINVAL; | 432 | return ERR_PTR(-EINVAL); |
394 | 433 | ||
395 | header = smem->regions[0].virt_base; | 434 | header = smem->regions[0].virt_base; |
396 | entry = &header->toc[item]; | 435 | entry = &header->toc[item]; |
397 | if (!entry->allocated) | 436 | if (!entry->allocated) |
398 | return -ENXIO; | 437 | return ERR_PTR(-ENXIO); |
399 | 438 | ||
400 | if (ptr != NULL) { | 439 | aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; |
401 | aux_base = entry->aux_base & AUX_BASE_MASK; | ||
402 | 440 | ||
403 | for (i = 0; i < smem->num_regions; i++) { | 441 | for (i = 0; i < smem->num_regions; i++) { |
404 | area = &smem->regions[i]; | 442 | area = &smem->regions[i]; |
405 | 443 | ||
406 | if (area->aux_base == aux_base || !aux_base) { | 444 | if (area->aux_base == aux_base || !aux_base) { |
407 | *ptr = area->virt_base + entry->offset; | 445 | if (size != NULL) |
408 | break; | 446 | *size = le32_to_cpu(entry->size); |
409 | } | 447 | return area->virt_base + le32_to_cpu(entry->offset); |
410 | } | 448 | } |
411 | } | 449 | } |
412 | if (size != NULL) | ||
413 | *size = entry->size; | ||
414 | 450 | ||
415 | return 0; | 451 | return ERR_PTR(-ENOENT); |
416 | } | 452 | } |
417 | 453 | ||
418 | static int qcom_smem_get_private(struct qcom_smem *smem, | 454 | static void *qcom_smem_get_private(struct qcom_smem *smem, |
419 | unsigned host, | 455 | unsigned host, |
420 | unsigned item, | 456 | unsigned item, |
421 | void **ptr, | 457 | size_t *size) |
422 | size_t *size) | ||
423 | { | 458 | { |
424 | struct smem_partition_header *phdr; | 459 | struct smem_partition_header *phdr; |
425 | struct smem_private_entry *hdr; | 460 | struct smem_private_entry *e, *end; |
426 | void *p; | ||
427 | 461 | ||
428 | phdr = smem->partitions[host]; | 462 | phdr = smem->partitions[host]; |
463 | e = phdr_to_first_private_entry(phdr); | ||
464 | end = phdr_to_last_private_entry(phdr); | ||
429 | 465 | ||
430 | p = (void *)phdr + sizeof(*phdr); | 466 | while (e < end) { |
431 | while (p < (void *)phdr + phdr->offset_free_uncached) { | 467 | if (e->canary != SMEM_PRIVATE_CANARY) { |
432 | hdr = p; | ||
433 | |||
434 | if (hdr->canary != SMEM_PRIVATE_CANARY) { | ||
435 | dev_err(smem->dev, | 468 | dev_err(smem->dev, |
436 | "Found invalid canary in host %d partition\n", | 469 | "Found invalid canary in host %d partition\n", |
437 | host); | 470 | host); |
438 | return -EINVAL; | 471 | return ERR_PTR(-EINVAL); |
439 | } | 472 | } |
440 | 473 | ||
441 | if (hdr->item == item) { | 474 | if (le16_to_cpu(e->item) == item) { |
442 | if (ptr != NULL) | ||
443 | *ptr = p + sizeof(*hdr) + hdr->padding_hdr; | ||
444 | |||
445 | if (size != NULL) | 475 | if (size != NULL) |
446 | *size = hdr->size - hdr->padding_data; | 476 | *size = le32_to_cpu(e->size) - |
477 | le16_to_cpu(e->padding_data); | ||
447 | 478 | ||
448 | return 0; | 479 | return entry_to_item(e); |
449 | } | 480 | } |
450 | 481 | ||
451 | p += sizeof(*hdr) + hdr->padding_hdr + hdr->size; | 482 | e = private_entry_next(e); |
452 | } | 483 | } |
453 | 484 | ||
454 | return -ENOENT; | 485 | return ERR_PTR(-ENOENT); |
455 | } | 486 | } |
456 | 487 | ||
457 | /** | 488 | /** |
458 | * qcom_smem_get() - resolve ptr of size of a smem item | 489 | * qcom_smem_get() - resolve ptr of size of a smem item |
459 | * @host: the remote processor, or -1 | 490 | * @host: the remote processor, or -1 |
460 | * @item: smem item handle | 491 | * @item: smem item handle |
461 | * @ptr: pointer to be filled out with address of the item | ||
462 | * @size: pointer to be filled out with size of the item | 492 | * @size: pointer to be filled out with size of the item |
463 | * | 493 | * |
464 | * Looks up pointer and size of a smem item. | 494 | * Looks up smem item and returns pointer to it. Size of smem |
495 | * item is returned in @size. | ||
465 | */ | 496 | */ |
466 | int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size) | 497 | void *qcom_smem_get(unsigned host, unsigned item, size_t *size) |
467 | { | 498 | { |
468 | unsigned long flags; | 499 | unsigned long flags; |
469 | int ret; | 500 | int ret; |
501 | void *ptr = ERR_PTR(-EPROBE_DEFER); | ||
470 | 502 | ||
471 | if (!__smem) | 503 | if (!__smem) |
472 | return -EPROBE_DEFER; | 504 | return ptr; |
473 | 505 | ||
474 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, | 506 | ret = hwspin_lock_timeout_irqsave(__smem->hwlock, |
475 | HWSPINLOCK_TIMEOUT, | 507 | HWSPINLOCK_TIMEOUT, |
476 | &flags); | 508 | &flags); |
477 | if (ret) | 509 | if (ret) |
478 | return ret; | 510 | return ERR_PTR(ret); |
479 | 511 | ||
480 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) | 512 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) |
481 | ret = qcom_smem_get_private(__smem, host, item, ptr, size); | 513 | ptr = qcom_smem_get_private(__smem, host, item, size); |
482 | else | 514 | else |
483 | ret = qcom_smem_get_global(__smem, item, ptr, size); | 515 | ptr = qcom_smem_get_global(__smem, item, size); |
484 | 516 | ||
485 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); | 517 | hwspin_unlock_irqrestore(__smem->hwlock, &flags); |
486 | return ret; | 518 | |
519 | return ptr; | ||
487 | 520 | ||
488 | } | 521 | } |
489 | EXPORT_SYMBOL(qcom_smem_get); | 522 | EXPORT_SYMBOL(qcom_smem_get); |
@@ -506,10 +539,11 @@ int qcom_smem_get_free_space(unsigned host) | |||
506 | 539 | ||
507 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { | 540 | if (host < SMEM_HOST_COUNT && __smem->partitions[host]) { |
508 | phdr = __smem->partitions[host]; | 541 | phdr = __smem->partitions[host]; |
509 | ret = phdr->offset_free_cached - phdr->offset_free_uncached; | 542 | ret = le32_to_cpu(phdr->offset_free_cached) - |
543 | le32_to_cpu(phdr->offset_free_uncached); | ||
510 | } else { | 544 | } else { |
511 | header = __smem->regions[0].virt_base; | 545 | header = __smem->regions[0].virt_base; |
512 | ret = header->available; | 546 | ret = le32_to_cpu(header->available); |
513 | } | 547 | } |
514 | 548 | ||
515 | return ret; | 549 | return ret; |
@@ -518,13 +552,11 @@ EXPORT_SYMBOL(qcom_smem_get_free_space); | |||
518 | 552 | ||
519 | static int qcom_smem_get_sbl_version(struct qcom_smem *smem) | 553 | static int qcom_smem_get_sbl_version(struct qcom_smem *smem) |
520 | { | 554 | { |
521 | unsigned *versions; | 555 | __le32 *versions; |
522 | size_t size; | 556 | size_t size; |
523 | int ret; | ||
524 | 557 | ||
525 | ret = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, | 558 | versions = qcom_smem_get_global(smem, SMEM_ITEM_VERSION, &size); |
526 | (void **)&versions, &size); | 559 | if (IS_ERR(versions)) { |
527 | if (ret < 0) { | ||
528 | dev_err(smem->dev, "Unable to read the version item\n"); | 560 | dev_err(smem->dev, "Unable to read the version item\n"); |
529 | return -ENOENT; | 561 | return -ENOENT; |
530 | } | 562 | } |
@@ -534,7 +566,7 @@ static int qcom_smem_get_sbl_version(struct qcom_smem *smem) | |||
534 | return -EINVAL; | 566 | return -EINVAL; |
535 | } | 567 | } |
536 | 568 | ||
537 | return versions[SMEM_MASTER_SBL_VERSION_INDEX]; | 569 | return le32_to_cpu(versions[SMEM_MASTER_SBL_VERSION_INDEX]); |
538 | } | 570 | } |
539 | 571 | ||
540 | static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | 572 | static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, |
@@ -544,35 +576,38 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
544 | struct smem_ptable_entry *entry; | 576 | struct smem_ptable_entry *entry; |
545 | struct smem_ptable *ptable; | 577 | struct smem_ptable *ptable; |
546 | unsigned remote_host; | 578 | unsigned remote_host; |
579 | u32 version, host0, host1; | ||
547 | int i; | 580 | int i; |
548 | 581 | ||
549 | ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; | 582 | ptable = smem->regions[0].virt_base + smem->regions[0].size - SZ_4K; |
550 | if (ptable->magic != SMEM_PTABLE_MAGIC) | 583 | if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) |
551 | return 0; | 584 | return 0; |
552 | 585 | ||
553 | if (ptable->version != 1) { | 586 | version = le32_to_cpu(ptable->version); |
587 | if (version != 1) { | ||
554 | dev_err(smem->dev, | 588 | dev_err(smem->dev, |
555 | "Unsupported partition header version %d\n", | 589 | "Unsupported partition header version %d\n", version); |
556 | ptable->version); | ||
557 | return -EINVAL; | 590 | return -EINVAL; |
558 | } | 591 | } |
559 | 592 | ||
560 | for (i = 0; i < ptable->num_entries; i++) { | 593 | for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { |
561 | entry = &ptable->entry[i]; | 594 | entry = &ptable->entry[i]; |
595 | host0 = le16_to_cpu(entry->host0); | ||
596 | host1 = le16_to_cpu(entry->host1); | ||
562 | 597 | ||
563 | if (entry->host0 != local_host && entry->host1 != local_host) | 598 | if (host0 != local_host && host1 != local_host) |
564 | continue; | 599 | continue; |
565 | 600 | ||
566 | if (!entry->offset) | 601 | if (!le32_to_cpu(entry->offset)) |
567 | continue; | 602 | continue; |
568 | 603 | ||
569 | if (!entry->size) | 604 | if (!le32_to_cpu(entry->size)) |
570 | continue; | 605 | continue; |
571 | 606 | ||
572 | if (entry->host0 == local_host) | 607 | if (host0 == local_host) |
573 | remote_host = entry->host1; | 608 | remote_host = host1; |
574 | else | 609 | else |
575 | remote_host = entry->host0; | 610 | remote_host = host0; |
576 | 611 | ||
577 | if (remote_host >= SMEM_HOST_COUNT) { | 612 | if (remote_host >= SMEM_HOST_COUNT) { |
578 | dev_err(smem->dev, | 613 | dev_err(smem->dev, |
@@ -588,21 +623,24 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
588 | return -EINVAL; | 623 | return -EINVAL; |
589 | } | 624 | } |
590 | 625 | ||
591 | header = smem->regions[0].virt_base + entry->offset; | 626 | header = smem->regions[0].virt_base + le32_to_cpu(entry->offset); |
627 | host0 = le16_to_cpu(header->host0); | ||
628 | host1 = le16_to_cpu(header->host1); | ||
592 | 629 | ||
593 | if (header->magic != SMEM_PART_MAGIC) { | 630 | if (memcmp(header->magic, SMEM_PART_MAGIC, |
631 | sizeof(header->magic))) { | ||
594 | dev_err(smem->dev, | 632 | dev_err(smem->dev, |
595 | "Partition %d has invalid magic\n", i); | 633 | "Partition %d has invalid magic\n", i); |
596 | return -EINVAL; | 634 | return -EINVAL; |
597 | } | 635 | } |
598 | 636 | ||
599 | if (header->host0 != local_host && header->host1 != local_host) { | 637 | if (host0 != local_host && host1 != local_host) { |
600 | dev_err(smem->dev, | 638 | dev_err(smem->dev, |
601 | "Partition %d hosts are invalid\n", i); | 639 | "Partition %d hosts are invalid\n", i); |
602 | return -EINVAL; | 640 | return -EINVAL; |
603 | } | 641 | } |
604 | 642 | ||
605 | if (header->host0 != remote_host && header->host1 != remote_host) { | 643 | if (host0 != remote_host && host1 != remote_host) { |
606 | dev_err(smem->dev, | 644 | dev_err(smem->dev, |
607 | "Partition %d hosts are invalid\n", i); | 645 | "Partition %d hosts are invalid\n", i); |
608 | return -EINVAL; | 646 | return -EINVAL; |
@@ -614,7 +652,7 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
614 | return -EINVAL; | 652 | return -EINVAL; |
615 | } | 653 | } |
616 | 654 | ||
617 | if (header->offset_free_uncached > header->size) { | 655 | if (le32_to_cpu(header->offset_free_uncached) > le32_to_cpu(header->size)) { |
618 | dev_err(smem->dev, | 656 | dev_err(smem->dev, |
619 | "Partition %d has invalid free pointer\n", i); | 657 | "Partition %d has invalid free pointer\n", i); |
620 | return -EINVAL; | 658 | return -EINVAL; |
@@ -626,37 +664,47 @@ static int qcom_smem_enumerate_partitions(struct qcom_smem *smem, | |||
626 | return 0; | 664 | return 0; |
627 | } | 665 | } |
628 | 666 | ||
629 | static int qcom_smem_count_mem_regions(struct platform_device *pdev) | 667 | static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev, |
668 | const char *name, int i) | ||
630 | { | 669 | { |
631 | struct resource *res; | 670 | struct device_node *np; |
632 | int num_regions = 0; | 671 | struct resource r; |
633 | int i; | 672 | int ret; |
634 | |||
635 | for (i = 0; i < pdev->num_resources; i++) { | ||
636 | res = &pdev->resource[i]; | ||
637 | 673 | ||
638 | if (resource_type(res) == IORESOURCE_MEM) | 674 | np = of_parse_phandle(dev->of_node, name, 0); |
639 | num_regions++; | 675 | if (!np) { |
676 | dev_err(dev, "No %s specified\n", name); | ||
677 | return -EINVAL; | ||
640 | } | 678 | } |
641 | 679 | ||
642 | return num_regions; | 680 | ret = of_address_to_resource(np, 0, &r); |
681 | of_node_put(np); | ||
682 | if (ret) | ||
683 | return ret; | ||
684 | |||
685 | smem->regions[i].aux_base = (u32)r.start; | ||
686 | smem->regions[i].size = resource_size(&r); | ||
687 | smem->regions[i].virt_base = devm_ioremap_nocache(dev, r.start, | ||
688 | resource_size(&r)); | ||
689 | if (!smem->regions[i].virt_base) | ||
690 | return -ENOMEM; | ||
691 | |||
692 | return 0; | ||
643 | } | 693 | } |
644 | 694 | ||
645 | static int qcom_smem_probe(struct platform_device *pdev) | 695 | static int qcom_smem_probe(struct platform_device *pdev) |
646 | { | 696 | { |
647 | struct smem_header *header; | 697 | struct smem_header *header; |
648 | struct device_node *np; | ||
649 | struct qcom_smem *smem; | 698 | struct qcom_smem *smem; |
650 | struct resource *res; | ||
651 | struct resource r; | ||
652 | size_t array_size; | 699 | size_t array_size; |
653 | int num_regions = 0; | 700 | int num_regions; |
654 | int hwlock_id; | 701 | int hwlock_id; |
655 | u32 version; | 702 | u32 version; |
656 | int ret; | 703 | int ret; |
657 | int i; | ||
658 | 704 | ||
659 | num_regions = qcom_smem_count_mem_regions(pdev) + 1; | 705 | num_regions = 1; |
706 | if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL)) | ||
707 | num_regions++; | ||
660 | 708 | ||
661 | array_size = num_regions * sizeof(struct smem_region); | 709 | array_size = num_regions * sizeof(struct smem_region); |
662 | smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); | 710 | smem = devm_kzalloc(&pdev->dev, sizeof(*smem) + array_size, GFP_KERNEL); |
@@ -666,39 +714,17 @@ static int qcom_smem_probe(struct platform_device *pdev) | |||
666 | smem->dev = &pdev->dev; | 714 | smem->dev = &pdev->dev; |
667 | smem->num_regions = num_regions; | 715 | smem->num_regions = num_regions; |
668 | 716 | ||
669 | np = of_parse_phandle(pdev->dev.of_node, "memory-region", 0); | 717 | ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0); |
670 | if (!np) { | ||
671 | dev_err(&pdev->dev, "No memory-region specified\n"); | ||
672 | return -EINVAL; | ||
673 | } | ||
674 | |||
675 | ret = of_address_to_resource(np, 0, &r); | ||
676 | of_node_put(np); | ||
677 | if (ret) | 718 | if (ret) |
678 | return ret; | 719 | return ret; |
679 | 720 | ||
680 | smem->regions[0].aux_base = (u32)r.start; | 721 | if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev, |
681 | smem->regions[0].size = resource_size(&r); | 722 | "qcom,rpm-msg-ram", 1))) |
682 | smem->regions[0].virt_base = devm_ioremap_nocache(&pdev->dev, | 723 | return ret; |
683 | r.start, | ||
684 | resource_size(&r)); | ||
685 | if (!smem->regions[0].virt_base) | ||
686 | return -ENOMEM; | ||
687 | |||
688 | for (i = 1; i < num_regions; i++) { | ||
689 | res = platform_get_resource(pdev, IORESOURCE_MEM, i - 1); | ||
690 | |||
691 | smem->regions[i].aux_base = (u32)res->start; | ||
692 | smem->regions[i].size = resource_size(res); | ||
693 | smem->regions[i].virt_base = devm_ioremap_nocache(&pdev->dev, | ||
694 | res->start, | ||
695 | resource_size(res)); | ||
696 | if (!smem->regions[i].virt_base) | ||
697 | return -ENOMEM; | ||
698 | } | ||
699 | 724 | ||
700 | header = smem->regions[0].virt_base; | 725 | header = smem->regions[0].virt_base; |
701 | if (header->initialized != 1 || header->reserved) { | 726 | if (le32_to_cpu(header->initialized) != 1 || |
727 | le32_to_cpu(header->reserved)) { | ||
702 | dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); | 728 | dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); |
703 | return -EINVAL; | 729 | return -EINVAL; |
704 | } | 730 | } |
@@ -730,8 +756,8 @@ static int qcom_smem_probe(struct platform_device *pdev) | |||
730 | 756 | ||
731 | static int qcom_smem_remove(struct platform_device *pdev) | 757 | static int qcom_smem_remove(struct platform_device *pdev) |
732 | { | 758 | { |
733 | __smem = NULL; | ||
734 | hwspin_lock_free(__smem->hwlock); | 759 | hwspin_lock_free(__smem->hwlock); |
760 | __smem = NULL; | ||
735 | 761 | ||
736 | return 0; | 762 | return 0; |
737 | } | 763 | } |
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 6e7d5ec65838..9e12000914b3 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
@@ -23,6 +23,8 @@ struct qcom_scm_hdcp_req { | |||
23 | u32 val; | 23 | u32 val; |
24 | }; | 24 | }; |
25 | 25 | ||
26 | extern bool qcom_scm_is_available(void); | ||
27 | |||
26 | extern bool qcom_scm_hdcp_available(void); | 28 | extern bool qcom_scm_hdcp_available(void); |
27 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, | 29 | extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, |
28 | u32 *resp); | 30 | u32 *resp); |
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h index d7e50aa6a4ac..d0cb6d189a0a 100644 --- a/include/linux/soc/qcom/smd.h +++ b/include/linux/soc/qcom/smd.h | |||
@@ -9,6 +9,14 @@ struct qcom_smd_channel; | |||
9 | struct qcom_smd_lookup; | 9 | struct qcom_smd_lookup; |
10 | 10 | ||
11 | /** | 11 | /** |
12 | * struct qcom_smd_id - struct used for matching a smd device | ||
13 | * @name: name of the channel | ||
14 | */ | ||
15 | struct qcom_smd_id { | ||
16 | char name[20]; | ||
17 | }; | ||
18 | |||
19 | /** | ||
12 | * struct qcom_smd_device - smd device struct | 20 | * struct qcom_smd_device - smd device struct |
13 | * @dev: the device struct | 21 | * @dev: the device struct |
14 | * @channel: handle to the smd channel for this device | 22 | * @channel: handle to the smd channel for this device |
@@ -21,6 +29,7 @@ struct qcom_smd_device { | |||
21 | /** | 29 | /** |
22 | * struct qcom_smd_driver - smd driver struct | 30 | * struct qcom_smd_driver - smd driver struct |
23 | * @driver: underlying device driver | 31 | * @driver: underlying device driver |
32 | * @smd_match_table: static channel match table | ||
24 | * @probe: invoked when the smd channel is found | 33 | * @probe: invoked when the smd channel is found |
25 | * @remove: invoked when the smd channel is closed | 34 | * @remove: invoked when the smd channel is closed |
26 | * @callback: invoked when an inbound message is received on the channel, | 35 | * @callback: invoked when an inbound message is received on the channel, |
@@ -29,6 +38,8 @@ struct qcom_smd_device { | |||
29 | */ | 38 | */ |
30 | struct qcom_smd_driver { | 39 | struct qcom_smd_driver { |
31 | struct device_driver driver; | 40 | struct device_driver driver; |
41 | const struct qcom_smd_id *smd_match_table; | ||
42 | |||
32 | int (*probe)(struct qcom_smd_device *dev); | 43 | int (*probe)(struct qcom_smd_device *dev); |
33 | void (*remove)(struct qcom_smd_device *dev); | 44 | void (*remove)(struct qcom_smd_device *dev); |
34 | int (*callback)(struct qcom_smd_device *, const void *, size_t); | 45 | int (*callback)(struct qcom_smd_device *, const void *, size_t); |
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h index bc9630d3aced..785e196ee2ca 100644 --- a/include/linux/soc/qcom/smem.h +++ b/include/linux/soc/qcom/smem.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #define QCOM_SMEM_HOST_ANY -1 | 4 | #define QCOM_SMEM_HOST_ANY -1 |
5 | 5 | ||
6 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size); | 6 | int qcom_smem_alloc(unsigned host, unsigned item, size_t size); |
7 | int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size); | 7 | void *qcom_smem_get(unsigned host, unsigned item, size_t *size); |
8 | 8 | ||
9 | int qcom_smem_get_free_space(unsigned host); | 9 | int qcom_smem_get_free_space(unsigned host); |
10 | 10 | ||