author    Linus Torvalds <torvalds@linux-foundation.org>  2018-04-10 15:04:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-04-10 15:04:54 -0400
commit    9ab89c407d904c284558bbcd285eb3baef9d8c07 (patch)
tree      2190cc0bd4ec6520ba40b60ade62cbe971024d36
parent    f77cfbe6455a67d4e9b69f08f07fc62cd11b0674 (diff)
parent    0d72038c303c616a63415a07366f916b5edc3830 (diff)
Merge tag 'rpmsg-v4.17' of git://github.com/andersson/remoteproc
Pull rpmsg updates from Bjorn Andersson:
- transition the rpmsg_trysend() code paths of SMD and GLINK to use
non-sleeping locks
- revert the overly optimistic handling of discovered SMD channels
- fix an issue in SMD where incoming messages race with the probing of
a client driver
* tag 'rpmsg-v4.17' of git://github.com/andersson/remoteproc:
rpmsg: smd: Use announce_create to process any receive work
rpmsg: Only invoke announce_create for rpdev with endpoints
rpmsg: smd: Fix container_of macros
Revert "rpmsg: smd: Create device for all channels"
rpmsg: glink: Use spinlock in tx path
rpmsg: smd: Use spinlock in tx path
rpmsg: smd: use put_device() if device_register fail
rpmsg: glink: use put_device() if device_register fail
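The first item above is the standard recipe for making a TX path safe to call from atomic context: guard the FIFO with a spinlock and never sleep while holding it. The blocking path drops the lock, waits, and reacquires it, while the trysend path (wait == false) bails out with -EAGAIN instead of sleeping, which is why mutex_lock_interruptible() had to go. Below is a minimal userspace sketch of that discipline using pthread spinlocks; struct tx_pipe, fifo_avail() and fifo_write() are illustrative stand-ins, not the drivers' API.

```c
/*
 * Minimal userspace sketch of the locking discipline used in this pull:
 * guard the TX path with a spinlock and never sleep while holding it;
 * drop the lock, wait, then reacquire. All names are illustrative.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>
#include <time.h>

struct tx_pipe {
	pthread_spinlock_t tx_lock;
	size_t avail;			/* free space in the FIFO */
};

static size_t fifo_avail(struct tx_pipe *p) { return p->avail; }

static void fifo_write(struct tx_pipe *p, const void *data, size_t len)
{
	(void)data;
	p->avail -= len;		/* pretend we copied into the FIFO */
}

static int pipe_send(struct tx_pipe *p, const void *data, size_t len, bool wait)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
	int ret = 0;

	pthread_spin_lock(&p->tx_lock);
	while (fifo_avail(p) < len) {
		if (!wait) {
			ret = -EAGAIN;	/* trysend path: never sleeps */
			goto out;
		}
		/* Sleep without holding the spinlock. */
		pthread_spin_unlock(&p->tx_lock);
		nanosleep(&ts, NULL);
		pthread_spin_lock(&p->tx_lock);
	}
	fifo_write(p, data, len);
out:
	pthread_spin_unlock(&p->tx_lock);
	return ret;
}

int main(void)
{
	struct tx_pipe p = { .avail = 64 };
	char msg[16] = "hello";

	pthread_spin_init(&p.tx_lock, PTHREAD_PROCESS_PRIVATE);
	return pipe_send(&p, msg, sizeof(msg), false) ? 1 : 0;
}
```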
 drivers/rpmsg/qcom_glink_native.c | 18
 drivers/rpmsg/qcom_glink_smem.c   |  3
 drivers/rpmsg/qcom_smd.c          | 51
 drivers/rpmsg/rpmsg_core.c        |  2
 4 files changed, 52 insertions(+), 22 deletions(-)
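The two put_device() commits, and the matching err_put_dev/unregister_dev hunks below, apply the driver-core refcount rule: a failed device_register() still leaves the caller holding the reference taken during initialization, so the device must be released with put_device(), which frees it through its release callback, never freed directly; once registration has succeeded, teardown goes through device_unregister(). Here is a compact module-context sketch of that rule; the demo_edge names are made up for illustration and are not from the rpmsg drivers.

```c
/* Sketch of the device_register()/put_device() refcount rule. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>

struct demo_edge {
	struct device dev;
};

static void demo_edge_release(struct device *dev)
{
	/* release() owns the final free; callers only drop references */
	kfree(container_of(dev, struct demo_edge, dev));
}

static struct demo_edge *demo_edge_register(struct device *parent)
{
	struct demo_edge *edge;
	int ret;

	edge = kzalloc(sizeof(*edge), GFP_KERNEL);
	if (!edge)
		return ERR_PTR(-ENOMEM);

	edge->dev.parent = parent;
	edge->dev.release = demo_edge_release;
	dev_set_name(&edge->dev, "demo_edge");

	ret = device_register(&edge->dev);
	if (ret) {
		/*
		 * device_register() took a reference even on failure, so
		 * drop it with put_device(); release() does the freeing.
		 * Never kfree() the device directly here.
		 */
		put_device(&edge->dev);
		return ERR_PTR(ret);
	}

	/* After a successful register, later errors unwind with
	 * device_unregister(&edge->dev) instead. */
	return edge;
}

static struct demo_edge *edge;

static int __init demo_init(void)
{
	edge = demo_edge_register(NULL);
	return PTR_ERR_OR_ZERO(edge);
}

static void __exit demo_exit(void)
{
	device_unregister(&edge->dev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```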
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index e0f31ed096a5..768ef542a841 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -113,7 +113,7 @@ struct qcom_glink {
 	spinlock_t rx_lock;
 	struct list_head rx_queue;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 
 	spinlock_t idr_lock;
 	struct idr lcids;
@@ -288,15 +288,14 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			 const void *data, size_t dlen, bool wait)
 {
 	unsigned int tlen = hlen + dlen;
-	int ret;
+	unsigned long flags;
+	int ret = 0;
 
 	/* Reject packets that are too big */
 	if (tlen >= glink->tx_pipe->length)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&glink->tx_lock);
-	if (ret)
-		return ret;
+	spin_lock_irqsave(&glink->tx_lock, flags);
 
 	while (qcom_glink_tx_avail(glink) < tlen) {
 		if (!wait) {
@@ -304,7 +303,12 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 			goto out;
 		}
 
+		/* Wait without holding the tx_lock */
+		spin_unlock_irqrestore(&glink->tx_lock, flags);
+
 		usleep_range(10000, 15000);
+
+		spin_lock_irqsave(&glink->tx_lock, flags);
 	}
 
 	qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -313,7 +317,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
 	mbox_client_txdone(glink->mbox_chan, 0);
 
 out:
-	mutex_unlock(&glink->tx_lock);
+	spin_unlock_irqrestore(&glink->tx_lock, flags);
 
 	return ret;
 }
@@ -1567,7 +1571,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
 	glink->features = features;
 	glink->intentless = intentless;
 
-	mutex_init(&glink->tx_lock);
+	spin_lock_init(&glink->tx_lock);
 	spin_lock_init(&glink->rx_lock);
 	INIT_LIST_HEAD(&glink->rx_queue);
 	INIT_WORK(&glink->rx_work, qcom_glink_work);
diff --git a/drivers/rpmsg/qcom_glink_smem.c b/drivers/rpmsg/qcom_glink_smem.c
index 892f2b92a4d8..3fa9d43e2c87 100644
--- a/drivers/rpmsg/qcom_glink_smem.c
+++ b/drivers/rpmsg/qcom_glink_smem.c
@@ -217,6 +217,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
 	ret = device_register(dev);
 	if (ret) {
 		pr_err("failed to register glink edge\n");
+		put_device(dev);
 		return ERR_PTR(ret);
 	}
 
@@ -299,7 +300,7 @@ struct qcom_glink *qcom_glink_smem_register(struct device *parent,
 	return glink;
 
 err_put_dev:
-	put_device(dev);
+	device_unregister(dev);
 
 	return ERR_PTR(ret);
 }
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 92d0c6a7a837..5ce9bf7b897d 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -167,9 +167,9 @@ struct qcom_smd_endpoint {
 	struct qcom_smd_channel *qsch;
 };
 
-#define to_smd_device(_rpdev)	container_of(_rpdev, struct qcom_smd_device, rpdev)
+#define to_smd_device(r)	container_of(r, struct qcom_smd_device, rpdev)
 #define to_smd_edge(d)		container_of(d, struct qcom_smd_edge, dev)
-#define to_smd_endpoint(ept)	container_of(ept, struct qcom_smd_endpoint, ept)
+#define to_smd_endpoint(e)	container_of(e, struct qcom_smd_endpoint, ept)
 
 /**
  * struct qcom_smd_channel - smd channel struct
@@ -205,7 +205,7 @@ struct qcom_smd_channel {
 	struct smd_channel_info_pair *info;
 	struct smd_channel_info_word_pair *info_word;
 
-	struct mutex tx_lock;
+	spinlock_t tx_lock;
 	wait_queue_head_t fblockread_event;
 
 	void *tx_fifo;
@@ -729,6 +729,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 {
 	__le32 hdr[5] = { cpu_to_le32(len), };
 	int tlen = sizeof(hdr) + len;
+	unsigned long flags;
 	int ret;
 
 	/* Word aligned channels only accept word size aligned data */
@@ -739,9 +740,11 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 	if (tlen >= channel->fifo_size)
 		return -EINVAL;
 
-	ret = mutex_lock_interruptible(&channel->tx_lock);
-	if (ret)
-		return ret;
+	/* Highlight the fact that if we enter the loop below we might sleep */
+	if (wait)
+		might_sleep();
+
+	spin_lock_irqsave(&channel->tx_lock, flags);
 
 	while (qcom_smd_get_tx_avail(channel) < tlen &&
 	       channel->state == SMD_CHANNEL_OPENED) {
@@ -753,7 +756,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
 
 		/* Wait without holding the tx_lock */
-		mutex_unlock(&channel->tx_lock);
+		spin_unlock_irqrestore(&channel->tx_lock, flags);
 
 		ret = wait_event_interruptible(channel->fblockread_event,
 					       qcom_smd_get_tx_avail(channel) >= tlen ||
@@ -761,9 +764,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 		if (ret)
 			return ret;
 
-		ret = mutex_lock_interruptible(&channel->tx_lock);
-		if (ret)
-			return ret;
+		spin_lock_irqsave(&channel->tx_lock, flags);
 
 		SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1);
 	}
@@ -787,7 +788,7 @@ static int __qcom_smd_send(struct qcom_smd_channel *channel, const void *data,
 	qcom_smd_signal_channel(channel);
 
 out_unlock:
-	mutex_unlock(&channel->tx_lock);
+	spin_unlock_irqrestore(&channel->tx_lock, flags);
 
 	return ret;
 }
@@ -996,8 +997,26 @@ static struct device_node *qcom_smd_match_channel(struct device_node *edge_node,
 	return NULL;
 }
 
+static int qcom_smd_announce_create(struct rpmsg_device *rpdev)
+{
+	struct qcom_smd_endpoint *qept = to_smd_endpoint(rpdev->ept);
+	struct qcom_smd_channel *channel = qept->qsch;
+	unsigned long flags;
+	bool kick_state;
+
+	spin_lock_irqsave(&channel->recv_lock, flags);
+	kick_state = qcom_smd_channel_intr(channel);
+	spin_unlock_irqrestore(&channel->recv_lock, flags);
+
+	if (kick_state)
+		schedule_work(&channel->edge->state_work);
+
+	return 0;
+}
+
 static const struct rpmsg_device_ops qcom_smd_device_ops = {
 	.create_ept = qcom_smd_create_ept,
+	.announce_create = qcom_smd_announce_create,
 };
 
 static const struct rpmsg_endpoint_ops qcom_smd_endpoint_ops = {
@@ -1090,7 +1109,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *edge,
 	if (!channel->name)
 		return ERR_PTR(-ENOMEM);
 
-	mutex_init(&channel->tx_lock);
+	spin_lock_init(&channel->tx_lock);
 	spin_lock_init(&channel->recv_lock);
 	init_waitqueue_head(&channel->fblockread_event);
 	init_waitqueue_head(&channel->state_change_event);
@@ -1234,6 +1253,11 @@ static void qcom_channel_state_worker(struct work_struct *work)
 		if (channel->state != SMD_CHANNEL_CLOSED)
 			continue;
 
+		remote_state = GET_RX_CHANNEL_INFO(channel, state);
+		if (remote_state != SMD_CHANNEL_OPENING &&
+		    remote_state != SMD_CHANNEL_OPENED)
+			continue;
+
 		if (channel->registered)
 			continue;
 
@@ -1408,6 +1432,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
 	ret = device_register(&edge->dev);
 	if (ret) {
 		pr_err("failed to register smd edge\n");
+		put_device(&edge->dev);
 		return ERR_PTR(ret);
 	}
 
@@ -1428,7 +1453,7 @@ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
 	return edge;
 
 unregister_dev:
-	put_device(&edge->dev);
+	device_unregister(&edge->dev);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(qcom_smd_register_edge);
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 5a081762afcc..920a02f0462c 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -442,7 +442,7 @@ static int rpmsg_dev_probe(struct device *dev)
 		goto out;
 	}
 
-	if (rpdev->ops->announce_create)
+	if (ept && rpdev->ops->announce_create)
 		err = rpdev->ops->announce_create(rpdev);
 out:
 	return err;
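The hunk above pairs with the SMD changes earlier in this series: rpmsg_dev_probe() now only calls announce_create() when an endpoint was actually created, and qcom_smd_announce_create() uses that hook, after the client driver has probed, to process any data that arrived before the driver's callback was in place. A small userspace sketch of that buffer-until-probe pattern follows; all names are illustrative, not the rpmsg API.

```c
/*
 * Userspace analogue of the probe race fix: data can arrive before the
 * client has registered its callback, so queue it and drain the backlog
 * once registration completes. Names are illustrative.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PENDING 8

struct channel {
	void (*cb)(const char *msg);	/* NULL until the client probes */
	char pending[MAX_PENDING][32];
	int npending;
};

/* IRQ-style receive path: deliver if possible, otherwise queue. */
static void channel_rx(struct channel *ch, const char *msg)
{
	if (ch->cb)
		ch->cb(msg);
	else if (ch->npending < MAX_PENDING)
		strncpy(ch->pending[ch->npending++], msg, 31);
}

/* Runs after the client's probe(): register the callback, then drain. */
static void channel_announce_create(struct channel *ch, void (*cb)(const char *))
{
	ch->cb = cb;
	for (int i = 0; i < ch->npending; i++)
		cb(ch->pending[i]);
	ch->npending = 0;
}

static void client_cb(const char *msg)
{
	printf("client got: %s\n", msg);
}

int main(void)
{
	struct channel ch = { 0 };

	channel_rx(&ch, "early message");	 /* arrives before probe */
	channel_announce_create(&ch, client_cb); /* probe done: drain backlog */
	channel_rx(&ch, "later message");
	return 0;
}
```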