summaryrefslogtreecommitdiffstats
path: root/drivers/rpmsg
diff options
context:
space:
mode:
author: Bjorn Andersson <bjorn.andersson@linaro.org> 2018-02-13 14:04:04 -0500
committer: Bjorn Andersson <bjorn.andersson@linaro.org> 2018-03-19 22:52:54 -0400
commit: 29fc9b3873607d01b1ff1ae077982cf5629010af (patch)
tree: df164e56cfbaebac29baf4f92352769029838e6c /drivers/rpmsg
parent: 33e3820dda8876792bd8135db633c741a07263be (diff)
rpmsg: glink: Use spinlock in tx path
Switch the tx_lock to a spinlock to allow clients to use rpmsg_trysend() from atomic context. In order to allow clients to sleep while waiting for space in the FIFO, we release the lock temporarily around the delay; this should eventually be replaced by sending a READ_NOTIF and waiting for the remote to signal us that space has been made available. Tested-by: Srinivas Kandagatla <srinivas.kandagatla@linaro.org> Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Diffstat (limited to 'drivers/rpmsg')
-rw-r--r-- drivers/rpmsg/qcom_glink_native.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index e0f31ed096a5..768ef542a841 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -113,7 +113,7 @@ struct qcom_glink {
113 spinlock_t rx_lock; 113 spinlock_t rx_lock;
114 struct list_head rx_queue; 114 struct list_head rx_queue;
115 115
116 struct mutex tx_lock; 116 spinlock_t tx_lock;
117 117
118 spinlock_t idr_lock; 118 spinlock_t idr_lock;
119 struct idr lcids; 119 struct idr lcids;
@@ -288,15 +288,14 @@ static int qcom_glink_tx(struct qcom_glink *glink,
288 const void *data, size_t dlen, bool wait) 288 const void *data, size_t dlen, bool wait)
289{ 289{
290 unsigned int tlen = hlen + dlen; 290 unsigned int tlen = hlen + dlen;
291 int ret; 291 unsigned long flags;
292 int ret = 0;
292 293
293 /* Reject packets that are too big */ 294 /* Reject packets that are too big */
294 if (tlen >= glink->tx_pipe->length) 295 if (tlen >= glink->tx_pipe->length)
295 return -EINVAL; 296 return -EINVAL;
296 297
297 ret = mutex_lock_interruptible(&glink->tx_lock); 298 spin_lock_irqsave(&glink->tx_lock, flags);
298 if (ret)
299 return ret;
300 299
301 while (qcom_glink_tx_avail(glink) < tlen) { 300 while (qcom_glink_tx_avail(glink) < tlen) {
302 if (!wait) { 301 if (!wait) {
@@ -304,7 +303,12 @@ static int qcom_glink_tx(struct qcom_glink *glink,
304 goto out; 303 goto out;
305 } 304 }
306 305
306 /* Wait without holding the tx_lock */
307 spin_unlock_irqrestore(&glink->tx_lock, flags);
308
307 usleep_range(10000, 15000); 309 usleep_range(10000, 15000);
310
311 spin_lock_irqsave(&glink->tx_lock, flags);
308 } 312 }
309 313
310 qcom_glink_tx_write(glink, hdr, hlen, data, dlen); 314 qcom_glink_tx_write(glink, hdr, hlen, data, dlen);
@@ -313,7 +317,7 @@ static int qcom_glink_tx(struct qcom_glink *glink,
313 mbox_client_txdone(glink->mbox_chan, 0); 317 mbox_client_txdone(glink->mbox_chan, 0);
314 318
315out: 319out:
316 mutex_unlock(&glink->tx_lock); 320 spin_unlock_irqrestore(&glink->tx_lock, flags);
317 321
318 return ret; 322 return ret;
319} 323}
@@ -1567,7 +1571,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
1567 glink->features = features; 1571 glink->features = features;
1568 glink->intentless = intentless; 1572 glink->intentless = intentless;
1569 1573
1570 mutex_init(&glink->tx_lock); 1574 spin_lock_init(&glink->tx_lock);
1571 spin_lock_init(&glink->rx_lock); 1575 spin_lock_init(&glink->rx_lock);
1572 INIT_LIST_HEAD(&glink->rx_queue); 1576 INIT_LIST_HEAD(&glink->rx_queue);
1573 INIT_WORK(&glink->rx_work, qcom_glink_work); 1577 INIT_WORK(&glink->rx_work, qcom_glink_work);