summary refs log tree commit diff stats
path: root/drivers/firmware
diff options
context:
space:
mode:
authorSudeep Holla <sudeep.holla@arm.com>2017-07-20 09:39:57 -0400
committerSudeep Holla <sudeep.holla@arm.com>2018-02-28 11:37:57 -0500
commitd4c3751a8de2deeaae546b97650f895b62bbd1b4 (patch)
tree98d921a362ac0aba8182b9c59ad7bf60cec28b2a /drivers/firmware
parentbc40081d9825c7ed34501ebfc0a533047a07b16c (diff)
firmware: arm_scmi: add support for polling based SCMI transfers
It would be useful to have options to perform some SCMI transfers atomically by polling for the completion flag instead of being interrupt driven. The SCMI specification has an option to disable the interrupt and poll for the completion flag in the shared memory. This patch adds support for polling-based SCMI transfers using that option. This might be used for uninterrupted/atomic DVFS operations from the scheduler context. Cc: Arnd Bergmann <arnd@arndb.de> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
Diffstat (limited to 'drivers/firmware')
-rw-r--r--drivers/firmware/arm_scmi/driver.c55
1 file changed, 47 insertions, 8 deletions
diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
index f242b2e7c4b1..cf8a1719d425 100644
--- a/drivers/firmware/arm_scmi/driver.c
+++ b/drivers/firmware/arm_scmi/driver.c
@@ -18,10 +18,12 @@
18#include <linux/export.h> 18#include <linux/export.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/ktime.h>
21#include <linux/mailbox_client.h> 22#include <linux/mailbox_client.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/of_address.h> 24#include <linux/of_address.h>
24#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/processor.h>
25#include <linux/semaphore.h> 27#include <linux/semaphore.h>
26#include <linux/slab.h> 28#include <linux/slab.h>
27 29
@@ -335,6 +337,30 @@ void scmi_one_xfer_put(const struct scmi_handle *handle, struct scmi_xfer *xfer)
335 spin_unlock_irqrestore(&minfo->xfer_lock, flags); 337 spin_unlock_irqrestore(&minfo->xfer_lock, flags);
336} 338}
337 339
340static bool
341scmi_xfer_poll_done(const struct scmi_info *info, struct scmi_xfer *xfer)
342{
343 struct scmi_shared_mem *mem = info->tx_payload;
344 u16 xfer_id = MSG_XTRACT_TOKEN(le32_to_cpu(mem->msg_header));
345
346 if (xfer->hdr.seq != xfer_id)
347 return false;
348
349 return le32_to_cpu(mem->channel_status) &
350 (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
351 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
352}
353
354#define SCMI_MAX_POLL_TO_NS (100 * NSEC_PER_USEC)
355
356static bool scmi_xfer_done_no_timeout(const struct scmi_info *info,
357 struct scmi_xfer *xfer, ktime_t stop)
358{
359 ktime_t __cur = ktime_get();
360
361 return scmi_xfer_poll_done(info, xfer) || ktime_after(__cur, stop);
362}
363
338/** 364/**
339 * scmi_do_xfer() - Do one transfer 365 * scmi_do_xfer() - Do one transfer
340 * 366 *
@@ -361,15 +387,28 @@ int scmi_do_xfer(const struct scmi_handle *handle, struct scmi_xfer *xfer)
361 /* mbox_send_message returns non-negative value on success, so reset */ 387 /* mbox_send_message returns non-negative value on success, so reset */
362 ret = 0; 388 ret = 0;
363 389
364 /* And we wait for the response. */ 390 if (xfer->hdr.poll_completion) {
365 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 391 ktime_t stop = ktime_add_ns(ktime_get(), SCMI_MAX_POLL_TO_NS);
366 if (!wait_for_completion_timeout(&xfer->done, timeout)) { 392
367 dev_err(dev, "mbox timed out in resp(caller: %pS)\n", 393 spin_until_cond(scmi_xfer_done_no_timeout(info, xfer, stop));
368 (void *)_RET_IP_); 394
369 ret = -ETIMEDOUT; 395 if (ktime_before(ktime_get(), stop))
370 } else if (xfer->hdr.status) { 396 scmi_fetch_response(xfer, info->tx_payload);
371 ret = scmi_to_linux_errno(xfer->hdr.status); 397 else
398 ret = -ETIMEDOUT;
399 } else {
400 /* And we wait for the response. */
401 timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms);
402 if (!wait_for_completion_timeout(&xfer->done, timeout)) {
403 dev_err(dev, "mbox timed out in resp(caller: %pS)\n",
404 (void *)_RET_IP_);
405 ret = -ETIMEDOUT;
406 }
372 } 407 }
408
409 if (!ret && xfer->hdr.status)
410 ret = scmi_to_linux_errno(xfer->hdr.status);
411
373 /* 412 /*
374 * NOTE: we might prefer not to need the mailbox ticker to manage the 413 * NOTE: we might prefer not to need the mailbox ticker to manage the
375 * transfer queueing since the protocol layer queues things by itself. 414 * transfer queueing since the protocol layer queues things by itself.