author		Mark Brown <broonie@kernel.org>	2014-12-09 16:38:05 -0500
committer	Mark Brown <broonie@kernel.org>	2014-12-11 07:23:51 -0500
commit		0461a4149836c792d186027c8c859637a4cfb11a (patch)
tree		38b297f0532201cec0e3562bcd41f48d065c3416
parent		983aee5d7090cf12b624f18533777caa09d067b1 (diff)
spi: Pump transfers inside calling context for spi_sync()
If we are using the standard SPI message pump (which all drivers should be transitioning over to) then special case the message enqueue and, instead of starting the worker thread to push messages to the hardware, do so in the context of the caller if the controller is idle. This avoids a context switch in the common case where the controller has a single user in a single thread; for short PIO transfers there may be no need to context switch away from the calling context to complete the transfer.

The code is a bit more complex than is desirable, in part due to the need to handle drivers not using the standard queue and in part due to handling the various combinations of bus locking and asynchronous submission in interrupt context.

It is still suboptimal since it will still wake the message pump for each transfer in order to schedule idling of the hardware, and if multiple contexts are using the controller simultaneously a caller may end up pumping a message for some random other thread rather than for itself; if the thread ends up deferring due to another context idling the hardware then it will just busy wait. It can, however, have the benefit of aggregating power up and down of the hardware when a caller performs a series of transfers back to back without any need for the use of spi_async().

Signed-off-by: Mark Brown <broonie@kernel.org>
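For illustration, a minimal sketch of the kind of caller this fast path helps: a consumer driver issuing several short synchronous transfers back to back. The function, buffer and loop count below are hypothetical and not part of this patch; only spi_sync(), spi_message_init() and spi_message_add_tail() are existing kernel APIs.

#include <linux/spi/spi.h>

/* Read four short blocks with back-to-back spi_sync() calls. With this
 * patch each message can be pumped in the caller's context while the
 * controller is idle, avoiding a context switch per message and letting
 * the hardware stay powered up across the whole burst.
 */
static int example_read_regs(struct spi_device *spi, u8 *buf, size_t len)
{
	struct spi_message msg;
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = len,
	};
	int i, ret;

	for (i = 0; i < 4; i++) {
		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);

		ret = spi_sync(spi, &msg);
		if (ret)
			return ret;
	}

	return 0;
}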
-rw-r--r--	drivers/spi/spi.c	66
-rw-r--r--	include/linux/spi/spi.h	2
2 files changed, 60 insertions, 8 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 0bc752d17be5..e1bf2579b9c0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -882,6 +882,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
  * needs processing and if so call out to the driver to initialize hardware
  * and transfer each message.
  *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
  */
 static void spi_pump_messages(struct kthread_work *work)
 {
@@ -900,6 +903,13 @@ static void spi_pump_messages(struct kthread_work *work)
 		return;
 	}
 
+	/* If another context is idling the device then defer */
+	if (master->idling) {
+		queue_kthread_work(&master->kworker, &master->pump_messages);
+		spin_unlock_irqrestore(&master->queue_lock, flags);
+		return;
+	}
+
 	/* Check if the queue is idle */
 	if (list_empty(&master->queue) || !master->running) {
 		if (!master->busy) {
@@ -907,7 +917,9 @@ static void spi_pump_messages(struct kthread_work *work)
 			return;
 		}
 		master->busy = false;
+		master->idling = true;
 		spin_unlock_irqrestore(&master->queue_lock, flags);
+
 		kfree(master->dummy_rx);
 		master->dummy_rx = NULL;
 		kfree(master->dummy_tx);
@@ -921,6 +933,10 @@ static void spi_pump_messages(struct kthread_work *work)
 			pm_runtime_put_autosuspend(master->dev.parent);
 		}
 		trace_spi_master_idle(master);
+
+		spin_lock_irqsave(&master->queue_lock, flags);
+		master->idling = false;
+		spin_unlock_irqrestore(&master->queue_lock, flags);
 		return;
 	}
 
@@ -1161,12 +1177,9 @@ static int spi_destroy_queue(struct spi_master *master)
 	return 0;
 }
 
-/**
- * spi_queued_transfer - transfer function for queued transfers
- * @spi: spi device which is requesting transfer
- * @msg: spi message which is to handled is queued to driver queue
- */
-static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+static int __spi_queued_transfer(struct spi_device *spi,
+				 struct spi_message *msg,
+				 bool need_pump)
 {
 	struct spi_master *master = spi->master;
 	unsigned long flags;
@@ -1181,13 +1194,23 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &master->queue);
-	if (!master->busy)
+	if (!master->busy && need_pump)
 		queue_kthread_work(&master->kworker, &master->pump_messages);
 
 	spin_unlock_irqrestore(&master->queue_lock, flags);
 	return 0;
 }
 
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message which is to handled is queued to driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+	return __spi_queued_transfer(spi, msg, true);
+}
+
 static int spi_master_initialize_queue(struct spi_master *master)
 {
 	int ret;
@@ -2102,19 +2125,46 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_master *master = spi->master;
+	unsigned long flags;
+
+	status = __spi_validate(spi, message);
+	if (status != 0)
+		return status;
 
 	message->complete = spi_complete;
 	message->context = &done;
+	message->spi = spi;
 
 	if (!bus_locked)
 		mutex_lock(&master->bus_lock_mutex);
 
-	status = spi_async_locked(spi, message);
+	/* If we're not using the legacy transfer method then we will
+	 * try to transfer in the calling context so special case.
+	 * This code would be less tricky if we could remove the
+	 * support for driver implemented message queues.
+	 */
+	if (master->transfer == spi_queued_transfer) {
+		spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+		trace_spi_message_submit(message);
+
+		status = __spi_queued_transfer(spi, message, false);
+
+		spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+	} else {
+		status = spi_async_locked(spi, message);
+	}
 
 	if (!bus_locked)
 		mutex_unlock(&master->bus_lock_mutex);
 
 	if (status == 0) {
+		/* Push out the messages in the calling context if we
+		 * can.
+		 */
+		if (master->transfer == spi_queued_transfer)
+			spi_pump_messages(&master->pump_messages);
+
 		wait_for_completion(&done);
 		status = message->status;
 	}
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a6ef2a8e6de4..4e6db75e9469 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -260,6 +260,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @pump_messages: work struct for scheduling work to the message pump
  * @queue_lock: spinlock to syncronise access to message queue
  * @queue: message queue
+ * @idling: the device is entering idle state
  * @cur_msg: the currently in-flight message
  * @cur_msg_prepared: spi_prepare_message was called for the currently
  *                    in-flight message
@@ -425,6 +426,7 @@ struct spi_master {
 	spinlock_t		queue_lock;
 	struct list_head	queue;
 	struct spi_message	*cur_msg;
+	bool			idling;
 	bool			busy;
 	bool			running;
 	bool			rt;
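As the commit message notes, all drivers should be transitioning over to the standard message pump, and the new fast path only triggers when master->transfer is spi_queued_transfer. A minimal sketch of that opt-in on the controller side follows; the "foo" driver and its PIO details are hypothetical and not part of this patch, but a driver that fills in transfer_one and leaves ->transfer unset does get spi_queued_transfer installed by the core at registration time.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_transfer_one(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* Do the PIO for xfer->tx_buf / xfer->rx_buf here. Return 0 if
	 * the transfer finished synchronously, or 1 and complete it
	 * later via spi_finalize_current_transfer(), e.g. from an IRQ.
	 */
	return 0;
}

static int foo_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	master->bus_num = -1;
	master->num_chipselect = 1;
	master->transfer_one = foo_transfer_one;
	/* No ->transfer method: the core sets up the standard queue and
	 * installs spi_queued_transfer for us.
	 */

	return devm_spi_register_master(&pdev->dev, master);
}

static struct platform_driver foo_spi_driver = {
	.probe	= foo_probe,
	.driver	= { .name = "foo-spi" },
};
module_platform_driver(foo_spi_driver);

MODULE_LICENSE("GPL");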