author     Boris Brezillon <boris.brezillon@bootlin.com>   2018-11-06 11:05:32 -0500
committer  Mark Brown <broonie@kernel.org>                  2018-11-20 11:26:42 -0500
commit     f86c24f4795303e4024bc113196de32782f6ccb5 (patch)
tree       c88321f68ca10c147ef97133ae823ba3a3d69678 /drivers
parent     0ebb261a0b2d090de618a383d2378d4a00834958 (diff)
spi: spi-mem: Split spi_mem_exec_op() code
The logic surrounding the ->exec_op() call also applies to the direct
mapping accessors. Move this code into separate functions to avoid
duplicating it.
Signed-off-by: Boris Brezillon <boris.brezillon@bootlin.com>
Reviewed-by: Miquel Raynal <miquel.raynal@bootlin.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
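
For context, the new spi_mem_access_start()/spi_mem_access_end() helpers factor out the bracketing around a memory operation (queue flush, runtime PM get/put, bus and IO locking) so that other accessors can reuse it. Below is a rough, illustrative sketch of that intended reuse, assuming a later direct mapping patch adds a descriptor type and a ->dirmap_read() hook; spi_mem_dirmap_desc and dirmap_read are hypothetical here and not part of this commit:

/*
 * Illustrative sketch only, not part of this patch: a direct mapping
 * read accessor reusing spi_mem_access_start/end(). The descriptor
 * struct and the ->dirmap_read() hook are assumed to come from a
 * follow-up patch.
 */
static ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
                                   u64 offs, size_t len, void *buf)
{
        struct spi_controller *ctlr = desc->mem->spi->controller;
        ssize_t ret;

        /* Same bracketing as spi_mem_exec_op(): flush, PM get, lock. */
        ret = spi_mem_access_start(desc->mem);
        if (ret)
                return ret;

        ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

        /* Drop the locks and the runtime PM reference. */
        spi_mem_access_end(desc->mem);

        return ret;
}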
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/spi/spi-mem.c   63
1 file changed, 42 insertions(+), 21 deletions(-)
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index 967f581bca4f..7916e655afc8 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -213,6 +213,44 @@ bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
 }
 EXPORT_SYMBOL_GPL(spi_mem_supports_op);
 
+static int spi_mem_access_start(struct spi_mem *mem)
+{
+        struct spi_controller *ctlr = mem->spi->controller;
+
+        /*
+         * Flush the message queue before executing our SPI memory
+         * operation to prevent preemption of regular SPI transfers.
+         */
+        spi_flush_queue(ctlr);
+
+        if (ctlr->auto_runtime_pm) {
+                int ret;
+
+                ret = pm_runtime_get_sync(ctlr->dev.parent);
+                if (ret < 0) {
+                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
+                                ret);
+                        return ret;
+                }
+        }
+
+        mutex_lock(&ctlr->bus_lock_mutex);
+        mutex_lock(&ctlr->io_mutex);
+
+        return 0;
+}
+
+static void spi_mem_access_end(struct spi_mem *mem)
+{
+        struct spi_controller *ctlr = mem->spi->controller;
+
+        mutex_unlock(&ctlr->io_mutex);
+        mutex_unlock(&ctlr->bus_lock_mutex);
+
+        if (ctlr->auto_runtime_pm)
+                pm_runtime_put(ctlr->dev.parent);
+}
+
 /**
  * spi_mem_exec_op() - Execute a memory operation
  * @mem: the SPI memory
@@ -242,30 +280,13 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
                 return -ENOTSUPP;
 
         if (ctlr->mem_ops) {
-                /*
-                 * Flush the message queue before executing our SPI memory
-                 * operation to prevent preemption of regular SPI transfers.
-                 */
-                spi_flush_queue(ctlr);
-
-                if (ctlr->auto_runtime_pm) {
-                        ret = pm_runtime_get_sync(ctlr->dev.parent);
-                        if (ret < 0) {
-                                dev_err(&ctlr->dev,
-                                        "Failed to power device: %d\n",
-                                        ret);
-                                return ret;
-                        }
-                }
+                ret = spi_mem_access_start(mem);
+                if (ret)
+                        return ret;
 
-                mutex_lock(&ctlr->bus_lock_mutex);
-                mutex_lock(&ctlr->io_mutex);
                 ret = ctlr->mem_ops->exec_op(mem, op);
-                mutex_unlock(&ctlr->io_mutex);
-                mutex_unlock(&ctlr->bus_lock_mutex);
 
-                if (ctlr->auto_runtime_pm)
-                        pm_runtime_put(ctlr->dev.parent);
+                spi_mem_access_end(mem);
 
                 /*
                  * Some controllers only optimize specific paths (typically the