author	Andy Gross <agross@codeaurora.org>	2015-03-04 05:02:05 -0500
committer	Mark Brown <broonie@kernel.org>	2015-03-07 06:21:20 -0500
commit	612762e82ae6058d69b4ce734598491bf030afe7 (patch)
tree	3031966b4c6c9a1aa5a68615ffc5deb7c3c35ad1
parent	c517d838eb7d07bbe9507871fab3931deccff539 (diff)
spi: qup: Add DMA capabilities
This patch adds DMA capabilities to the spi-qup driver. If DMA channels
are present, the QUP will use DMA instead of block mode for transfers
to/from SPI peripherals whenever the transaction is larger than the
length of a block.

Signed-off-by: Andy Gross <agross@codeaurora.org>
Signed-off-by: Stanimir Varbanov <stanimir.varbanov@linaro.org>
Reviewed-by: Ivan T. Ivanov <iivanov@mm-sol.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
-rw-r--r--	Documentation/devicetree/bindings/spi/qcom,spi-qup.txt	8
-rw-r--r--	drivers/spi/spi-qup.c	336
2 files changed, 312 insertions, 32 deletions
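
In short, the driver keeps FIFO mode for transfers that fit in the QUP
input FIFO and escalates to block mode or, when channels are available,
BAM DMA for anything larger. A standalone sketch (not kernel code) of
that word-count arithmetic, mirroring spi_qup_get_mode() in the diff
below; the 64-word FIFO size here is an assumed value, not one probed
from hardware:

#include <stdio.h>

/* bytes per SPI word, as derived from bits_per_word in the driver */
static int word_size(int bits_per_word)
{
	if (bits_per_word <= 8)
		return 1;
	if (bits_per_word <= 16)
		return 2;
	return 4;
}

int main(void)
{
	int in_fifo_sz = 64 * 4;	/* assumed FIFO size in bytes */
	int len = 256, bpw = 8;		/* example transfer */
	int n_words = len / word_size(bpw);

	/* same comparison the driver makes against in_fifo_sz/sizeof(u32) */
	if (n_words <= in_fifo_sz / 4)
		printf("FIFO mode (%d words fit)\n", n_words);
	else
		printf("BLOCK or DMA mode (%d words)\n", n_words);
	return 0;
}

For a 256-byte, 8-bit-per-word transfer this prints the BLOCK/DMA case,
which is exactly when the new DMA path can take over.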
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
index e2c88df2cc15..5c090771c016 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -33,6 +33,11 @@ Optional properties:
 		nodes.  If unspecified, a single SPI device without a chip
 		select can be used.
 
+- dmas:         Two DMA channel specifiers following the convention outlined
+                in bindings/dma/dma.txt
+- dma-names:    Names for the dma channels, if present. There must be at
+                least one channel named "tx" for transmit and named "rx" for
+                receive.
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -51,6 +56,9 @@ Example:
 		clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
 		clock-names = "core", "iface";
 
+		dmas = <&blsp1_bam 13>, <&blsp1_bam 12>;
+		dma-names = "rx", "tx";
+
 		pinctrl-names = "default";
 		pinctrl-0 = <&spi8_default>;
 
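
On the consumer side, the two channels are looked up by the names given
in dma-names. A minimal sketch of that lookup, mirroring
spi_qup_init_dma() in the driver diff further down (the helper name
request_qup_dma is illustrative, not part of the patch):

#include <linux/dmaengine.h>
#include <linux/err.h>

static int request_qup_dma(struct device *dev, struct dma_chan **rx,
			   struct dma_chan **tx)
{
	/* returns ERR_PTR on failure, including -EPROBE_DEFER */
	*rx = dma_request_slave_channel_reason(dev, "rx");
	if (IS_ERR(*rx))
		return PTR_ERR(*rx);

	*tx = dma_request_slave_channel_reason(dev, "tx");
	if (IS_ERR(*tx)) {
		dma_release_channel(*rx);
		return PTR_ERR(*tx);
	}

	return 0;
}

Propagating -EPROBE_DEFER matters here: the BAM DMA controller may
probe after the SPI controller, and the patch below handles exactly
that case in spi_qup_probe().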
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index ff9cdbdb6672..4b5fc4d67b6e 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -22,6 +22,8 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/spi/spi.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
 
 #define QUP_CONFIG		0x0000
 #define QUP_STATE		0x0004
@@ -116,6 +118,8 @@
 
 #define SPI_NUM_CHIPSELECTS		4
 
+#define SPI_MAX_DMA_XFER		(SZ_64K - 64)
+
 /* high speed mode is when bus rate is greater then 26MHz */
 #define SPI_HS_MIN_RATE			26000000
 #define SPI_MAX_RATE			50000000
@@ -140,9 +144,14 @@ struct spi_qup {
 	struct completion	done;
 	int			error;
 	int			w_size;	/* bytes per SPI word */
+	int			n_words;
 	int			tx_bytes;
 	int			rx_bytes;
 	int			qup_v1;
+
+	int			use_dma;
+	struct dma_slave_config	rx_conf;
+	struct dma_slave_config	tx_conf;
 };
 
 
@@ -198,7 +207,6 @@ static int spi_qup_set_state(struct spi_qup *controller, u32 state)
 	return 0;
 }
 
-
 static void spi_qup_fifo_read(struct spi_qup *controller,
 			      struct spi_transfer *xfer)
 {
@@ -266,6 +274,107 @@ static void spi_qup_fifo_write(struct spi_qup *controller,
 	}
 }
 
+static void spi_qup_dma_done(void *data)
+{
+	struct spi_qup *qup = data;
+
+	complete(&qup->done);
+}
+
+static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
+			   enum dma_transfer_direction dir,
+			   dma_async_tx_callback callback)
+{
+	struct spi_qup *qup = spi_master_get_devdata(master);
+	unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sgl;
+	struct dma_chan *chan;
+	dma_cookie_t cookie;
+	unsigned int nents;
+
+	if (dir == DMA_MEM_TO_DEV) {
+		chan = master->dma_tx;
+		nents = xfer->tx_sg.nents;
+		sgl = xfer->tx_sg.sgl;
+	} else {
+		chan = master->dma_rx;
+		nents = xfer->rx_sg.nents;
+		sgl = xfer->rx_sg.sgl;
+	}
+
+	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+	if (!desc)
+		return -EINVAL;
+
+	desc->callback = callback;
+	desc->callback_param = qup;
+
+	cookie = dmaengine_submit(desc);
+
+	return dma_submit_error(cookie);
+}
+
+static void spi_qup_dma_terminate(struct spi_master *master,
+				  struct spi_transfer *xfer)
+{
+	if (xfer->tx_buf)
+		dmaengine_terminate_all(master->dma_tx);
+	if (xfer->rx_buf)
+		dmaengine_terminate_all(master->dma_rx);
+}
+
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+{
+	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+	int ret;
+
+	if (xfer->rx_buf)
+		rx_done = spi_qup_dma_done;
+	else if (xfer->tx_buf)
+		tx_done = spi_qup_dma_done;
+
+	if (xfer->rx_buf) {
+		ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+		if (ret)
+			return ret;
+
+		dma_async_issue_pending(master->dma_rx);
+	}
+
+	if (xfer->tx_buf) {
+		ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+		if (ret)
+			return ret;
+
+		dma_async_issue_pending(master->dma_tx);
+	}
+
+	return 0;
+}
+
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+{
+	struct spi_qup *qup = spi_master_get_devdata(master);
+	int ret;
+
+	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set RUN state\n");
+		return ret;
+	}
+
+	ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
+	if (ret) {
+		dev_warn(qup->dev, "cannot set PAUSE state\n");
+		return ret;
+	}
+
+	spi_qup_fifo_write(qup, xfer);
+
+	return 0;
+}
+
 static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 {
 	struct spi_qup *controller = dev_id;
@@ -315,11 +424,13 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 		error = -EIO;
 	}
 
-	if (opflags & QUP_OP_IN_SERVICE_FLAG)
-		spi_qup_fifo_read(controller, xfer);
+	if (!controller->use_dma) {
+		if (opflags & QUP_OP_IN_SERVICE_FLAG)
+			spi_qup_fifo_read(controller, xfer);
 
-	if (opflags & QUP_OP_OUT_SERVICE_FLAG)
-		spi_qup_fifo_write(controller, xfer);
+		if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+			spi_qup_fifo_write(controller, xfer);
+	}
 
 	spin_lock_irqsave(&controller->lock, flags);
 	controller->error = error;
@@ -332,13 +443,35 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static u32
+spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
+{
+	struct spi_qup *qup = spi_master_get_devdata(master);
+	u32 mode;
+
+	qup->w_size = 4;
+
+	if (xfer->bits_per_word <= 8)
+		qup->w_size = 1;
+	else if (xfer->bits_per_word <= 16)
+		qup->w_size = 2;
+
+	qup->n_words = xfer->len / qup->w_size;
+
+	if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
+		mode = QUP_IO_M_MODE_FIFO;
+	else
+		mode = QUP_IO_M_MODE_BLOCK;
+
+	return mode;
+}
 
 /* set clock freq ... bits per word */
 static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 {
 	struct spi_qup *controller = spi_master_get_devdata(spi->master);
 	u32 config, iomode, mode, control;
-	int ret, n_words, w_size;
+	int ret, n_words;
 
 	if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
 		dev_err(controller->dev, "too big size for loopback %d > %d\n",
@@ -358,35 +491,54 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 		return -EIO;
 	}
 
-	w_size = 4;
-	if (xfer->bits_per_word <= 8)
-		w_size = 1;
-	else if (xfer->bits_per_word <= 16)
-		w_size = 2;
-
-	n_words = xfer->len / w_size;
-	controller->w_size = w_size;
+	mode = spi_qup_get_mode(spi->master, xfer);
+	n_words = controller->n_words;
 
-	if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
-		mode = QUP_IO_M_MODE_FIFO;
+	if (mode == QUP_IO_M_MODE_FIFO) {
 		writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
 		writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
 		/* must be zero for FIFO */
 		writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
 		writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
-	} else {
-		mode = QUP_IO_M_MODE_BLOCK;
+	} else if (!controller->use_dma) {
 		writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
 		writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
 		/* must be zero for BLOCK and BAM */
 		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
 		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+	} else {
+		mode = QUP_IO_M_MODE_BAM;
+		writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+		writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+
+		if (!controller->qup_v1) {
+			void __iomem *input_cnt;
+
+			input_cnt = controller->base + QUP_MX_INPUT_CNT;
+			/*
+			 * for DMA transfers, both QUP_MX_INPUT_CNT and
+			 * QUP_MX_OUTPUT_CNT must be zero to all cases but one.
+			 * That case is a non-balanced transfer when there is
+			 * only a rx_buf.
+			 */
+			if (xfer->tx_buf)
+				writel_relaxed(0, input_cnt);
+			else
+				writel_relaxed(n_words, input_cnt);
+
+			writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+		}
 	}
 
 	iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
 	/* Set input and output transfer mode */
 	iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
-	iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+
+	if (!controller->use_dma)
+		iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+	else
+		iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
+
 	iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
 	iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
 
@@ -428,11 +580,31 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 	config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
 	config |= xfer->bits_per_word - 1;
 	config |= QUP_CONFIG_SPI_MODE;
+
+	if (controller->use_dma) {
+		if (!xfer->tx_buf)
+			config |= QUP_CONFIG_NO_OUTPUT;
+		if (!xfer->rx_buf)
+			config |= QUP_CONFIG_NO_INPUT;
+	}
+
 	writel_relaxed(config, controller->base + QUP_CONFIG);
 
 	/* only write to OPERATIONAL_MASK when register is present */
-	if (!controller->qup_v1)
-		writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+	if (!controller->qup_v1) {
+		u32 mask = 0;
+
+		/*
+		 * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
+		 * status change in BAM mode
+		 */
+
+		if (mode == QUP_IO_M_MODE_BAM)
+			mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
+
+		writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
+	}
+
 	return 0;
 }
 
@@ -461,17 +633,13 @@ static int spi_qup_transfer_one(struct spi_master *master,
 	controller->tx_bytes = 0;
 	spin_unlock_irqrestore(&controller->lock, flags);
 
-	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
-		dev_warn(controller->dev, "cannot set RUN state\n");
-		goto exit;
-	}
+	if (controller->use_dma)
+		ret = spi_qup_do_dma(master, xfer);
+	else
+		ret = spi_qup_do_pio(master, xfer);
 
-	if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
-		dev_warn(controller->dev, "cannot set PAUSE state\n");
+	if (ret)
 		goto exit;
-	}
-
-	spi_qup_fifo_write(controller, xfer);
 
 	if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
 		dev_warn(controller->dev, "cannot set EXECUTE state\n");
@@ -480,6 +648,7 @@ static int spi_qup_transfer_one(struct spi_master *master,
 
 	if (!wait_for_completion_timeout(&controller->done, timeout))
 		ret = -ETIMEDOUT;
+
 exit:
 	spi_qup_set_state(controller, QUP_STATE_RESET);
 	spin_lock_irqsave(&controller->lock, flags);
@@ -487,6 +656,97 @@ exit:
 	if (!ret)
 		ret = controller->error;
 	spin_unlock_irqrestore(&controller->lock, flags);
+
+	if (ret && controller->use_dma)
+		spi_qup_dma_terminate(master, xfer);
+
+	return ret;
+}
+
+static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
+			    struct spi_transfer *xfer)
+{
+	struct spi_qup *qup = spi_master_get_devdata(master);
+	size_t dma_align = dma_get_cache_alignment();
+	u32 mode;
+
+	qup->use_dma = 0;
+
+	if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
+	    IS_ERR_OR_NULL(master->dma_rx) ||
+	    !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
+		return false;
+
+	if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
+	    IS_ERR_OR_NULL(master->dma_tx) ||
+	    !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
+		return false;
+
+	mode = spi_qup_get_mode(master, xfer);
+	if (mode == QUP_IO_M_MODE_FIFO)
+		return false;
+
+	qup->use_dma = 1;
+
+	return true;
+}
+
+static void spi_qup_release_dma(struct spi_master *master)
+{
+	if (!IS_ERR_OR_NULL(master->dma_rx))
+		dma_release_channel(master->dma_rx);
+	if (!IS_ERR_OR_NULL(master->dma_tx))
+		dma_release_channel(master->dma_tx);
+}
+
+static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
+{
+	struct spi_qup *spi = spi_master_get_devdata(master);
+	struct dma_slave_config *rx_conf = &spi->rx_conf,
+				*tx_conf = &spi->tx_conf;
+	struct device *dev = spi->dev;
+	int ret;
+
+	/* allocate dma resources, if available */
+	master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+	if (IS_ERR(master->dma_rx))
+		return PTR_ERR(master->dma_rx);
+
+	master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+	if (IS_ERR(master->dma_tx)) {
+		ret = PTR_ERR(master->dma_tx);
+		goto err_tx;
+	}
+
+	/* set DMA parameters */
+	rx_conf->direction = DMA_DEV_TO_MEM;
+	rx_conf->device_fc = 1;
+	rx_conf->src_addr = base + QUP_INPUT_FIFO;
+	rx_conf->src_maxburst = spi->in_blk_sz;
+
+	tx_conf->direction = DMA_MEM_TO_DEV;
+	tx_conf->device_fc = 1;
+	tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
+	tx_conf->dst_maxburst = spi->out_blk_sz;
+
+	ret = dmaengine_slave_config(master->dma_rx, rx_conf);
+	if (ret) {
+		dev_err(dev, "failed to configure RX channel\n");
+		goto err;
+	}
+
+	ret = dmaengine_slave_config(master->dma_tx, tx_conf);
+	if (ret) {
+		dev_err(dev, "failed to configure TX channel\n");
+		goto err;
+	}
+
+	return 0;
+
+err:
+	dma_release_channel(master->dma_tx);
+err_tx:
+	dma_release_channel(master->dma_rx);
 	return ret;
 }
 
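
Worth noting: spi_qup_can_dma() above only opts in when the buffers are
suitably aligned and the length is a whole number of blocks; otherwise
the core falls back to PIO for that transfer. A standalone sketch (not
kernel code) of those eligibility checks; the 64-byte alignment and
16-byte block size are illustrative assumptions, the driver reads the
real values at probe time:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* buffer must sit on a cache-line boundary and len must be whole blocks */
static int can_dma(uintptr_t buf, size_t len, size_t blk_sz, size_t align)
{
	return buf % align == 0 && len % blk_sz == 0;
}

int main(void)
{
	printf("%d\n", can_dma(0x1000, 256, 16, 64));	/* 1: eligible */
	printf("%d\n", can_dma(0x1004, 256, 16, 64));	/* 0: misaligned */
	return 0;
}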
@@ -562,6 +822,8 @@ static int spi_qup_probe(struct platform_device *pdev)
 	master->transfer_one = spi_qup_transfer_one;
 	master->dev.of_node = pdev->dev.of_node;
 	master->auto_runtime_pm = true;
+	master->dma_alignment = dma_get_cache_alignment();
+	master->max_dma_len = SPI_MAX_DMA_XFER;
 
 	platform_set_drvdata(pdev, master);
 
@@ -573,6 +835,12 @@ static int spi_qup_probe(struct platform_device *pdev)
 	controller->cclk = cclk;
 	controller->irq = irq;
 
+	ret = spi_qup_init_dma(master, res->start);
+	if (ret == -EPROBE_DEFER)
+		goto error;
+	else if (!ret)
+		master->can_dma = spi_qup_can_dma;
+
 	/* set v1 flag if device is version 1 */
 	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
 		controller->qup_v1 = 1;
@@ -609,7 +877,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	ret = spi_qup_set_state(controller, QUP_STATE_RESET);
 	if (ret) {
 		dev_err(dev, "cannot set RESET state\n");
-		goto error;
+		goto error_dma;
 	}
 
 	writel_relaxed(0, base + QUP_OPERATIONAL);
@@ -633,7 +901,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
 			       IRQF_TRIGGER_HIGH, pdev->name, controller);
 	if (ret)
-		goto error;
+		goto error_dma;
 
 	pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
 	pm_runtime_use_autosuspend(dev);
@@ -648,6 +916,8 @@ static int spi_qup_probe(struct platform_device *pdev)
 
 disable_pm:
 	pm_runtime_disable(&pdev->dev);
+error_dma:
+	spi_qup_release_dma(master);
 error:
 	clk_disable_unprepare(cclk);
 	clk_disable_unprepare(iclk);
@@ -739,6 +1009,8 @@ static int spi_qup_remove(struct platform_device *pdev)
 	if (ret)
 		return ret;
 
+	spi_qup_release_dma(master);
+
 	clk_disable_unprepare(controller->cclk);
 	clk_disable_unprepare(controller->iclk);
 