Diffstat (limited to 'drivers/spi')
-rw-r--r--   drivers/spi/Kconfig        |  14
-rw-r--r--   drivers/spi/Makefile       |   1
-rw-r--r--   drivers/spi/spi-orion.c    |  68
-rw-r--r--   drivers/spi/spi-pl022.c    |   2
-rw-r--r--   drivers/spi/spi-qup.c      |  36
-rw-r--r--   drivers/spi/spi-rockchip.c | 837
-rw-r--r--   drivers/spi/spi-rspi.c     |  45
7 files changed, 959 insertions(+), 44 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 213b5cbb9dcc..20bd055ea2d1 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -382,9 +382,21 @@ config SPI_PXA2XX
 config SPI_PXA2XX_PCI
 	def_tristate SPI_PXA2XX && PCI
 
+config SPI_ROCKCHIP
+	tristate "Rockchip SPI controller driver"
+	depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
+	help
+	  This selects a driver for Rockchip SPI controller.
+
+	  If you say yes to this option, support will be included for
+	  RK3066, RK3188 and RK3288 families of SPI controller.
+	  Rockchip SPI controller support DMA transport and PIO mode.
+	  The main usecase of this controller is to use spi flash as boot
+	  device.
+
 config SPI_RSPI
 	tristate "Renesas RSPI/QSPI controller"
-	depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
+	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
 	help
 	  SPI driver for Renesas RSPI and QSPI blocks.
 
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 929c9f5eac01..762da0741148 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -61,6 +61,7 @@ spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
 obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
 obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
 obj-$(CONFIG_SPI_QUP) += spi-qup.o
+obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
 obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
 obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
 spi-s3c24xx-hw-y := spi-s3c24xx.o
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index c206a4ad83cd..c4675fa8b645 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 #include <linux/spi/spi.h>
 #include <linux/module.h>
+#include <linux/pm_runtime.h>
 #include <linux/of.h>
 #include <linux/clk.h>
 #include <linux/sizes.h>
@@ -23,6 +24,9 @@
 
 #define DRIVER_NAME "orion_spi"
 
+/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
+#define SPI_AUTOSUSPEND_TIMEOUT 200
+
 #define ORION_NUM_CHIPSELECTS 1 /* only one slave is supported*/
 #define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
 
@@ -277,7 +281,6 @@ out:
 	return xfer->len - count;
 }
 
-
 static int orion_spi_transfer_one_message(struct spi_master *master,
 					   struct spi_message *m)
 {
@@ -368,6 +371,7 @@ static int orion_spi_probe(struct platform_device *pdev)
 	master->transfer_one_message = orion_spi_transfer_one_message;
 	master->num_chipselect = ORION_NUM_CHIPSELECTS;
 	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+	master->auto_runtime_pm = true;
 
 	platform_set_drvdata(pdev, master);
 
@@ -380,8 +384,10 @@ static int orion_spi_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	clk_prepare(spi->clk);
-	clk_enable(spi->clk);
+	status = clk_prepare_enable(spi->clk);
+	if (status)
+		goto out;
+
 	tclk_hz = clk_get_rate(spi->clk);
 	master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
 	master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
@@ -393,16 +399,27 @@ static int orion_spi_probe(struct platform_device *pdev)
 		goto out_rel_clk;
 	}
 
-	if (orion_spi_reset(spi) < 0)
-		goto out_rel_clk;
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_enable(&pdev->dev);
+
+	status = orion_spi_reset(spi);
+	if (status < 0)
+		goto out_rel_pm;
+
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
 
 	master->dev.of_node = pdev->dev.of_node;
-	status = devm_spi_register_master(&pdev->dev, master);
+	status = spi_register_master(master);
 	if (status < 0)
-		goto out_rel_clk;
+		goto out_rel_pm;
 
 	return status;
 
+out_rel_pm:
+	pm_runtime_disable(&pdev->dev);
 out_rel_clk:
 	clk_disable_unprepare(spi->clk);
 out:
@@ -413,19 +430,45 @@ out:
 
 static int orion_spi_remove(struct platform_device *pdev)
 {
-	struct spi_master *master;
-	struct orion_spi *spi;
-
-	master = platform_get_drvdata(pdev);
-	spi = spi_master_get_devdata(master);
+	struct spi_master *master = platform_get_drvdata(pdev);
+	struct orion_spi *spi = spi_master_get_devdata(master);
 
+	pm_runtime_get_sync(&pdev->dev);
 	clk_disable_unprepare(spi->clk);
 
+	spi_unregister_master(master);
+	pm_runtime_disable(&pdev->dev);
+
 	return 0;
 }
 
 MODULE_ALIAS("platform:" DRIVER_NAME);
 
+#ifdef CONFIG_PM_RUNTIME
+static int orion_spi_runtime_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct orion_spi *spi = spi_master_get_devdata(master);
+
+	clk_disable_unprepare(spi->clk);
+	return 0;
+}
+
+static int orion_spi_runtime_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct orion_spi *spi = spi_master_get_devdata(master);
+
+	return clk_prepare_enable(spi->clk);
+}
+#endif
+
+static const struct dev_pm_ops orion_spi_pm_ops = {
+	SET_RUNTIME_PM_OPS(orion_spi_runtime_suspend,
+			   orion_spi_runtime_resume,
+			   NULL)
+};
+
 static const struct of_device_id orion_spi_of_match_table[] = {
 	{ .compatible = "marvell,orion-spi", },
 	{}
@@ -436,6 +479,7 @@ static struct platform_driver orion_spi_driver = {
 	.driver = {
 		.name = DRIVER_NAME,
 		.owner = THIS_MODULE,
+		.pm = &orion_spi_pm_ops,
 		.of_match_table = of_match_ptr(orion_spi_of_match_table),
 	},
 	.probe = orion_spi_probe,
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 66d2ae21e78e..1189cfd96477 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1417,7 +1417,7 @@ static void do_interrupt_dma_transfer(struct pl022 *pl022)
 	 * Default is to enable all interrupts except RX -
 	 * this will be enabled once TX is complete
 	 */
-	u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;
+	u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
 
 	/* Enable target chip, if not already active */
 	if (!pl022->next_msg_cs_active)
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index c08da380cb23..9f83d2950748 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -142,6 +142,7 @@ struct spi_qup {
 	int w_size; /* bytes per SPI word */
 	int tx_bytes;
 	int rx_bytes;
+	int qup_v1;
 };
 
 
@@ -420,7 +421,9 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
 		config |= QUP_CONFIG_SPI_MODE;
 	writel_relaxed(config, controller->base + QUP_CONFIG);
 
-	writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+	/* only write to OPERATIONAL_MASK when register is present */
+	if (!controller->qup_v1)
+		writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
 	return 0;
 }
 
@@ -486,7 +489,7 @@ static int spi_qup_probe(struct platform_device *pdev)
 	struct resource *res;
 	struct device *dev;
 	void __iomem *base;
-	u32 data, max_freq, iomode;
+	u32 max_freq, iomode;
 	int ret, irq, size;
 
 	dev = &pdev->dev;
@@ -529,15 +532,6 @@ static int spi_qup_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	data = readl_relaxed(base + QUP_HW_VERSION);
-
-	if (data < QUP_HW_VERSION_2_1_1) {
-		clk_disable_unprepare(cclk);
-		clk_disable_unprepare(iclk);
-		dev_err(dev, "v.%08x is not supported\n", data);
-		return -ENXIO;
-	}
-
 	master = spi_alloc_master(dev, sizeof(struct spi_qup));
 	if (!master) {
 		clk_disable_unprepare(cclk);
@@ -570,6 +564,10 @@ static int spi_qup_probe(struct platform_device *pdev)
 	controller->cclk = cclk;
 	controller->irq = irq;
 
+	/* set v1 flag if device is version 1 */
+	if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
+		controller->qup_v1 = 1;
+
 	spin_lock_init(&controller->lock);
 	init_completion(&controller->done);
 
@@ -593,8 +591,8 @@ static int spi_qup_probe(struct platform_device *pdev)
 	size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
 	controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
 
-	dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
-		 data, controller->in_blk_sz, controller->in_fifo_sz,
+	dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+		 controller->in_blk_sz, controller->in_fifo_sz,
 		 controller->out_blk_sz, controller->out_fifo_sz);
 
 	writel_relaxed(1, base + QUP_SW_RESET);
@@ -607,10 +605,19 @@ static int spi_qup_probe(struct platform_device *pdev)
 
 	writel_relaxed(0, base + QUP_OPERATIONAL);
 	writel_relaxed(0, base + QUP_IO_M_MODES);
-	writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+
+	if (!controller->qup_v1)
+		writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+
 	writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
 		       base + SPI_ERROR_FLAGS_EN);
 
+	/* if earlier version of the QUP, disable INPUT_OVERRUN */
+	if (controller->qup_v1)
+		writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
+			QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
+			base + QUP_ERROR_FLAGS_EN);
+
 	writel_relaxed(0, base + SPI_CONFIG);
 	writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
 
@@ -732,6 +739,7 @@ static int spi_qup_remove(struct platform_device *pdev)
 }
 
 static const struct of_device_id spi_qup_dt_match[] = {
+	{ .compatible = "qcom,spi-qup-v1.1.1", },
 	{ .compatible = "qcom,spi-qup-v2.1.1", },
 	{ .compatible = "qcom,spi-qup-v2.2.1", },
 	{ }
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
new file mode 100644
index 000000000000..c0743604b906
--- /dev/null
+++ b/drivers/spi/spi-rockchip.c
@@ -0,0 +1,837 @@
1/*
2 * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
3 * Author: Addy Ke <addy.ke@rock-chips.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/clk.h>
19#include <linux/err.h>
20#include <linux/delay.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spi/spi.h>
25#include <linux/scatterlist.h>
26#include <linux/of.h>
27#include <linux/pm_runtime.h>
28#include <linux/io.h>
29#include <linux/dmaengine.h>
30
31#define DRIVER_NAME "rockchip-spi"
32
33/* SPI register offsets */
34#define ROCKCHIP_SPI_CTRLR0 0x0000
35#define ROCKCHIP_SPI_CTRLR1 0x0004
36#define ROCKCHIP_SPI_SSIENR 0x0008
37#define ROCKCHIP_SPI_SER 0x000c
38#define ROCKCHIP_SPI_BAUDR 0x0010
39#define ROCKCHIP_SPI_TXFTLR 0x0014
40#define ROCKCHIP_SPI_RXFTLR 0x0018
41#define ROCKCHIP_SPI_TXFLR 0x001c
42#define ROCKCHIP_SPI_RXFLR 0x0020
43#define ROCKCHIP_SPI_SR 0x0024
44#define ROCKCHIP_SPI_IPR 0x0028
45#define ROCKCHIP_SPI_IMR 0x002c
46#define ROCKCHIP_SPI_ISR 0x0030
47#define ROCKCHIP_SPI_RISR 0x0034
48#define ROCKCHIP_SPI_ICR 0x0038
49#define ROCKCHIP_SPI_DMACR 0x003c
50#define ROCKCHIP_SPI_DMATDLR 0x0040
51#define ROCKCHIP_SPI_DMARDLR 0x0044
52#define ROCKCHIP_SPI_TXDR 0x0400
53#define ROCKCHIP_SPI_RXDR 0x0800
54
55/* Bit fields in CTRLR0 */
56#define CR0_DFS_OFFSET 0
57
58#define CR0_CFS_OFFSET 2
59
60#define CR0_SCPH_OFFSET 6
61
62#define CR0_SCPOL_OFFSET 7
63
64#define CR0_CSM_OFFSET 8
65#define CR0_CSM_KEEP 0x0
66/* ss_n be high for half sclk_out cycles */
67#define CR0_CSM_HALF 0X1
68/* ss_n be high for one sclk_out cycle */
69#define CR0_CSM_ONE 0x2
70
71/* ss_n to sclk_out delay */
72#define CR0_SSD_OFFSET 10
73/*
74 * The period between ss_n active and
75 * sclk_out active is half sclk_out cycles
76 */
77#define CR0_SSD_HALF 0x0
78/*
79 * The period between ss_n active and
80 * sclk_out active is one sclk_out cycle
81 */
82#define CR0_SSD_ONE 0x1
83
84#define CR0_EM_OFFSET 11
85#define CR0_EM_LITTLE 0x0
86#define CR0_EM_BIG 0x1
87
88#define CR0_FBM_OFFSET 12
89#define CR0_FBM_MSB 0x0
90#define CR0_FBM_LSB 0x1
91
92#define CR0_BHT_OFFSET 13
93#define CR0_BHT_16BIT 0x0
94#define CR0_BHT_8BIT 0x1
95
96#define CR0_RSD_OFFSET 14
97
98#define CR0_FRF_OFFSET 16
99#define CR0_FRF_SPI 0x0
100#define CR0_FRF_SSP 0x1
101#define CR0_FRF_MICROWIRE 0x2
102
103#define CR0_XFM_OFFSET 18
104#define CR0_XFM_MASK (0x03 << SPI_XFM_OFFSET)
105#define CR0_XFM_TR 0x0
106#define CR0_XFM_TO 0x1
107#define CR0_XFM_RO 0x2
108
109#define CR0_OPM_OFFSET 20
110#define CR0_OPM_MASTER 0x0
111#define CR0_OPM_SLAVE 0x1
112
113#define CR0_MTM_OFFSET 0x21
114
115/* Bit fields in SER, 2bit */
116#define SER_MASK 0x3
117
118/* Bit fields in SR, 5bit */
119#define SR_MASK 0x1f
120#define SR_BUSY (1 << 0)
121#define SR_TF_FULL (1 << 1)
122#define SR_TF_EMPTY (1 << 2)
123#define SR_RF_EMPTY (1 << 3)
124#define SR_RF_FULL (1 << 4)
125
126/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
127#define INT_MASK 0x1f
128#define INT_TF_EMPTY (1 << 0)
129#define INT_TF_OVERFLOW (1 << 1)
130#define INT_RF_UNDERFLOW (1 << 2)
131#define INT_RF_OVERFLOW (1 << 3)
132#define INT_RF_FULL (1 << 4)
133
134/* Bit fields in ICR, 4bit */
135#define ICR_MASK 0x0f
136#define ICR_ALL (1 << 0)
137#define ICR_RF_UNDERFLOW (1 << 1)
138#define ICR_RF_OVERFLOW (1 << 2)
139#define ICR_TF_OVERFLOW (1 << 3)
140
141/* Bit fields in DMACR */
142#define RF_DMA_EN (1 << 0)
143#define TF_DMA_EN (1 << 1)
144
145#define RXBUSY (1 << 0)
146#define TXBUSY (1 << 1)
147
148enum rockchip_ssi_type {
149 SSI_MOTO_SPI = 0,
150 SSI_TI_SSP,
151 SSI_NS_MICROWIRE,
152};
153
154struct rockchip_spi_dma_data {
155 struct dma_chan *ch;
156 enum dma_transfer_direction direction;
157 dma_addr_t addr;
158};
159
160struct rockchip_spi {
161 struct device *dev;
162 struct spi_master *master;
163
164 struct clk *spiclk;
165 struct clk *apb_pclk;
166
167 void __iomem *regs;
168 /*depth of the FIFO buffer */
169 u32 fifo_len;
170 /* max bus freq supported */
171 u32 max_freq;
172 /* supported slave numbers */
173 enum rockchip_ssi_type type;
174
175 u16 mode;
176 u8 tmode;
177 u8 bpw;
178 u8 n_bytes;
179 unsigned len;
180 u32 speed;
181
182 const void *tx;
183 const void *tx_end;
184 void *rx;
185 void *rx_end;
186
187 u32 state;
188 /* protect state */
189 spinlock_t lock;
190
191 struct completion xfer_completion;
192
193 u32 use_dma;
194 struct sg_table tx_sg;
195 struct sg_table rx_sg;
196 struct rockchip_spi_dma_data dma_rx;
197 struct rockchip_spi_dma_data dma_tx;
198};
199
200static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
201{
202 writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
203}
204
205static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
206{
207 writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
208}
209
210static inline void flush_fifo(struct rockchip_spi *rs)
211{
212 while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
213 readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
214}
215
216static inline void wait_for_idle(struct rockchip_spi *rs)
217{
218 unsigned long timeout = jiffies + msecs_to_jiffies(5);
219
220 do {
221 if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
222 return;
223 } while (time_before(jiffies, timeout));
224
225 dev_warn(rs->dev, "spi controller is in busy state!\n");
226}
227
228static u32 get_fifo_len(struct rockchip_spi *rs)
229{
230 u32 fifo;
231
232 for (fifo = 2; fifo < 32; fifo++) {
233 writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
234 if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
235 break;
236 }
237
238 writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);
239
240 return (fifo == 31) ? 0 : fifo;
241}
242
243static inline u32 tx_max(struct rockchip_spi *rs)
244{
245 u32 tx_left, tx_room;
246
247 tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
248 tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
249
250 return min(tx_left, tx_room);
251}
252
253static inline u32 rx_max(struct rockchip_spi *rs)
254{
255 u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
256 u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
257
258 return min(rx_left, rx_room);
259}
260
261static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
262{
263 u32 ser;
264 struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
265
266 ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
267
268 /*
269 * drivers/spi/spi.c:
270 * static void spi_set_cs(struct spi_device *spi, bool enable)
271 * {
272 * if (spi->mode & SPI_CS_HIGH)
273 * enable = !enable;
274 *
275 * if (spi->cs_gpio >= 0)
276 * gpio_set_value(spi->cs_gpio, !enable);
277 * else if (spi->master->set_cs)
278 * spi->master->set_cs(spi, !enable);
279 * }
280 *
281 * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
282 */
283 if (!enable)
284 ser |= 1 << spi->chip_select;
285 else
286 ser &= ~(1 << spi->chip_select);
287
288 writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
289}
290
291static int rockchip_spi_prepare_message(struct spi_master *master,
292 struct spi_message *msg)
293{
294 struct rockchip_spi *rs = spi_master_get_devdata(master);
295 struct spi_device *spi = msg->spi;
296
297 rs->mode = spi->mode;
298
299 return 0;
300}
301
302static int rockchip_spi_unprepare_message(struct spi_master *master,
303 struct spi_message *msg)
304{
305 unsigned long flags;
306 struct rockchip_spi *rs = spi_master_get_devdata(master);
307
308 spin_lock_irqsave(&rs->lock, flags);
309
310 /*
311 * For DMA mode, we need terminate DMA channel and flush
312 * fifo for the next transfer if DMA thansfer timeout.
313 * unprepare_message() was called by core if transfer complete
314 * or timeout. Maybe it is reasonable for error handling here.
315 */
316 if (rs->use_dma) {
317 if (rs->state & RXBUSY) {
318 dmaengine_terminate_all(rs->dma_rx.ch);
319 flush_fifo(rs);
320 }
321
322 if (rs->state & TXBUSY)
323 dmaengine_terminate_all(rs->dma_tx.ch);
324 }
325
326 spin_unlock_irqrestore(&rs->lock, flags);
327
328 return 0;
329}
330
331static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
332{
333 u32 max = tx_max(rs);
334 u32 txw = 0;
335
336 while (max--) {
337 if (rs->n_bytes == 1)
338 txw = *(u8 *)(rs->tx);
339 else
340 txw = *(u16 *)(rs->tx);
341
342 writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
343 rs->tx += rs->n_bytes;
344 }
345}
346
347static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
348{
349 u32 max = rx_max(rs);
350 u32 rxw;
351
352 while (max--) {
353 rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
354 if (rs->n_bytes == 1)
355 *(u8 *)(rs->rx) = (u8)rxw;
356 else
357 *(u16 *)(rs->rx) = (u16)rxw;
358 rs->rx += rs->n_bytes;
359 }
360}
361
362static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
363{
364 int remain = 0;
365
366 do {
367 if (rs->tx) {
368 remain = rs->tx_end - rs->tx;
369 rockchip_spi_pio_writer(rs);
370 }
371
372 if (rs->rx) {
373 remain = rs->rx_end - rs->rx;
374 rockchip_spi_pio_reader(rs);
375 }
376
377 cpu_relax();
378 } while (remain);
379
380 /* If tx, wait until the FIFO data completely. */
381 if (rs->tx)
382 wait_for_idle(rs);
383
384 return 0;
385}
386
387static void rockchip_spi_dma_rxcb(void *data)
388{
389 unsigned long flags;
390 struct rockchip_spi *rs = data;
391
392 spin_lock_irqsave(&rs->lock, flags);
393
394 rs->state &= ~RXBUSY;
395 if (!(rs->state & TXBUSY))
396 spi_finalize_current_transfer(rs->master);
397
398 spin_unlock_irqrestore(&rs->lock, flags);
399}
400
401static void rockchip_spi_dma_txcb(void *data)
402{
403 unsigned long flags;
404 struct rockchip_spi *rs = data;
405
406 /* Wait until the FIFO data completely. */
407 wait_for_idle(rs);
408
409 spin_lock_irqsave(&rs->lock, flags);
410
411 rs->state &= ~TXBUSY;
412 if (!(rs->state & RXBUSY))
413 spi_finalize_current_transfer(rs->master);
414
415 spin_unlock_irqrestore(&rs->lock, flags);
416}
417
418static int rockchip_spi_dma_transfer(struct rockchip_spi *rs)
419{
420 unsigned long flags;
421 struct dma_slave_config rxconf, txconf;
422 struct dma_async_tx_descriptor *rxdesc, *txdesc;
423
424 spin_lock_irqsave(&rs->lock, flags);
425 rs->state &= ~RXBUSY;
426 rs->state &= ~TXBUSY;
427 spin_unlock_irqrestore(&rs->lock, flags);
428
429 if (rs->rx) {
430 rxconf.direction = rs->dma_rx.direction;
431 rxconf.src_addr = rs->dma_rx.addr;
432 rxconf.src_addr_width = rs->n_bytes;
433 rxconf.src_maxburst = rs->n_bytes;
434 dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
435
436 rxdesc = dmaengine_prep_slave_sg(
437 rs->dma_rx.ch,
438 rs->rx_sg.sgl, rs->rx_sg.nents,
439 rs->dma_rx.direction, DMA_PREP_INTERRUPT);
440
441 rxdesc->callback = rockchip_spi_dma_rxcb;
442 rxdesc->callback_param = rs;
443 }
444
445 if (rs->tx) {
446 txconf.direction = rs->dma_tx.direction;
447 txconf.dst_addr = rs->dma_tx.addr;
448 txconf.dst_addr_width = rs->n_bytes;
449 txconf.dst_maxburst = rs->n_bytes;
450 dmaengine_slave_config(rs->dma_tx.ch, &txconf);
451
452 txdesc = dmaengine_prep_slave_sg(
453 rs->dma_tx.ch,
454 rs->tx_sg.sgl, rs->tx_sg.nents,
455 rs->dma_tx.direction, DMA_PREP_INTERRUPT);
456
457 txdesc->callback = rockchip_spi_dma_txcb;
458 txdesc->callback_param = rs;
459 }
460
461 /* rx must be started before tx due to spi instinct */
462 if (rs->rx) {
463 spin_lock_irqsave(&rs->lock, flags);
464 rs->state |= RXBUSY;
465 spin_unlock_irqrestore(&rs->lock, flags);
466 dmaengine_submit(rxdesc);
467 dma_async_issue_pending(rs->dma_rx.ch);
468 }
469
470 if (rs->tx) {
471 spin_lock_irqsave(&rs->lock, flags);
472 rs->state |= TXBUSY;
473 spin_unlock_irqrestore(&rs->lock, flags);
474 dmaengine_submit(txdesc);
475 dma_async_issue_pending(rs->dma_tx.ch);
476 }
477
478 return 1;
479}
480
481static void rockchip_spi_config(struct rockchip_spi *rs)
482{
483 u32 div = 0;
484 u32 dmacr = 0;
485
486 u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
487 | (CR0_SSD_ONE << CR0_SSD_OFFSET);
488
489 cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
490 cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
491 cr0 |= (rs->tmode << CR0_XFM_OFFSET);
492 cr0 |= (rs->type << CR0_FRF_OFFSET);
493
494 if (rs->use_dma) {
495 if (rs->tx)
496 dmacr |= TF_DMA_EN;
497 if (rs->rx)
498 dmacr |= RF_DMA_EN;
499 }
500
501 /* div doesn't support odd number */
502 div = rs->max_freq / rs->speed;
503 div = (div + 1) & 0xfffe;
504
505 spi_enable_chip(rs, 0);
506
507 writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
508
509 writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
510 writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
511 writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
512
513 writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
514 writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
515 writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
516
517 spi_set_clk(rs, div);
518
519 dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
520
521 spi_enable_chip(rs, 1);
522}
523
524static int rockchip_spi_transfer_one(
525 struct spi_master *master,
526 struct spi_device *spi,
527 struct spi_transfer *xfer)
528{
529 int ret = 0;
530 struct rockchip_spi *rs = spi_master_get_devdata(master);
531
532 WARN_ON((readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
533
534 if (!xfer->tx_buf && !xfer->rx_buf) {
535 dev_err(rs->dev, "No buffer for transfer\n");
536 return -EINVAL;
537 }
538
539 rs->speed = xfer->speed_hz;
540 rs->bpw = xfer->bits_per_word;
541 rs->n_bytes = rs->bpw >> 3;
542
543 rs->tx = xfer->tx_buf;
544 rs->tx_end = rs->tx + xfer->len;
545 rs->rx = xfer->rx_buf;
546 rs->rx_end = rs->rx + xfer->len;
547 rs->len = xfer->len;
548
549 rs->tx_sg = xfer->tx_sg;
550 rs->rx_sg = xfer->rx_sg;
551
552 if (rs->tx && rs->rx)
553 rs->tmode = CR0_XFM_TR;
554 else if (rs->tx)
555 rs->tmode = CR0_XFM_TO;
556 else if (rs->rx)
557 rs->tmode = CR0_XFM_RO;
558
559 if (master->can_dma && master->can_dma(master, spi, xfer))
560 rs->use_dma = 1;
561 else
562 rs->use_dma = 0;
563
564 rockchip_spi_config(rs);
565
566 if (rs->use_dma)
567 ret = rockchip_spi_dma_transfer(rs);
568 else
569 ret = rockchip_spi_pio_transfer(rs);
570
571 return ret;
572}
573
574static bool rockchip_spi_can_dma(struct spi_master *master,
575 struct spi_device *spi,
576 struct spi_transfer *xfer)
577{
578 struct rockchip_spi *rs = spi_master_get_devdata(master);
579
580 return (xfer->len > rs->fifo_len);
581}
582
583static int rockchip_spi_probe(struct platform_device *pdev)
584{
585 int ret = 0;
586 struct rockchip_spi *rs;
587 struct spi_master *master;
588 struct resource *mem;
589
590 master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
591 if (!master)
592 return -ENOMEM;
593
594 platform_set_drvdata(pdev, master);
595
596 rs = spi_master_get_devdata(master);
597 memset(rs, 0, sizeof(struct rockchip_spi));
598
599 /* Get basic io resource and map it */
600 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
601 rs->regs = devm_ioremap_resource(&pdev->dev, mem);
602 if (IS_ERR(rs->regs)) {
603 ret = PTR_ERR(rs->regs);
604 goto err_ioremap_resource;
605 }
606
607 rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
608 if (IS_ERR(rs->apb_pclk)) {
609 dev_err(&pdev->dev, "Failed to get apb_pclk\n");
610 ret = PTR_ERR(rs->apb_pclk);
611 goto err_ioremap_resource;
612 }
613
614 rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
615 if (IS_ERR(rs->spiclk)) {
616 dev_err(&pdev->dev, "Failed to get spi_pclk\n");
617 ret = PTR_ERR(rs->spiclk);
618 goto err_ioremap_resource;
619 }
620
621 ret = clk_prepare_enable(rs->apb_pclk);
622 if (ret) {
623 dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
624 goto err_ioremap_resource;
625 }
626
627 ret = clk_prepare_enable(rs->spiclk);
628 if (ret) {
629 dev_err(&pdev->dev, "Failed to enable spi_clk\n");
630 goto err_spiclk_enable;
631 }
632
633 spi_enable_chip(rs, 0);
634
635 rs->type = SSI_MOTO_SPI;
636 rs->master = master;
637 rs->dev = &pdev->dev;
638 rs->max_freq = clk_get_rate(rs->spiclk);
639
640 rs->fifo_len = get_fifo_len(rs);
641 if (!rs->fifo_len) {
642 dev_err(&pdev->dev, "Failed to get fifo length\n");
643 ret = -EINVAL;
644 goto err_get_fifo_len;
645 }
646
647 spin_lock_init(&rs->lock);
648
649 pm_runtime_set_active(&pdev->dev);
650 pm_runtime_enable(&pdev->dev);
651
652 master->auto_runtime_pm = true;
653 master->bus_num = pdev->id;
654 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
655 master->num_chipselect = 2;
656 master->dev.of_node = pdev->dev.of_node;
657 master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
658
659 master->set_cs = rockchip_spi_set_cs;
660 master->prepare_message = rockchip_spi_prepare_message;
661 master->unprepare_message = rockchip_spi_unprepare_message;
662 master->transfer_one = rockchip_spi_transfer_one;
663
664 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
665 if (!rs->dma_tx.ch)
666 dev_warn(rs->dev, "Failed to request TX DMA channel\n");
667
668 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
669 if (!rs->dma_rx.ch) {
670 if (rs->dma_tx.ch) {
671 dma_release_channel(rs->dma_tx.ch);
672 rs->dma_tx.ch = NULL;
673 }
674 dev_warn(rs->dev, "Failed to request RX DMA channel\n");
675 }
676
677 if (rs->dma_tx.ch && rs->dma_rx.ch) {
678 rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
679 rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
680 rs->dma_tx.direction = DMA_MEM_TO_DEV;
681 rs->dma_tx.direction = DMA_DEV_TO_MEM;
682
683 master->can_dma = rockchip_spi_can_dma;
684 master->dma_tx = rs->dma_tx.ch;
685 master->dma_rx = rs->dma_rx.ch;
686 }
687
688 ret = devm_spi_register_master(&pdev->dev, master);
689 if (ret) {
690 dev_err(&pdev->dev, "Failed to register master\n");
691 goto err_register_master;
692 }
693
694 return 0;
695
696err_register_master:
697 if (rs->dma_tx.ch)
698 dma_release_channel(rs->dma_tx.ch);
699 if (rs->dma_rx.ch)
700 dma_release_channel(rs->dma_rx.ch);
701err_get_fifo_len:
702 clk_disable_unprepare(rs->spiclk);
703err_spiclk_enable:
704 clk_disable_unprepare(rs->apb_pclk);
705err_ioremap_resource:
706 spi_master_put(master);
707
708 return ret;
709}
710
711static int rockchip_spi_remove(struct platform_device *pdev)
712{
713 struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
714 struct rockchip_spi *rs = spi_master_get_devdata(master);
715
716 pm_runtime_disable(&pdev->dev);
717
718 clk_disable_unprepare(rs->spiclk);
719 clk_disable_unprepare(rs->apb_pclk);
720
721 if (rs->dma_tx.ch)
722 dma_release_channel(rs->dma_tx.ch);
723 if (rs->dma_rx.ch)
724 dma_release_channel(rs->dma_rx.ch);
725
726 spi_master_put(master);
727
728 return 0;
729}
730
731#ifdef CONFIG_PM_SLEEP
732static int rockchip_spi_suspend(struct device *dev)
733{
734 int ret = 0;
735 struct spi_master *master = dev_get_drvdata(dev);
736 struct rockchip_spi *rs = spi_master_get_devdata(master);
737
738 ret = spi_master_suspend(rs->master);
739 if (ret)
740 return ret;
741
742 if (!pm_runtime_suspended(dev)) {
743 clk_disable_unprepare(rs->spiclk);
744 clk_disable_unprepare(rs->apb_pclk);
745 }
746
747 return ret;
748}
749
750static int rockchip_spi_resume(struct device *dev)
751{
752 int ret = 0;
753 struct spi_master *master = dev_get_drvdata(dev);
754 struct rockchip_spi *rs = spi_master_get_devdata(master);
755
756 if (!pm_runtime_suspended(dev)) {
757 ret = clk_prepare_enable(rs->apb_pclk);
758 if (ret < 0)
759 return ret;
760
761 ret = clk_prepare_enable(rs->spiclk);
762 if (ret < 0) {
763 clk_disable_unprepare(rs->apb_pclk);
764 return ret;
765 }
766 }
767
768 ret = spi_master_resume(rs->master);
769 if (ret < 0) {
770 clk_disable_unprepare(rs->spiclk);
771 clk_disable_unprepare(rs->apb_pclk);
772 }
773
774 return ret;
775}
776#endif /* CONFIG_PM_SLEEP */
777
778#ifdef CONFIG_PM_RUNTIME
779static int rockchip_spi_runtime_suspend(struct device *dev)
780{
781 struct spi_master *master = dev_get_drvdata(dev);
782 struct rockchip_spi *rs = spi_master_get_devdata(master);
783
784 clk_disable_unprepare(rs->spiclk);
785 clk_disable_unprepare(rs->apb_pclk);
786
787 return 0;
788}
789
790static int rockchip_spi_runtime_resume(struct device *dev)
791{
792 int ret;
793 struct spi_master *master = dev_get_drvdata(dev);
794 struct rockchip_spi *rs = spi_master_get_devdata(master);
795
796 ret = clk_prepare_enable(rs->apb_pclk);
797 if (ret)
798 return ret;
799
800 ret = clk_prepare_enable(rs->spiclk);
801 if (ret)
802 clk_disable_unprepare(rs->apb_pclk);
803
804 return ret;
805}
806#endif /* CONFIG_PM_RUNTIME */
807
808static const struct dev_pm_ops rockchip_spi_pm = {
809 SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
810 SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
811 rockchip_spi_runtime_resume, NULL)
812};
813
814static const struct of_device_id rockchip_spi_dt_match[] = {
815 { .compatible = "rockchip,rk3066-spi", },
816 { .compatible = "rockchip,rk3188-spi", },
817 { .compatible = "rockchip,rk3288-spi", },
818 { },
819};
820MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
821
822static struct platform_driver rockchip_spi_driver = {
823 .driver = {
824 .name = DRIVER_NAME,
825 .owner = THIS_MODULE,
826 .pm = &rockchip_spi_pm,
827 .of_match_table = of_match_ptr(rockchip_spi_dt_match),
828 },
829 .probe = rockchip_spi_probe,
830 .remove = rockchip_spi_remove,
831};
832
833module_platform_driver(rockchip_spi_driver);
834
835MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
836MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
837MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 10112745bb17..c850dfdfa9e3 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -477,7 +477,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 					tx->sgl, tx->nents, DMA_TO_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (!desc_tx)
-			return -EIO;
+			goto no_dma;
 
 		irq_mask |= SPCR_SPTIE;
 	}
@@ -486,7 +486,7 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 					rx->sgl, rx->nents, DMA_FROM_DEVICE,
 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 		if (!desc_rx)
-			return -EIO;
+			goto no_dma;
 
 		irq_mask |= SPCR_SPRIE;
 	}
@@ -540,6 +540,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
 		enable_irq(rspi->rx_irq);
 
 	return ret;
+
+no_dma:
+	pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+		     dev_driver_string(&rspi->master->dev),
+		     dev_name(&rspi->master->dev));
+	return -EAGAIN;
 }
 
 static void rspi_receive_init(const struct rspi_data *rspi)
@@ -593,8 +599,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
 
 	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
 		/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
-		return rspi_dma_transfer(rspi, &xfer->tx_sg,
-					 xfer->rx_buf ? &xfer->rx_sg : NULL);
+		ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+					xfer->rx_buf ? &xfer->rx_sg : NULL);
+		if (ret != -EAGAIN)
+			return ret;
 	}
 
 	ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
@@ -630,7 +638,6 @@ static int rspi_rz_transfer_one(struct spi_master *master,
 				struct spi_transfer *xfer)
 {
 	struct rspi_data *rspi = spi_master_get_devdata(master);
-	int ret;
 
 	rspi_rz_receive_init(rspi);
 
@@ -649,8 +656,11 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
 	int ret;
 
-	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
-		return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+		ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+		if (ret != -EAGAIN)
+			return ret;
+	}
 
 	ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
 	if (ret < 0)
@@ -664,8 +674,11 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
 
 static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
 {
-	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
-		return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+	if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+		int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+		if (ret != -EAGAIN)
+			return ret;
+	}
 
 	return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
 }
@@ -927,19 +940,19 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
 	return 0;
 }
 
-static void rspi_release_dma(struct rspi_data *rspi)
+static void rspi_release_dma(struct spi_master *master)
 {
-	if (rspi->master->dma_tx)
-		dma_release_channel(rspi->master->dma_tx);
-	if (rspi->master->dma_rx)
-		dma_release_channel(rspi->master->dma_rx);
+	if (master->dma_tx)
+		dma_release_channel(master->dma_tx);
+	if (master->dma_rx)
+		dma_release_channel(master->dma_rx);
 }
 
 static int rspi_remove(struct platform_device *pdev)
 {
 	struct rspi_data *rspi = platform_get_drvdata(pdev);
 
-	rspi_release_dma(rspi);
+	rspi_release_dma(rspi->master);
 	pm_runtime_disable(&pdev->dev);
 
 	return 0;
@@ -1141,7 +1154,7 @@ static int rspi_probe(struct platform_device *pdev)
 	return 0;
 
 error3:
-	rspi_release_dma(rspi);
+	rspi_release_dma(master);
 error2:
 	pm_runtime_disable(&pdev->dev);
 error1: