summaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/spi/Kconfig14
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/atmel-quadspi.c21
-rw-r--r--drivers/spi/spi-at91-usart.c221
-rw-r--r--drivers/spi/spi-bcm2835.c328
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-meson-spifc.c12
-rw-r--r--drivers/spi/spi-mt65xx.c15
-rw-r--r--drivers/spi/spi-pxa2xx.c14
-rw-r--r--drivers/spi/spi-qup.c4
-rw-r--r--drivers/spi/spi-rockchip.c4
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c10
-rw-r--r--drivers/spi/spi-synquacer.c828
-rw-r--r--drivers/spi/spi-tegra114.c170
-rw-r--r--drivers/spi/spi.c194
-rw-r--r--drivers/spi/spidev.c2
17 files changed, 1603 insertions, 241 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 30a40280c157..3a1d8f1170de 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -120,7 +120,7 @@ config SPI_AXI_SPI_ENGINE
120config SPI_BCM2835 120config SPI_BCM2835
121 tristate "BCM2835 SPI controller" 121 tristate "BCM2835 SPI controller"
122 depends on GPIOLIB 122 depends on GPIOLIB
123 depends on ARCH_BCM2835 || COMPILE_TEST 123 depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
124 help 124 help
125 This selects a driver for the Broadcom BCM2835 SPI master. 125 This selects a driver for the Broadcom BCM2835 SPI master.
126 126
@@ -131,7 +131,7 @@ config SPI_BCM2835
131 131
132config SPI_BCM2835AUX 132config SPI_BCM2835AUX
133 tristate "BCM2835 SPI auxiliary controller" 133 tristate "BCM2835 SPI auxiliary controller"
134 depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST 134 depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
135 help 135 help
136 This selects a driver for the Broadcom BCM2835 SPI aux master. 136 This selects a driver for the Broadcom BCM2835 SPI aux master.
137 137
@@ -733,6 +733,16 @@ config SPI_SUN6I
733 help 733 help
734 This enables using the SPI controller on the Allwinner A31 SoCs. 734 This enables using the SPI controller on the Allwinner A31 SoCs.
735 735
736config SPI_SYNQUACER
737 tristate "Socionext's SynQuacer HighSpeed SPI controller"
738 depends on ARCH_SYNQUACER || COMPILE_TEST
739 help
740 SPI driver for Socionext's High speed SPI controller which provides
741 various operating modes for interfacing to serial peripheral devices
742 that use the de-facto standard SPI protocol.
743
744 It also supports the new dual-bit and quad-bit SPI protocol.
745
736config SPI_MXIC 746config SPI_MXIC
737 tristate "Macronix MX25F0A SPI controller" 747 tristate "Macronix MX25F0A SPI controller"
738 depends on SPI_MASTER 748 depends on SPI_MASTER
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index f2f78d03dc28..63dcab552bcb 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
106obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o 106obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
107obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o 107obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
108obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o 108obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
109obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o
109obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o 110obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
110obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o 111obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
111obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o 112obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 9f24d5f0b431..6a7d7b553d95 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -151,6 +151,7 @@ struct atmel_qspi {
151 const struct atmel_qspi_caps *caps; 151 const struct atmel_qspi_caps *caps;
152 u32 pending; 152 u32 pending;
153 u32 mr; 153 u32 mr;
154 u32 scr;
154 struct completion cmd_completion; 155 struct completion cmd_completion;
155}; 156};
156 157
@@ -382,7 +383,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
382 struct spi_controller *ctrl = spi->master; 383 struct spi_controller *ctrl = spi->master;
383 struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); 384 struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
384 unsigned long src_rate; 385 unsigned long src_rate;
385 u32 scr, scbr; 386 u32 scbr;
386 387
387 if (ctrl->busy) 388 if (ctrl->busy)
388 return -EBUSY; 389 return -EBUSY;
@@ -399,13 +400,13 @@ static int atmel_qspi_setup(struct spi_device *spi)
399 if (scbr > 0) 400 if (scbr > 0)
400 scbr--; 401 scbr--;
401 402
402 scr = QSPI_SCR_SCBR(scbr); 403 aq->scr = QSPI_SCR_SCBR(scbr);
403 writel_relaxed(scr, aq->regs + QSPI_SCR); 404 writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
404 405
405 return 0; 406 return 0;
406} 407}
407 408
408static int atmel_qspi_init(struct atmel_qspi *aq) 409static void atmel_qspi_init(struct atmel_qspi *aq)
409{ 410{
410 /* Reset the QSPI controller */ 411 /* Reset the QSPI controller */
411 writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); 412 writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
@@ -416,8 +417,6 @@ static int atmel_qspi_init(struct atmel_qspi *aq)
416 417
417 /* Enable the QSPI controller */ 418 /* Enable the QSPI controller */
418 writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); 419 writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
419
420 return 0;
421} 420}
422 421
423static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) 422static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
@@ -536,9 +535,7 @@ static int atmel_qspi_probe(struct platform_device *pdev)
536 if (err) 535 if (err)
537 goto disable_qspick; 536 goto disable_qspick;
538 537
539 err = atmel_qspi_init(aq); 538 atmel_qspi_init(aq);
540 if (err)
541 goto disable_qspick;
542 539
543 err = spi_register_controller(ctrl); 540 err = spi_register_controller(ctrl);
544 if (err) 541 if (err)
@@ -587,7 +584,11 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
587 clk_prepare_enable(aq->pclk); 584 clk_prepare_enable(aq->pclk);
588 clk_prepare_enable(aq->qspick); 585 clk_prepare_enable(aq->qspick);
589 586
590 return atmel_qspi_init(aq); 587 atmel_qspi_init(aq);
588
589 writel_relaxed(aq->scr, aq->regs + QSPI_SCR);
590
591 return 0;
591} 592}
592 593
593static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, 594static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index f763e14bdf12..a40bb2ef89dc 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -8,9 +8,12 @@
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/delay.h> 10#include <linux/delay.h>
11#include <linux/dmaengine.h>
12#include <linux/dma-direction.h>
11#include <linux/interrupt.h> 13#include <linux/interrupt.h>
12#include <linux/kernel.h> 14#include <linux/kernel.h>
13#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/of_platform.h>
14#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
15#include <linux/pinctrl/consumer.h> 18#include <linux/pinctrl/consumer.h>
16#include <linux/platform_device.h> 19#include <linux/platform_device.h>
@@ -59,6 +62,8 @@
59 62
60#define US_INIT \ 63#define US_INIT \
61 (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT) 64 (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT)
65#define US_DMA_MIN_BYTES 16
66#define US_DMA_TIMEOUT (msecs_to_jiffies(1000))
62 67
63/* Register access macros */ 68/* Register access macros */
64#define at91_usart_spi_readl(port, reg) \ 69#define at91_usart_spi_readl(port, reg) \
@@ -72,14 +77,19 @@
72 writeb_relaxed((value), (port)->regs + US_##reg) 77 writeb_relaxed((value), (port)->regs + US_##reg)
73 78
74struct at91_usart_spi { 79struct at91_usart_spi {
80 struct platform_device *mpdev;
75 struct spi_transfer *current_transfer; 81 struct spi_transfer *current_transfer;
76 void __iomem *regs; 82 void __iomem *regs;
77 struct device *dev; 83 struct device *dev;
78 struct clk *clk; 84 struct clk *clk;
79 85
86 struct completion xfer_completion;
87
80 /*used in interrupt to protect data reading*/ 88 /*used in interrupt to protect data reading*/
81 spinlock_t lock; 89 spinlock_t lock;
82 90
91 phys_addr_t phybase;
92
83 int irq; 93 int irq;
84 unsigned int current_tx_remaining_bytes; 94 unsigned int current_tx_remaining_bytes;
85 unsigned int current_rx_remaining_bytes; 95 unsigned int current_rx_remaining_bytes;
@@ -88,8 +98,182 @@ struct at91_usart_spi {
88 u32 status; 98 u32 status;
89 99
90 bool xfer_failed; 100 bool xfer_failed;
101 bool use_dma;
91}; 102};
92 103
104static void dma_callback(void *data)
105{
106 struct spi_controller *ctlr = data;
107 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
108
109 at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
110 aus->current_rx_remaining_bytes = 0;
111 complete(&aus->xfer_completion);
112}
113
114static bool at91_usart_spi_can_dma(struct spi_controller *ctrl,
115 struct spi_device *spi,
116 struct spi_transfer *xfer)
117{
118 struct at91_usart_spi *aus = spi_master_get_devdata(ctrl);
119
120 return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES;
121}
122
123static int at91_usart_spi_configure_dma(struct spi_controller *ctlr,
124 struct at91_usart_spi *aus)
125{
126 struct dma_slave_config slave_config;
127 struct device *dev = &aus->mpdev->dev;
128 phys_addr_t phybase = aus->phybase;
129 dma_cap_mask_t mask;
130 int err = 0;
131
132 dma_cap_zero(mask);
133 dma_cap_set(DMA_SLAVE, mask);
134
135 ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx");
136 if (IS_ERR_OR_NULL(ctlr->dma_tx)) {
137 if (IS_ERR(ctlr->dma_tx)) {
138 err = PTR_ERR(ctlr->dma_tx);
139 goto at91_usart_spi_error_clear;
140 }
141
142 dev_dbg(dev,
143 "DMA TX channel not available, SPI unable to use DMA\n");
144 err = -EBUSY;
145 goto at91_usart_spi_error_clear;
146 }
147
148 ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx");
149 if (IS_ERR_OR_NULL(ctlr->dma_rx)) {
150 if (IS_ERR(ctlr->dma_rx)) {
151 err = PTR_ERR(ctlr->dma_rx);
152 goto at91_usart_spi_error;
153 }
154
155 dev_dbg(dev,
156 "DMA RX channel not available, SPI unable to use DMA\n");
157 err = -EBUSY;
158 goto at91_usart_spi_error;
159 }
160
161 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
162 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
163 slave_config.dst_addr = (dma_addr_t)phybase + US_THR;
164 slave_config.src_addr = (dma_addr_t)phybase + US_RHR;
165 slave_config.src_maxburst = 1;
166 slave_config.dst_maxburst = 1;
167 slave_config.device_fc = false;
168
169 slave_config.direction = DMA_DEV_TO_MEM;
170 if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) {
171 dev_err(&ctlr->dev,
172 "failed to configure rx dma channel\n");
173 err = -EINVAL;
174 goto at91_usart_spi_error;
175 }
176
177 slave_config.direction = DMA_MEM_TO_DEV;
178 if (dmaengine_slave_config(ctlr->dma_tx, &slave_config)) {
179 dev_err(&ctlr->dev,
180 "failed to configure tx dma channel\n");
181 err = -EINVAL;
182 goto at91_usart_spi_error;
183 }
184
185 aus->use_dma = true;
186 return 0;
187
188at91_usart_spi_error:
189 if (!IS_ERR_OR_NULL(ctlr->dma_tx))
190 dma_release_channel(ctlr->dma_tx);
191 if (!IS_ERR_OR_NULL(ctlr->dma_rx))
192 dma_release_channel(ctlr->dma_rx);
193 ctlr->dma_tx = NULL;
194 ctlr->dma_rx = NULL;
195
196at91_usart_spi_error_clear:
197 return err;
198}
199
200static void at91_usart_spi_release_dma(struct spi_controller *ctlr)
201{
202 if (ctlr->dma_rx)
203 dma_release_channel(ctlr->dma_rx);
204 if (ctlr->dma_tx)
205 dma_release_channel(ctlr->dma_tx);
206}
207
208static void at91_usart_spi_stop_dma(struct spi_controller *ctlr)
209{
210 if (ctlr->dma_rx)
211 dmaengine_terminate_all(ctlr->dma_rx);
212 if (ctlr->dma_tx)
213 dmaengine_terminate_all(ctlr->dma_tx);
214}
215
216static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr,
217 struct spi_transfer *xfer)
218{
219 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
220 struct dma_chan *rxchan = ctlr->dma_rx;
221 struct dma_chan *txchan = ctlr->dma_tx;
222 struct dma_async_tx_descriptor *rxdesc;
223 struct dma_async_tx_descriptor *txdesc;
224 dma_cookie_t cookie;
225
226 /* Disable RX interrupt */
227 at91_usart_spi_writel(aus, IDR, US_IR_RXRDY);
228
229 rxdesc = dmaengine_prep_slave_sg(rxchan,
230 xfer->rx_sg.sgl,
231 xfer->rx_sg.nents,
232 DMA_DEV_TO_MEM,
233 DMA_PREP_INTERRUPT |
234 DMA_CTRL_ACK);
235 if (!rxdesc)
236 goto at91_usart_spi_err_dma;
237
238 txdesc = dmaengine_prep_slave_sg(txchan,
239 xfer->tx_sg.sgl,
240 xfer->tx_sg.nents,
241 DMA_MEM_TO_DEV,
242 DMA_PREP_INTERRUPT |
243 DMA_CTRL_ACK);
244 if (!txdesc)
245 goto at91_usart_spi_err_dma;
246
247 rxdesc->callback = dma_callback;
248 rxdesc->callback_param = ctlr;
249
250 cookie = rxdesc->tx_submit(rxdesc);
251 if (dma_submit_error(cookie))
252 goto at91_usart_spi_err_dma;
253
254 cookie = txdesc->tx_submit(txdesc);
255 if (dma_submit_error(cookie))
256 goto at91_usart_spi_err_dma;
257
258 rxchan->device->device_issue_pending(rxchan);
259 txchan->device->device_issue_pending(txchan);
260
261 return 0;
262
263at91_usart_spi_err_dma:
264 /* Enable RX interrupt if something fails and fallback to PIO */
265 at91_usart_spi_writel(aus, IER, US_IR_RXRDY);
266 at91_usart_spi_stop_dma(ctlr);
267
268 return -ENOMEM;
269}
270
271static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus)
272{
273 return wait_for_completion_timeout(&aus->xfer_completion,
274 US_DMA_TIMEOUT);
275}
276
93static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus) 277static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus)
94{ 278{
95 return aus->status & US_IR_TXRDY; 279 return aus->status & US_IR_TXRDY;
@@ -216,6 +400,8 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
216 struct spi_transfer *xfer) 400 struct spi_transfer *xfer)
217{ 401{
218 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); 402 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
403 unsigned long dma_timeout = 0;
404 int ret = 0;
219 405
220 at91_usart_spi_set_xfer_speed(aus, xfer); 406 at91_usart_spi_set_xfer_speed(aus, xfer);
221 aus->xfer_failed = false; 407 aus->xfer_failed = false;
@@ -225,8 +411,25 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr,
225 411
226 while ((aus->current_tx_remaining_bytes || 412 while ((aus->current_tx_remaining_bytes ||
227 aus->current_rx_remaining_bytes) && !aus->xfer_failed) { 413 aus->current_rx_remaining_bytes) && !aus->xfer_failed) {
228 at91_usart_spi_read_status(aus); 414 reinit_completion(&aus->xfer_completion);
229 at91_usart_spi_tx(aus); 415 if (at91_usart_spi_can_dma(ctlr, spi, xfer) &&
416 !ret) {
417 ret = at91_usart_spi_dma_transfer(ctlr, xfer);
418 if (ret)
419 continue;
420
421 dma_timeout = at91_usart_spi_dma_timeout(aus);
422
423 if (WARN_ON(dma_timeout == 0)) {
424 dev_err(&spi->dev, "DMA transfer timeout\n");
425 return -EIO;
426 }
427 aus->current_tx_remaining_bytes = 0;
428 } else {
429 at91_usart_spi_read_status(aus);
430 at91_usart_spi_tx(aus);
431 }
432
230 cpu_relax(); 433 cpu_relax();
231 } 434 }
232 435
@@ -345,6 +548,7 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
345 controller->transfer_one = at91_usart_spi_transfer_one; 548 controller->transfer_one = at91_usart_spi_transfer_one;
346 controller->prepare_message = at91_usart_spi_prepare_message; 549 controller->prepare_message = at91_usart_spi_prepare_message;
347 controller->unprepare_message = at91_usart_spi_unprepare_message; 550 controller->unprepare_message = at91_usart_spi_unprepare_message;
551 controller->can_dma = at91_usart_spi_can_dma;
348 controller->cleanup = at91_usart_spi_cleanup; 552 controller->cleanup = at91_usart_spi_cleanup;
349 controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk), 553 controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk),
350 US_MIN_CLK_DIV); 554 US_MIN_CLK_DIV);
@@ -376,7 +580,17 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
376 aus->spi_clk = clk_get_rate(clk); 580 aus->spi_clk = clk_get_rate(clk);
377 at91_usart_spi_init(aus); 581 at91_usart_spi_init(aus);
378 582
583 aus->phybase = regs->start;
584
585 aus->mpdev = to_platform_device(pdev->dev.parent);
586
587 ret = at91_usart_spi_configure_dma(controller, aus);
588 if (ret)
589 goto at91_usart_fail_dma;
590
379 spin_lock_init(&aus->lock); 591 spin_lock_init(&aus->lock);
592 init_completion(&aus->xfer_completion);
593
380 ret = devm_spi_register_master(&pdev->dev, controller); 594 ret = devm_spi_register_master(&pdev->dev, controller);
381 if (ret) 595 if (ret)
382 goto at91_usart_fail_register_master; 596 goto at91_usart_fail_register_master;
@@ -389,6 +603,8 @@ static int at91_usart_spi_probe(struct platform_device *pdev)
389 return 0; 603 return 0;
390 604
391at91_usart_fail_register_master: 605at91_usart_fail_register_master:
606 at91_usart_spi_release_dma(controller);
607at91_usart_fail_dma:
392 clk_disable_unprepare(clk); 608 clk_disable_unprepare(clk);
393at91_usart_spi_probe_fail: 609at91_usart_spi_probe_fail:
394 spi_master_put(controller); 610 spi_master_put(controller);
@@ -453,6 +669,7 @@ static int at91_usart_spi_remove(struct platform_device *pdev)
453 struct spi_controller *ctlr = platform_get_drvdata(pdev); 669 struct spi_controller *ctlr = platform_get_drvdata(pdev);
454 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); 670 struct at91_usart_spi *aus = spi_master_get_devdata(ctlr);
455 671
672 at91_usart_spi_release_dma(ctlr);
456 clk_disable_unprepare(aus->clk); 673 clk_disable_unprepare(aus->clk);
457 674
458 return 0; 675 return 0;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 402c1efcd762..6f243a90c844 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -13,6 +13,7 @@
13 13
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/completion.h> 15#include <linux/completion.h>
16#include <linux/debugfs.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/dma-mapping.h> 18#include <linux/dma-mapping.h>
18#include <linux/dmaengine.h> 19#include <linux/dmaengine.h>
@@ -64,14 +65,18 @@
64 65
65#define BCM2835_SPI_FIFO_SIZE 64 66#define BCM2835_SPI_FIFO_SIZE 64
66#define BCM2835_SPI_FIFO_SIZE_3_4 48 67#define BCM2835_SPI_FIFO_SIZE_3_4 48
67#define BCM2835_SPI_POLLING_LIMIT_US 30
68#define BCM2835_SPI_POLLING_JIFFIES 2
69#define BCM2835_SPI_DMA_MIN_LENGTH 96 68#define BCM2835_SPI_DMA_MIN_LENGTH 96
70#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ 69#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
71 | SPI_NO_CS | SPI_3WIRE) 70 | SPI_NO_CS | SPI_3WIRE)
72 71
73#define DRV_NAME "spi-bcm2835" 72#define DRV_NAME "spi-bcm2835"
74 73
74/* define polling limits */
75unsigned int polling_limit_us = 30;
76module_param(polling_limit_us, uint, 0664);
77MODULE_PARM_DESC(polling_limit_us,
78 "time in us to run a transfer in polling mode\n");
79
75/** 80/**
76 * struct bcm2835_spi - BCM2835 SPI controller 81 * struct bcm2835_spi - BCM2835 SPI controller
77 * @regs: base address of register map 82 * @regs: base address of register map
@@ -88,6 +93,15 @@
88 * length is not a multiple of 4 (to overcome hardware limitation) 93 * length is not a multiple of 4 (to overcome hardware limitation)
89 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry 94 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
90 * @dma_pending: whether a DMA transfer is in progress 95 * @dma_pending: whether a DMA transfer is in progress
96 * @debugfs_dir: the debugfs directory - neede to remove debugfs when
97 * unloading the module
98 * @count_transfer_polling: count of how often polling mode is used
99 * @count_transfer_irq: count of how often interrupt mode is used
100 * @count_transfer_irq_after_polling: count of how often we fall back to
101 * interrupt mode after starting in polling mode.
102 * These are counted as well in @count_transfer_polling and
103 * @count_transfer_irq
104 * @count_transfer_dma: count how often dma mode is used
91 */ 105 */
92struct bcm2835_spi { 106struct bcm2835_spi {
93 void __iomem *regs; 107 void __iomem *regs;
@@ -102,8 +116,55 @@ struct bcm2835_spi {
102 int rx_prologue; 116 int rx_prologue;
103 unsigned int tx_spillover; 117 unsigned int tx_spillover;
104 unsigned int dma_pending; 118 unsigned int dma_pending;
119
120 struct dentry *debugfs_dir;
121 u64 count_transfer_polling;
122 u64 count_transfer_irq;
123 u64 count_transfer_irq_after_polling;
124 u64 count_transfer_dma;
105}; 125};
106 126
127#if defined(CONFIG_DEBUG_FS)
128static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
129 const char *dname)
130{
131 char name[64];
132 struct dentry *dir;
133
134 /* get full name */
135 snprintf(name, sizeof(name), "spi-bcm2835-%s", dname);
136
137 /* the base directory */
138 dir = debugfs_create_dir(name, NULL);
139 bs->debugfs_dir = dir;
140
141 /* the counters */
142 debugfs_create_u64("count_transfer_polling", 0444, dir,
143 &bs->count_transfer_polling);
144 debugfs_create_u64("count_transfer_irq", 0444, dir,
145 &bs->count_transfer_irq);
146 debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir,
147 &bs->count_transfer_irq_after_polling);
148 debugfs_create_u64("count_transfer_dma", 0444, dir,
149 &bs->count_transfer_dma);
150}
151
152static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
153{
154 debugfs_remove_recursive(bs->debugfs_dir);
155 bs->debugfs_dir = NULL;
156}
157#else
158static void bcm2835_debugfs_create(struct bcm2835_spi *bs,
159 const char *dname)
160{
161}
162
163static void bcm2835_debugfs_remove(struct bcm2835_spi *bs)
164{
165}
166#endif /* CONFIG_DEBUG_FS */
167
107static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) 168static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
108{ 169{
109 return readl(bs->regs + reg); 170 return readl(bs->regs + reg);
@@ -248,9 +309,9 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count)
248 } 309 }
249} 310}
250 311
251static void bcm2835_spi_reset_hw(struct spi_master *master) 312static void bcm2835_spi_reset_hw(struct spi_controller *ctlr)
252{ 313{
253 struct bcm2835_spi *bs = spi_master_get_devdata(master); 314 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
254 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); 315 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
255 316
256 /* Disable SPI interrupts and transfer */ 317 /* Disable SPI interrupts and transfer */
@@ -269,8 +330,8 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
269 330
270static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) 331static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
271{ 332{
272 struct spi_master *master = dev_id; 333 struct spi_controller *ctlr = dev_id;
273 struct bcm2835_spi *bs = spi_master_get_devdata(master); 334 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
274 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); 335 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
275 336
276 /* 337 /*
@@ -292,20 +353,23 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
292 353
293 if (!bs->rx_len) { 354 if (!bs->rx_len) {
294 /* Transfer complete - reset SPI HW */ 355 /* Transfer complete - reset SPI HW */
295 bcm2835_spi_reset_hw(master); 356 bcm2835_spi_reset_hw(ctlr);
296 /* wake up the framework */ 357 /* wake up the framework */
297 complete(&master->xfer_completion); 358 complete(&ctlr->xfer_completion);
298 } 359 }
299 360
300 return IRQ_HANDLED; 361 return IRQ_HANDLED;
301} 362}
302 363
303static int bcm2835_spi_transfer_one_irq(struct spi_master *master, 364static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr,
304 struct spi_device *spi, 365 struct spi_device *spi,
305 struct spi_transfer *tfr, 366 struct spi_transfer *tfr,
306 u32 cs, bool fifo_empty) 367 u32 cs, bool fifo_empty)
307{ 368{
308 struct bcm2835_spi *bs = spi_master_get_devdata(master); 369 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
370
371 /* update usage statistics */
372 bs->count_transfer_irq++;
309 373
310 /* 374 /*
311 * Enable HW block, but with interrupts still disabled. 375 * Enable HW block, but with interrupts still disabled.
@@ -328,7 +392,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
328 392
329/** 393/**
330 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA 394 * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA
331 * @master: SPI master 395 * @ctlr: SPI master controller
332 * @tfr: SPI transfer 396 * @tfr: SPI transfer
333 * @bs: BCM2835 SPI controller 397 * @bs: BCM2835 SPI controller
334 * @cs: CS register 398 * @cs: CS register
@@ -372,7 +436,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
372 * be transmitted in 32-bit width to ensure that the following DMA transfer can 436 * be transmitted in 32-bit width to ensure that the following DMA transfer can
373 * pick up the residue in the RX FIFO in ungarbled form. 437 * pick up the residue in the RX FIFO in ungarbled form.
374 */ 438 */
375static void bcm2835_spi_transfer_prologue(struct spi_master *master, 439static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
376 struct spi_transfer *tfr, 440 struct spi_transfer *tfr,
377 struct bcm2835_spi *bs, 441 struct bcm2835_spi *bs,
378 u32 cs) 442 u32 cs)
@@ -413,9 +477,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_master *master,
413 bcm2835_wr_fifo_count(bs, bs->rx_prologue); 477 bcm2835_wr_fifo_count(bs, bs->rx_prologue);
414 bcm2835_wait_tx_fifo_empty(bs); 478 bcm2835_wait_tx_fifo_empty(bs);
415 bcm2835_rd_fifo_count(bs, bs->rx_prologue); 479 bcm2835_rd_fifo_count(bs, bs->rx_prologue);
416 bcm2835_spi_reset_hw(master); 480 bcm2835_spi_reset_hw(ctlr);
417 481
418 dma_sync_single_for_device(master->dma_rx->device->dev, 482 dma_sync_single_for_device(ctlr->dma_rx->device->dev,
419 sg_dma_address(&tfr->rx_sg.sgl[0]), 483 sg_dma_address(&tfr->rx_sg.sgl[0]),
420 bs->rx_prologue, DMA_FROM_DEVICE); 484 bs->rx_prologue, DMA_FROM_DEVICE);
421 485
@@ -479,11 +543,11 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
479 543
480static void bcm2835_spi_dma_done(void *data) 544static void bcm2835_spi_dma_done(void *data)
481{ 545{
482 struct spi_master *master = data; 546 struct spi_controller *ctlr = data;
483 struct bcm2835_spi *bs = spi_master_get_devdata(master); 547 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
484 548
485 /* reset fifo and HW */ 549 /* reset fifo and HW */
486 bcm2835_spi_reset_hw(master); 550 bcm2835_spi_reset_hw(ctlr);
487 551
488 /* and terminate tx-dma as we do not have an irq for it 552 /* and terminate tx-dma as we do not have an irq for it
489 * because when the rx dma will terminate and this callback 553 * because when the rx dma will terminate and this callback
@@ -491,15 +555,15 @@ static void bcm2835_spi_dma_done(void *data)
491 * situation otherwise... 555 * situation otherwise...
492 */ 556 */
493 if (cmpxchg(&bs->dma_pending, true, false)) { 557 if (cmpxchg(&bs->dma_pending, true, false)) {
494 dmaengine_terminate_async(master->dma_tx); 558 dmaengine_terminate_async(ctlr->dma_tx);
495 bcm2835_spi_undo_prologue(bs); 559 bcm2835_spi_undo_prologue(bs);
496 } 560 }
497 561
498 /* and mark as completed */; 562 /* and mark as completed */;
499 complete(&master->xfer_completion); 563 complete(&ctlr->xfer_completion);
500} 564}
501 565
502static int bcm2835_spi_prepare_sg(struct spi_master *master, 566static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
503 struct spi_transfer *tfr, 567 struct spi_transfer *tfr,
504 bool is_tx) 568 bool is_tx)
505{ 569{
@@ -514,14 +578,14 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
514 578
515 if (is_tx) { 579 if (is_tx) {
516 dir = DMA_MEM_TO_DEV; 580 dir = DMA_MEM_TO_DEV;
517 chan = master->dma_tx; 581 chan = ctlr->dma_tx;
518 nents = tfr->tx_sg.nents; 582 nents = tfr->tx_sg.nents;
519 sgl = tfr->tx_sg.sgl; 583 sgl = tfr->tx_sg.sgl;
520 flags = 0 /* no tx interrupt */; 584 flags = 0 /* no tx interrupt */;
521 585
522 } else { 586 } else {
523 dir = DMA_DEV_TO_MEM; 587 dir = DMA_DEV_TO_MEM;
524 chan = master->dma_rx; 588 chan = ctlr->dma_rx;
525 nents = tfr->rx_sg.nents; 589 nents = tfr->rx_sg.nents;
526 sgl = tfr->rx_sg.sgl; 590 sgl = tfr->rx_sg.sgl;
527 flags = DMA_PREP_INTERRUPT; 591 flags = DMA_PREP_INTERRUPT;
@@ -534,7 +598,7 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
534 /* set callback for rx */ 598 /* set callback for rx */
535 if (!is_tx) { 599 if (!is_tx) {
536 desc->callback = bcm2835_spi_dma_done; 600 desc->callback = bcm2835_spi_dma_done;
537 desc->callback_param = master; 601 desc->callback_param = ctlr;
538 } 602 }
539 603
540 /* submit it to DMA-engine */ 604 /* submit it to DMA-engine */
@@ -543,27 +607,30 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master,
543 return dma_submit_error(cookie); 607 return dma_submit_error(cookie);
544} 608}
545 609
546static int bcm2835_spi_transfer_one_dma(struct spi_master *master, 610static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
547 struct spi_device *spi, 611 struct spi_device *spi,
548 struct spi_transfer *tfr, 612 struct spi_transfer *tfr,
549 u32 cs) 613 u32 cs)
550{ 614{
551 struct bcm2835_spi *bs = spi_master_get_devdata(master); 615 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
552 int ret; 616 int ret;
553 617
618 /* update usage statistics */
619 bs->count_transfer_dma++;
620
554 /* 621 /*
555 * Transfer first few bytes without DMA if length of first TX or RX 622 * Transfer first few bytes without DMA if length of first TX or RX
556 * sglist entry is not a multiple of 4 bytes (hardware limitation). 623 * sglist entry is not a multiple of 4 bytes (hardware limitation).
557 */ 624 */
558 bcm2835_spi_transfer_prologue(master, tfr, bs, cs); 625 bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);
559 626
560 /* setup tx-DMA */ 627 /* setup tx-DMA */
561 ret = bcm2835_spi_prepare_sg(master, tfr, true); 628 ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
562 if (ret) 629 if (ret)
563 goto err_reset_hw; 630 goto err_reset_hw;
564 631
565 /* start TX early */ 632 /* start TX early */
566 dma_async_issue_pending(master->dma_tx); 633 dma_async_issue_pending(ctlr->dma_tx);
567 634
568 /* mark as dma pending */ 635 /* mark as dma pending */
569 bs->dma_pending = 1; 636 bs->dma_pending = 1;
@@ -579,27 +646,27 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
579 * mapping of the rx buffers still takes place 646 * mapping of the rx buffers still takes place
580 * this saves 10us or more. 647 * this saves 10us or more.
581 */ 648 */
582 ret = bcm2835_spi_prepare_sg(master, tfr, false); 649 ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
583 if (ret) { 650 if (ret) {
584 /* need to reset on errors */ 651 /* need to reset on errors */
585 dmaengine_terminate_sync(master->dma_tx); 652 dmaengine_terminate_sync(ctlr->dma_tx);
586 bs->dma_pending = false; 653 bs->dma_pending = false;
587 goto err_reset_hw; 654 goto err_reset_hw;
588 } 655 }
589 656
590 /* start rx dma late */ 657 /* start rx dma late */
591 dma_async_issue_pending(master->dma_rx); 658 dma_async_issue_pending(ctlr->dma_rx);
592 659
593 /* wait for wakeup in framework */ 660 /* wait for wakeup in framework */
594 return 1; 661 return 1;
595 662
596err_reset_hw: 663err_reset_hw:
597 bcm2835_spi_reset_hw(master); 664 bcm2835_spi_reset_hw(ctlr);
598 bcm2835_spi_undo_prologue(bs); 665 bcm2835_spi_undo_prologue(bs);
599 return ret; 666 return ret;
600} 667}
601 668
602static bool bcm2835_spi_can_dma(struct spi_master *master, 669static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
603 struct spi_device *spi, 670 struct spi_device *spi,
604 struct spi_transfer *tfr) 671 struct spi_transfer *tfr)
605{ 672{
@@ -611,21 +678,21 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
611 return true; 678 return true;
612} 679}
613 680
614static void bcm2835_dma_release(struct spi_master *master) 681static void bcm2835_dma_release(struct spi_controller *ctlr)
615{ 682{
616 if (master->dma_tx) { 683 if (ctlr->dma_tx) {
617 dmaengine_terminate_sync(master->dma_tx); 684 dmaengine_terminate_sync(ctlr->dma_tx);
618 dma_release_channel(master->dma_tx); 685 dma_release_channel(ctlr->dma_tx);
619 master->dma_tx = NULL; 686 ctlr->dma_tx = NULL;
620 } 687 }
621 if (master->dma_rx) { 688 if (ctlr->dma_rx) {
622 dmaengine_terminate_sync(master->dma_rx); 689 dmaengine_terminate_sync(ctlr->dma_rx);
623 dma_release_channel(master->dma_rx); 690 dma_release_channel(ctlr->dma_rx);
624 master->dma_rx = NULL; 691 ctlr->dma_rx = NULL;
625 } 692 }
626} 693}
627 694
628static void bcm2835_dma_init(struct spi_master *master, struct device *dev) 695static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
629{ 696{
630 struct dma_slave_config slave_config; 697 struct dma_slave_config slave_config;
631 const __be32 *addr; 698 const __be32 *addr;
@@ -633,7 +700,7 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
633 int ret; 700 int ret;
634 701
635 /* base address in dma-space */ 702 /* base address in dma-space */
636 addr = of_get_address(master->dev.of_node, 0, NULL, NULL); 703 addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
637 if (!addr) { 704 if (!addr) {
638 dev_err(dev, "could not get DMA-register address - not using dma mode\n"); 705 dev_err(dev, "could not get DMA-register address - not using dma mode\n");
639 goto err; 706 goto err;
@@ -641,38 +708,36 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
641 dma_reg_base = be32_to_cpup(addr); 708 dma_reg_base = be32_to_cpup(addr);
642 709
643 /* get tx/rx dma */ 710 /* get tx/rx dma */
644 master->dma_tx = dma_request_slave_channel(dev, "tx"); 711 ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
645 if (!master->dma_tx) { 712 if (!ctlr->dma_tx) {
646 dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); 713 dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
647 goto err; 714 goto err;
648 } 715 }
649 master->dma_rx = dma_request_slave_channel(dev, "rx"); 716 ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
650 if (!master->dma_rx) { 717 if (!ctlr->dma_rx) {
651 dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); 718 dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
652 goto err_release; 719 goto err_release;
653 } 720 }
654 721
655 /* configure DMAs */ 722 /* configure DMAs */
656 slave_config.direction = DMA_MEM_TO_DEV;
657 slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO); 723 slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
658 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 724 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
659 725
660 ret = dmaengine_slave_config(master->dma_tx, &slave_config); 726 ret = dmaengine_slave_config(ctlr->dma_tx, &slave_config);
661 if (ret) 727 if (ret)
662 goto err_config; 728 goto err_config;
663 729
664 slave_config.direction = DMA_DEV_TO_MEM;
665 slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO); 730 slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
666 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 731 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
667 732
668 ret = dmaengine_slave_config(master->dma_rx, &slave_config); 733 ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
669 if (ret) 734 if (ret)
670 goto err_config; 735 goto err_config;
671 736
672 /* all went well, so set can_dma */ 737 /* all went well, so set can_dma */
673 master->can_dma = bcm2835_spi_can_dma; 738 ctlr->can_dma = bcm2835_spi_can_dma;
674 /* need to do TX AND RX DMA, so we need dummy buffers */ 739 /* need to do TX AND RX DMA, so we need dummy buffers */
675 master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; 740 ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
676 741
677 return; 742 return;
678 743
@@ -680,20 +745,22 @@ err_config:
680 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", 745 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
681 ret); 746 ret);
682err_release: 747err_release:
683 bcm2835_dma_release(master); 748 bcm2835_dma_release(ctlr);
684err: 749err:
685 return; 750 return;
686} 751}
687 752
688static int bcm2835_spi_transfer_one_poll(struct spi_master *master, 753static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr,
689 struct spi_device *spi, 754 struct spi_device *spi,
690 struct spi_transfer *tfr, 755 struct spi_transfer *tfr,
691 u32 cs, 756 u32 cs)
692 unsigned long long xfer_time_us)
693{ 757{
694 struct bcm2835_spi *bs = spi_master_get_devdata(master); 758 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
695 unsigned long timeout; 759 unsigned long timeout;
696 760
761 /* update usage statistics */
762 bs->count_transfer_polling++;
763
697 /* enable HW block without interrupts */ 764 /* enable HW block without interrupts */
698 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); 765 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
699 766
@@ -703,8 +770,8 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
703 */ 770 */
704 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE); 771 bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE);
705 772
706 /* set the timeout */ 773 /* set the timeout to at least 2 jiffies */
707 timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES; 774 timeout = jiffies + 2 + HZ * polling_limit_us / 1000000;
708 775
709 /* loop until finished the transfer */ 776 /* loop until finished the transfer */
710 while (bs->rx_len) { 777 while (bs->rx_len) {
@@ -723,25 +790,28 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
723 jiffies - timeout, 790 jiffies - timeout,
724 bs->tx_len, bs->rx_len); 791 bs->tx_len, bs->rx_len);
725 /* fall back to interrupt mode */ 792 /* fall back to interrupt mode */
726 return bcm2835_spi_transfer_one_irq(master, spi, 793
794 /* update usage statistics */
795 bs->count_transfer_irq_after_polling++;
796
797 return bcm2835_spi_transfer_one_irq(ctlr, spi,
727 tfr, cs, false); 798 tfr, cs, false);
728 } 799 }
729 } 800 }
730 801
731 /* Transfer complete - reset SPI HW */ 802 /* Transfer complete - reset SPI HW */
732 bcm2835_spi_reset_hw(master); 803 bcm2835_spi_reset_hw(ctlr);
733 /* and return without waiting for completion */ 804 /* and return without waiting for completion */
734 return 0; 805 return 0;
735} 806}
736 807
737static int bcm2835_spi_transfer_one(struct spi_master *master, 808static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
738 struct spi_device *spi, 809 struct spi_device *spi,
739 struct spi_transfer *tfr) 810 struct spi_transfer *tfr)
740{ 811{
741 struct bcm2835_spi *bs = spi_master_get_devdata(master); 812 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
742 unsigned long spi_hz, clk_hz, cdiv; 813 unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
743 unsigned long spi_used_hz; 814 unsigned long hz_per_byte, byte_limit;
744 unsigned long long xfer_time_us;
745 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); 815 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
746 816
747 /* set clock */ 817 /* set clock */
@@ -782,42 +852,49 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
782 bs->tx_len = tfr->len; 852 bs->tx_len = tfr->len;
783 bs->rx_len = tfr->len; 853 bs->rx_len = tfr->len;
784 854
785 /* calculate the estimated time in us the transfer runs */ 855 /* Calculate the estimated time in us the transfer runs. Note that
786 xfer_time_us = (unsigned long long)tfr->len 856 * there is 1 idle clocks cycles after each byte getting transferred
787 * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */ 857 * so we have 9 cycles/byte. This is used to find the number of Hz
788 * 1000000; 858 * per byte per polling limit. E.g., we can transfer 1 byte in 30 us
789 do_div(xfer_time_us, spi_used_hz); 859 * per 300,000 Hz of bus clock.
860 */
861 hz_per_byte = polling_limit_us ? (9 * 1000000) / polling_limit_us : 0;
862 byte_limit = hz_per_byte ? spi_used_hz / hz_per_byte : 1;
790 863
791 /* for short requests run polling*/ 864 /* run in polling mode for short transfers */
792 if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US) 865 if (tfr->len < byte_limit)
793 return bcm2835_spi_transfer_one_poll(master, spi, tfr, 866 return bcm2835_spi_transfer_one_poll(ctlr, spi, tfr, cs);
794 cs, xfer_time_us);
795 867
796 /* run in dma mode if conditions are right */ 868 /* run in dma mode if conditions are right
797 if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr)) 869 * Note that unlike poll or interrupt mode DMA mode does not have
798 return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs); 870 * this 1 idle clock cycle pattern but runs the spi clock without gaps
871 */
872 if (ctlr->can_dma && bcm2835_spi_can_dma(ctlr, spi, tfr))
873 return bcm2835_spi_transfer_one_dma(ctlr, spi, tfr, cs);
799 874
800 /* run in interrupt-mode */ 875 /* run in interrupt-mode */
801 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs, true); 876 return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, true);
802} 877}
803 878
804static int bcm2835_spi_prepare_message(struct spi_master *master, 879static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
805 struct spi_message *msg) 880 struct spi_message *msg)
806{ 881{
807 struct spi_device *spi = msg->spi; 882 struct spi_device *spi = msg->spi;
808 struct bcm2835_spi *bs = spi_master_get_devdata(master); 883 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
809 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); 884 u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
810 int ret; 885 int ret;
811 886
812 /* 887 if (ctlr->can_dma) {
813 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by the SPI HW 888 /*
814 * due to DLEN. Split up transfers (32-bit FIFO aligned) if the limit is 889 * DMA transfers are limited to 16 bit (0 to 65535 bytes) by
815 * exceeded. 890 * the SPI HW due to DLEN. Split up transfers (32-bit FIFO
816 */ 891 * aligned) if the limit is exceeded.
817 ret = spi_split_transfers_maxsize(master, msg, 65532, 892 */
818 GFP_KERNEL | GFP_DMA); 893 ret = spi_split_transfers_maxsize(ctlr, msg, 65532,
819 if (ret) 894 GFP_KERNEL | GFP_DMA);
820 return ret; 895 if (ret)
896 return ret;
897 }
821 898
822 cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA); 899 cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
823 900
@@ -831,19 +908,19 @@ static int bcm2835_spi_prepare_message(struct spi_master *master,
831 return 0; 908 return 0;
832} 909}
833 910
834static void bcm2835_spi_handle_err(struct spi_master *master, 911static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
835 struct spi_message *msg) 912 struct spi_message *msg)
836{ 913{
837 struct bcm2835_spi *bs = spi_master_get_devdata(master); 914 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
838 915
839 /* if an error occurred and we have an active dma, then terminate */ 916 /* if an error occurred and we have an active dma, then terminate */
840 if (cmpxchg(&bs->dma_pending, true, false)) { 917 if (cmpxchg(&bs->dma_pending, true, false)) {
841 dmaengine_terminate_sync(master->dma_tx); 918 dmaengine_terminate_sync(ctlr->dma_tx);
842 dmaengine_terminate_sync(master->dma_rx); 919 dmaengine_terminate_sync(ctlr->dma_rx);
843 bcm2835_spi_undo_prologue(bs); 920 bcm2835_spi_undo_prologue(bs);
844 } 921 }
845 /* and reset */ 922 /* and reset */
846 bcm2835_spi_reset_hw(master); 923 bcm2835_spi_reset_hw(ctlr);
847} 924}
848 925
849static int chip_match_name(struct gpio_chip *chip, void *data) 926static int chip_match_name(struct gpio_chip *chip, void *data)
@@ -900,85 +977,88 @@ static int bcm2835_spi_setup(struct spi_device *spi)
900 977
901static int bcm2835_spi_probe(struct platform_device *pdev) 978static int bcm2835_spi_probe(struct platform_device *pdev)
902{ 979{
903 struct spi_master *master; 980 struct spi_controller *ctlr;
904 struct bcm2835_spi *bs; 981 struct bcm2835_spi *bs;
905 struct resource *res; 982 struct resource *res;
906 int err; 983 int err;
907 984
908 master = spi_alloc_master(&pdev->dev, sizeof(*bs)); 985 ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
909 if (!master) { 986 if (!ctlr)
910 dev_err(&pdev->dev, "spi_alloc_master() failed\n");
911 return -ENOMEM; 987 return -ENOMEM;
912 }
913 988
914 platform_set_drvdata(pdev, master); 989 platform_set_drvdata(pdev, ctlr);
915 990
916 master->mode_bits = BCM2835_SPI_MODE_BITS; 991 ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
917 master->bits_per_word_mask = SPI_BPW_MASK(8); 992 ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
918 master->num_chipselect = 3; 993 ctlr->num_chipselect = 3;
919 master->setup = bcm2835_spi_setup; 994 ctlr->setup = bcm2835_spi_setup;
920 master->transfer_one = bcm2835_spi_transfer_one; 995 ctlr->transfer_one = bcm2835_spi_transfer_one;
921 master->handle_err = bcm2835_spi_handle_err; 996 ctlr->handle_err = bcm2835_spi_handle_err;
922 master->prepare_message = bcm2835_spi_prepare_message; 997 ctlr->prepare_message = bcm2835_spi_prepare_message;
923 master->dev.of_node = pdev->dev.of_node; 998 ctlr->dev.of_node = pdev->dev.of_node;
924 999
925 bs = spi_master_get_devdata(master); 1000 bs = spi_controller_get_devdata(ctlr);
926 1001
927 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
928 bs->regs = devm_ioremap_resource(&pdev->dev, res); 1003 bs->regs = devm_ioremap_resource(&pdev->dev, res);
929 if (IS_ERR(bs->regs)) { 1004 if (IS_ERR(bs->regs)) {
930 err = PTR_ERR(bs->regs); 1005 err = PTR_ERR(bs->regs);
931 goto out_master_put; 1006 goto out_controller_put;
932 } 1007 }
933 1008
934 bs->clk = devm_clk_get(&pdev->dev, NULL); 1009 bs->clk = devm_clk_get(&pdev->dev, NULL);
935 if (IS_ERR(bs->clk)) { 1010 if (IS_ERR(bs->clk)) {
936 err = PTR_ERR(bs->clk); 1011 err = PTR_ERR(bs->clk);
937 dev_err(&pdev->dev, "could not get clk: %d\n", err); 1012 dev_err(&pdev->dev, "could not get clk: %d\n", err);
938 goto out_master_put; 1013 goto out_controller_put;
939 } 1014 }
940 1015
941 bs->irq = platform_get_irq(pdev, 0); 1016 bs->irq = platform_get_irq(pdev, 0);
942 if (bs->irq <= 0) { 1017 if (bs->irq <= 0) {
943 dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); 1018 dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
944 err = bs->irq ? bs->irq : -ENODEV; 1019 err = bs->irq ? bs->irq : -ENODEV;
945 goto out_master_put; 1020 goto out_controller_put;
946 } 1021 }
947 1022
948 clk_prepare_enable(bs->clk); 1023 clk_prepare_enable(bs->clk);
949 1024
950 bcm2835_dma_init(master, &pdev->dev); 1025 bcm2835_dma_init(ctlr, &pdev->dev);
951 1026
952 /* initialise the hardware with the default polarities */ 1027 /* initialise the hardware with the default polarities */
953 bcm2835_wr(bs, BCM2835_SPI_CS, 1028 bcm2835_wr(bs, BCM2835_SPI_CS,
954 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); 1029 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
955 1030
956 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0, 1031 err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
957 dev_name(&pdev->dev), master); 1032 dev_name(&pdev->dev), ctlr);
958 if (err) { 1033 if (err) {
959 dev_err(&pdev->dev, "could not request IRQ: %d\n", err); 1034 dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
960 goto out_clk_disable; 1035 goto out_clk_disable;
961 } 1036 }
962 1037
963 err = devm_spi_register_master(&pdev->dev, master); 1038 err = devm_spi_register_controller(&pdev->dev, ctlr);
964 if (err) { 1039 if (err) {
965 dev_err(&pdev->dev, "could not register SPI master: %d\n", err); 1040 dev_err(&pdev->dev, "could not register SPI controller: %d\n",
1041 err);
966 goto out_clk_disable; 1042 goto out_clk_disable;
967 } 1043 }
968 1044
1045 bcm2835_debugfs_create(bs, dev_name(&pdev->dev));
1046
969 return 0; 1047 return 0;
970 1048
971out_clk_disable: 1049out_clk_disable:
972 clk_disable_unprepare(bs->clk); 1050 clk_disable_unprepare(bs->clk);
973out_master_put: 1051out_controller_put:
974 spi_master_put(master); 1052 spi_controller_put(ctlr);
975 return err; 1053 return err;
976} 1054}
977 1055
978static int bcm2835_spi_remove(struct platform_device *pdev) 1056static int bcm2835_spi_remove(struct platform_device *pdev)
979{ 1057{
980 struct spi_master *master = platform_get_drvdata(pdev); 1058 struct spi_controller *ctlr = platform_get_drvdata(pdev);
981 struct bcm2835_spi *bs = spi_master_get_devdata(master); 1059 struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
1060
1061 bcm2835_debugfs_remove(bs);
982 1062
983 /* Clear FIFOs, and disable the HW block */ 1063 /* Clear FIFOs, and disable the HW block */
984 bcm2835_wr(bs, BCM2835_SPI_CS, 1064 bcm2835_wr(bs, BCM2835_SPI_CS,
@@ -986,7 +1066,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
986 1066
987 clk_disable_unprepare(bs->clk); 1067 clk_disable_unprepare(bs->clk);
988 1068
989 bcm2835_dma_release(master); 1069 bcm2835_dma_release(ctlr);
990 1070
991 return 0; 1071 return 0;
992} 1072}
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 40dfb7f58efe..bb57035c5770 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -496,10 +496,8 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
496 int err; 496 int err;
497 497
498 master = spi_alloc_master(&pdev->dev, sizeof(*bs)); 498 master = spi_alloc_master(&pdev->dev, sizeof(*bs));
499 if (!master) { 499 if (!master)
500 dev_err(&pdev->dev, "spi_alloc_master() failed\n");
501 return -ENOMEM; 500 return -ENOMEM;
502 }
503 501
504 platform_set_drvdata(pdev, master); 502 platform_set_drvdata(pdev, master);
505 master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS); 503 master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index ea4b1bf0fa16..f7fe9b13d122 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -1,9 +1,9 @@
1// SPDX-License-Identifier: GPL-2.0-only 1// SPDX-License-Identifier: GPL-2.0+
2/* 2//
3 * Driver for Amlogic Meson SPI flash controller (SPIFC) 3// Driver for Amlogic Meson SPI flash controller (SPIFC)
4 * 4//
5 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> 5// Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
6 */ 6//
7 7
8#include <linux/clk.h> 8#include <linux/clk.h>
9#include <linux/delay.h> 9#include <linux/delay.h>
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 10041eab36a2..45d8a7048b6c 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -123,8 +123,6 @@ static const struct mtk_spi_compatible mt8183_compat = {
123 * supplies it. 123 * supplies it.
124 */ 124 */
125static const struct mtk_chip_config mtk_default_chip_info = { 125static const struct mtk_chip_config mtk_default_chip_info = {
126 .rx_mlsb = 1,
127 .tx_mlsb = 1,
128 .cs_pol = 0, 126 .cs_pol = 0,
129 .sample_sel = 0, 127 .sample_sel = 0,
130}; 128};
@@ -195,14 +193,13 @@ static int mtk_spi_prepare_message(struct spi_master *master,
195 reg_val &= ~SPI_CMD_CPOL; 193 reg_val &= ~SPI_CMD_CPOL;
196 194
197 /* set the mlsbx and mlsbtx */ 195 /* set the mlsbx and mlsbtx */
198 if (chip_config->tx_mlsb) 196 if (spi->mode & SPI_LSB_FIRST) {
199 reg_val |= SPI_CMD_TXMSBF;
200 else
201 reg_val &= ~SPI_CMD_TXMSBF; 197 reg_val &= ~SPI_CMD_TXMSBF;
202 if (chip_config->rx_mlsb)
203 reg_val |= SPI_CMD_RXMSBF;
204 else
205 reg_val &= ~SPI_CMD_RXMSBF; 198 reg_val &= ~SPI_CMD_RXMSBF;
199 } else {
200 reg_val |= SPI_CMD_TXMSBF;
201 reg_val |= SPI_CMD_RXMSBF;
202 }
206 203
207 /* set the tx/rx endian */ 204 /* set the tx/rx endian */
208#ifdef __LITTLE_ENDIAN 205#ifdef __LITTLE_ENDIAN
@@ -599,7 +596,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
599 596
600 master->auto_runtime_pm = true; 597 master->auto_runtime_pm = true;
601 master->dev.of_node = pdev->dev.of_node; 598 master->dev.of_node = pdev->dev.of_node;
602 master->mode_bits = SPI_CPOL | SPI_CPHA; 599 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
603 600
604 master->set_cs = mtk_spi_set_cs; 601 master->set_cs = mtk_spi_set_cs;
605 master->prepare_message = mtk_spi_prepare_message; 602 master->prepare_message = mtk_spi_prepare_message;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index af3f37ba82c8..fc7ab4b26880 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1437,6 +1437,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1437 { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP }, 1437 { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP },
1438 { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP }, 1438 { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP },
1439 { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP }, 1439 { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP },
1440 /* EHL */
1441 { PCI_VDEVICE(INTEL, 0x4b2a), LPSS_BXT_SSP },
1442 { PCI_VDEVICE(INTEL, 0x4b2b), LPSS_BXT_SSP },
1443 { PCI_VDEVICE(INTEL, 0x4b37), LPSS_BXT_SSP },
1440 /* APL */ 1444 /* APL */
1441 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, 1445 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
1442 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, 1446 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1704,6 +1708,16 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1704 goto out_error_dma_irq_alloc; 1708 goto out_error_dma_irq_alloc;
1705 1709
1706 controller->max_speed_hz = clk_get_rate(ssp->clk); 1710 controller->max_speed_hz = clk_get_rate(ssp->clk);
1711 /*
1712 * Set minimum speed for all other platforms than Intel Quark which is
1713 * able do under 1 Hz transfers.
1714 */
1715 if (!pxa25x_ssp_comp(drv_data))
1716 controller->min_speed_hz =
1717 DIV_ROUND_UP(controller->max_speed_hz, 4096);
1718 else if (!is_quark_x1000_ssp(drv_data))
1719 controller->min_speed_hz =
1720 DIV_ROUND_UP(controller->max_speed_hz, 512);
1707 1721
1708 /* Load default SSP configuration */ 1722 /* Load default SSP configuration */
1709 pxa2xx_spi_write(drv_data, SSCR0, 0); 1723 pxa2xx_spi_write(drv_data, SSCR0, 0);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index eb8a6a2e91c9..2f559e531100 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -873,10 +873,6 @@ static int spi_qup_transfer_one(struct spi_master *master,
873 else 873 else
874 ret = spi_qup_do_pio(spi, xfer, timeout); 874 ret = spi_qup_do_pio(spi, xfer, timeout);
875 875
876 if (ret)
877 goto exit;
878
879exit:
880 spi_qup_set_state(controller, QUP_STATE_RESET); 876 spi_qup_set_state(controller, QUP_STATE_RESET);
881 spin_lock_irqsave(&controller->lock, flags); 877 spin_lock_irqsave(&controller->lock, flags);
882 if (!ret) 878 if (!ret)
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 9b91188a85f9..2cc6d9951b52 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -417,7 +417,7 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs,
417 .direction = DMA_MEM_TO_DEV, 417 .direction = DMA_MEM_TO_DEV,
418 .dst_addr = rs->dma_addr_tx, 418 .dst_addr = rs->dma_addr_tx,
419 .dst_addr_width = rs->n_bytes, 419 .dst_addr_width = rs->n_bytes,
420 .dst_maxburst = rs->fifo_len / 2, 420 .dst_maxburst = rs->fifo_len / 4,
421 }; 421 };
422 422
423 dmaengine_slave_config(master->dma_tx, &txconf); 423 dmaengine_slave_config(master->dma_tx, &txconf);
@@ -518,7 +518,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs,
518 else 518 else
519 writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR); 519 writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
520 520
521 writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR); 521 writel_relaxed(rs->fifo_len / 2, rs->regs + ROCKCHIP_SPI_DMATDLR);
522 writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR); 522 writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
523 writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR); 523 writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
524 524
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 6aab7b2136db..b50bdbc27e58 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -229,7 +229,7 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
229 sh_msiof_write(p, CTR, data); 229 sh_msiof_write(p, CTR, data);
230 230
231 return readl_poll_timeout_atomic(p->mapbase + CTR, data, 231 return readl_poll_timeout_atomic(p->mapbase + CTR, data,
232 (data & mask) == set, 10, 1000); 232 (data & mask) == set, 1, 100);
233} 233}
234 234
235static irqreturn_t sh_msiof_spi_irq(int irq, void *data) 235static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 5dbb6a8e893c..655e4afbfb2a 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -245,12 +245,8 @@ static int stm32_qspi_tx_dma(struct stm32_qspi *qspi,
245 writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR); 245 writel_relaxed(cr | CR_DMAEN, qspi->io_base + QSPI_CR);
246 246
247 t_out = sgt.nents * STM32_COMP_TIMEOUT_MS; 247 t_out = sgt.nents * STM32_COMP_TIMEOUT_MS;
248 if (!wait_for_completion_interruptible_timeout(&qspi->dma_completion, 248 if (!wait_for_completion_timeout(&qspi->dma_completion,
249 msecs_to_jiffies(t_out))) 249 msecs_to_jiffies(t_out)))
250 err = -ETIMEDOUT;
251
252 if (dma_async_is_tx_complete(dma_ch, cookie,
253 NULL, NULL) != DMA_COMPLETE)
254 err = -ETIMEDOUT; 250 err = -ETIMEDOUT;
255 251
256 if (err) 252 if (err)
@@ -304,7 +300,7 @@ static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
304 cr = readl_relaxed(qspi->io_base + QSPI_CR); 300 cr = readl_relaxed(qspi->io_base + QSPI_CR);
305 writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR); 301 writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
306 302
307 if (!wait_for_completion_interruptible_timeout(&qspi->data_completion, 303 if (!wait_for_completion_timeout(&qspi->data_completion,
308 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) { 304 msecs_to_jiffies(STM32_COMP_TIMEOUT_MS))) {
309 err = -ETIMEDOUT; 305 err = -ETIMEDOUT;
310 } else { 306 } else {
diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
new file mode 100644
index 000000000000..f99abd85c50a
--- /dev/null
+++ b/drivers/spi/spi-synquacer.c
@@ -0,0 +1,828 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Synquacer HSSPI controller driver
4//
5// Copyright (c) 2015-2018 Socionext Inc.
6// Copyright (c) 2018-2019 Linaro Ltd.
7//
8
9#include <linux/acpi.h>
10#include <linux/delay.h>
11#include <linux/interrupt.h>
12#include <linux/io.h>
13#include <linux/module.h>
14#include <linux/of.h>
15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
17#include <linux/scatterlist.h>
18#include <linux/slab.h>
19#include <linux/spi/spi.h>
20#include <linux/spinlock.h>
21#include <linux/clk.h>
22
23/* HSSPI register address definitions */
24#define SYNQUACER_HSSPI_REG_MCTRL 0x00
25#define SYNQUACER_HSSPI_REG_PCC0 0x04
26#define SYNQUACER_HSSPI_REG_PCC(n) (SYNQUACER_HSSPI_REG_PCC0 + (n) * 4)
27#define SYNQUACER_HSSPI_REG_TXF 0x14
28#define SYNQUACER_HSSPI_REG_TXE 0x18
29#define SYNQUACER_HSSPI_REG_TXC 0x1C
30#define SYNQUACER_HSSPI_REG_RXF 0x20
31#define SYNQUACER_HSSPI_REG_RXE 0x24
32#define SYNQUACER_HSSPI_REG_RXC 0x28
33#define SYNQUACER_HSSPI_REG_FAULTF 0x2C
34#define SYNQUACER_HSSPI_REG_FAULTC 0x30
35#define SYNQUACER_HSSPI_REG_DMCFG 0x34
36#define SYNQUACER_HSSPI_REG_DMSTART 0x38
37#define SYNQUACER_HSSPI_REG_DMBCC 0x3C
38#define SYNQUACER_HSSPI_REG_DMSTATUS 0x40
39#define SYNQUACER_HSSPI_REG_FIFOCFG 0x4C
40#define SYNQUACER_HSSPI_REG_TX_FIFO 0x50
41#define SYNQUACER_HSSPI_REG_RX_FIFO 0x90
42#define SYNQUACER_HSSPI_REG_MID 0xFC
43
44/* HSSPI register bit definitions */
45#define SYNQUACER_HSSPI_MCTRL_MEN BIT(0)
46#define SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN BIT(1)
47#define SYNQUACER_HSSPI_MCTRL_CDSS BIT(3)
48#define SYNQUACER_HSSPI_MCTRL_MES BIT(4)
49#define SYNQUACER_HSSPI_MCTRL_SYNCON BIT(5)
50
51#define SYNQUACER_HSSPI_PCC_CPHA BIT(0)
52#define SYNQUACER_HSSPI_PCC_CPOL BIT(1)
53#define SYNQUACER_HSSPI_PCC_ACES BIT(2)
54#define SYNQUACER_HSSPI_PCC_RTM BIT(3)
55#define SYNQUACER_HSSPI_PCC_SSPOL BIT(4)
56#define SYNQUACER_HSSPI_PCC_SDIR BIT(7)
57#define SYNQUACER_HSSPI_PCC_SENDIAN BIT(8)
58#define SYNQUACER_HSSPI_PCC_SAFESYNC BIT(16)
59#define SYNQUACER_HSSPI_PCC_SS2CD_SHIFT 5U
60#define SYNQUACER_HSSPI_PCC_CDRS_MASK 0x7f
61#define SYNQUACER_HSSPI_PCC_CDRS_SHIFT 9U
62
63#define SYNQUACER_HSSPI_TXF_FIFO_FULL BIT(0)
64#define SYNQUACER_HSSPI_TXF_FIFO_EMPTY BIT(1)
65#define SYNQUACER_HSSPI_TXF_SLAVE_RELEASED BIT(6)
66
67#define SYNQUACER_HSSPI_TXE_FIFO_FULL BIT(0)
68#define SYNQUACER_HSSPI_TXE_FIFO_EMPTY BIT(1)
69#define SYNQUACER_HSSPI_TXE_SLAVE_RELEASED BIT(6)
70
71#define SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD BIT(5)
72#define SYNQUACER_HSSPI_RXF_SLAVE_RELEASED BIT(6)
73
74#define SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD BIT(5)
75#define SYNQUACER_HSSPI_RXE_SLAVE_RELEASED BIT(6)
76
77#define SYNQUACER_HSSPI_DMCFG_SSDC BIT(1)
78#define SYNQUACER_HSSPI_DMCFG_MSTARTEN BIT(2)
79
80#define SYNQUACER_HSSPI_DMSTART_START BIT(0)
81#define SYNQUACER_HSSPI_DMSTOP_STOP BIT(8)
82#define SYNQUACER_HSSPI_DMPSEL_CS_MASK 0x3
83#define SYNQUACER_HSSPI_DMPSEL_CS_SHIFT 16U
84#define SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT 24U
85#define SYNQUACER_HSSPI_DMTRP_DATA_MASK 0x3
86#define SYNQUACER_HSSPI_DMTRP_DATA_SHIFT 26U
87#define SYNQUACER_HSSPI_DMTRP_DATA_TXRX 0
88#define SYNQUACER_HSSPI_DMTRP_DATA_RX 1
89#define SYNQUACER_HSSPI_DMTRP_DATA_TX 2
90
91#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK 0x1f
92#define SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT 8U
93#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK 0x1f
94#define SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT 16U
95
96#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK 0xf
97#define SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT 0U
98#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_MASK 0xf
99#define SYNQUACER_HSSPI_FIFOCFG_TX_THRESHOLD_SHIFT 4U
100#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK 0x3
101#define SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT 8U
102#define SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH BIT(11)
103#define SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH BIT(12)
104
105#define SYNQUACER_HSSPI_FIFO_DEPTH 16U
106#define SYNQUACER_HSSPI_FIFO_TX_THRESHOLD 4U
107#define SYNQUACER_HSSPI_FIFO_RX_THRESHOLD \
108 (SYNQUACER_HSSPI_FIFO_DEPTH - SYNQUACER_HSSPI_FIFO_TX_THRESHOLD)
109
110#define SYNQUACER_HSSPI_TRANSFER_MODE_TX BIT(1)
111#define SYNQUACER_HSSPI_TRANSFER_MODE_RX BIT(2)
112#define SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC 2000U
113#define SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC 1000U
114
115#define SYNQUACER_HSSPI_CLOCK_SRC_IHCLK 0
116#define SYNQUACER_HSSPI_CLOCK_SRC_IPCLK 1
117
118#define SYNQUACER_HSSPI_NUM_CHIP_SELECT 4U
119#define SYNQUACER_HSSPI_IRQ_NAME_MAX 32U
120
121struct synquacer_spi {
122 struct device *dev;
123 struct completion transfer_done;
124 unsigned int cs;
125 unsigned int bpw;
126 unsigned int mode;
127 unsigned int speed;
128 bool aces, rtm;
129 void *rx_buf;
130 const void *tx_buf;
131 struct clk *clk;
132 int clk_src_type;
133 void __iomem *regs;
134 u32 tx_words, rx_words;
135 unsigned int bus_width;
136 unsigned int transfer_mode;
137 char rx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
138 char tx_irq_name[SYNQUACER_HSSPI_IRQ_NAME_MAX];
139};
140
141static int read_fifo(struct synquacer_spi *sspi)
142{
143 u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
144
145 len = (len >> SYNQUACER_HSSPI_DMSTATUS_RX_DATA_SHIFT) &
146 SYNQUACER_HSSPI_DMSTATUS_RX_DATA_MASK;
147 len = min(len, sspi->rx_words);
148
149 switch (sspi->bpw) {
150 case 8: {
151 u8 *buf = sspi->rx_buf;
152
153 ioread8_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
154 buf, len);
155 sspi->rx_buf = buf + len;
156 break;
157 }
158 case 16: {
159 u16 *buf = sspi->rx_buf;
160
161 ioread16_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
162 buf, len);
163 sspi->rx_buf = buf + len;
164 break;
165 }
166 case 24:
167 /* fallthrough, should use 32-bits access */
168 case 32: {
169 u32 *buf = sspi->rx_buf;
170
171 ioread32_rep(sspi->regs + SYNQUACER_HSSPI_REG_RX_FIFO,
172 buf, len);
173 sspi->rx_buf = buf + len;
174 break;
175 }
176 default:
177 return -EINVAL;
178 }
179
180 sspi->rx_words -= len;
181 return 0;
182}
183
184static int write_fifo(struct synquacer_spi *sspi)
185{
186 u32 len = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTATUS);
187
188 len = (len >> SYNQUACER_HSSPI_DMSTATUS_TX_DATA_SHIFT) &
189 SYNQUACER_HSSPI_DMSTATUS_TX_DATA_MASK;
190 len = min(SYNQUACER_HSSPI_FIFO_DEPTH - len,
191 sspi->tx_words);
192
193 switch (sspi->bpw) {
194 case 8: {
195 const u8 *buf = sspi->tx_buf;
196
197 iowrite8_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
198 buf, len);
199 sspi->tx_buf = buf + len;
200 break;
201 }
202 case 16: {
203 const u16 *buf = sspi->tx_buf;
204
205 iowrite16_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
206 buf, len);
207 sspi->tx_buf = buf + len;
208 break;
209 }
210 case 24:
211 /* fallthrough, should use 32-bits access */
212 case 32: {
213 const u32 *buf = sspi->tx_buf;
214
215 iowrite32_rep(sspi->regs + SYNQUACER_HSSPI_REG_TX_FIFO,
216 buf, len);
217 sspi->tx_buf = buf + len;
218 break;
219 }
220 default:
221 return -EINVAL;
222 }
223
224 sspi->tx_words -= len;
225 return 0;
226}
227
228static int synquacer_spi_config(struct spi_master *master,
229 struct spi_device *spi,
230 struct spi_transfer *xfer)
231{
232 struct synquacer_spi *sspi = spi_master_get_devdata(master);
233 unsigned int speed, mode, bpw, cs, bus_width, transfer_mode;
234 u32 rate, val, div;
235
236 /* Full Duplex only on 1-bit wide bus */
237 if (xfer->rx_buf && xfer->tx_buf &&
238 (xfer->rx_nbits != 1 || xfer->tx_nbits != 1)) {
239 dev_err(sspi->dev,
240 "RX and TX bus widths must be 1-bit for Full-Duplex!\n");
241 return -EINVAL;
242 }
243
244 if (xfer->tx_buf) {
245 bus_width = xfer->tx_nbits;
246 transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_TX;
247 } else {
248 bus_width = xfer->rx_nbits;
249 transfer_mode = SYNQUACER_HSSPI_TRANSFER_MODE_RX;
250 }
251
252 mode = spi->mode;
253 cs = spi->chip_select;
254 speed = xfer->speed_hz;
255 bpw = xfer->bits_per_word;
256
257 /* return if nothing to change */
258 if (speed == sspi->speed &&
259 bus_width == sspi->bus_width && bpw == sspi->bpw &&
260 mode == sspi->mode && cs == sspi->cs &&
261 transfer_mode == sspi->transfer_mode) {
262 return 0;
263 }
264
265 sspi->transfer_mode = transfer_mode;
266 rate = master->max_speed_hz;
267
268 div = DIV_ROUND_UP(rate, speed);
269 if (div > 254) {
270 dev_err(sspi->dev, "Requested rate too low (%u)\n",
271 sspi->speed);
272 return -EINVAL;
273 }
274
275 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
276 val &= ~SYNQUACER_HSSPI_PCC_SAFESYNC;
277 if (bpw == 8 && (mode & (SPI_TX_DUAL | SPI_RX_DUAL)) && div < 3)
278 val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
279 if (bpw == 8 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 6)
280 val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
281 if (bpw == 16 && (mode & (SPI_TX_QUAD | SPI_RX_QUAD)) && div < 3)
282 val |= SYNQUACER_HSSPI_PCC_SAFESYNC;
283
284 if (mode & SPI_CPHA)
285 val |= SYNQUACER_HSSPI_PCC_CPHA;
286 else
287 val &= ~SYNQUACER_HSSPI_PCC_CPHA;
288
289 if (mode & SPI_CPOL)
290 val |= SYNQUACER_HSSPI_PCC_CPOL;
291 else
292 val &= ~SYNQUACER_HSSPI_PCC_CPOL;
293
294 if (mode & SPI_CS_HIGH)
295 val |= SYNQUACER_HSSPI_PCC_SSPOL;
296 else
297 val &= ~SYNQUACER_HSSPI_PCC_SSPOL;
298
299 if (mode & SPI_LSB_FIRST)
300 val |= SYNQUACER_HSSPI_PCC_SDIR;
301 else
302 val &= ~SYNQUACER_HSSPI_PCC_SDIR;
303
304 if (sspi->aces)
305 val |= SYNQUACER_HSSPI_PCC_ACES;
306 else
307 val &= ~SYNQUACER_HSSPI_PCC_ACES;
308
309 if (sspi->rtm)
310 val |= SYNQUACER_HSSPI_PCC_RTM;
311 else
312 val &= ~SYNQUACER_HSSPI_PCC_RTM;
313
314 val |= (3 << SYNQUACER_HSSPI_PCC_SS2CD_SHIFT);
315 val |= SYNQUACER_HSSPI_PCC_SENDIAN;
316
317 val &= ~(SYNQUACER_HSSPI_PCC_CDRS_MASK <<
318 SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
319 val |= ((div >> 1) << SYNQUACER_HSSPI_PCC_CDRS_SHIFT);
320
321 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_PCC(cs));
322
323 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
324 val &= ~(SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_MASK <<
325 SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
326 val |= ((bpw / 8 - 1) << SYNQUACER_HSSPI_FIFOCFG_FIFO_WIDTH_SHIFT);
327 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
328
329 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
330 val &= ~(SYNQUACER_HSSPI_DMTRP_DATA_MASK <<
331 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
332
333 if (xfer->rx_buf)
334 val |= (SYNQUACER_HSSPI_DMTRP_DATA_RX <<
335 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
336 else
337 val |= (SYNQUACER_HSSPI_DMTRP_DATA_TX <<
338 SYNQUACER_HSSPI_DMTRP_DATA_SHIFT);
339
340 val &= ~(3 << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
341 val |= ((bus_width >> 1) << SYNQUACER_HSSPI_DMTRP_BUS_WIDTH_SHIFT);
342 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
343
344 sspi->bpw = bpw;
345 sspi->mode = mode;
346 sspi->speed = speed;
347 sspi->cs = spi->chip_select;
348 sspi->bus_width = bus_width;
349
350 return 0;
351}
352
353static int synquacer_spi_transfer_one(struct spi_master *master,
354 struct spi_device *spi,
355 struct spi_transfer *xfer)
356{
357 struct synquacer_spi *sspi = spi_master_get_devdata(master);
358 int ret;
359 int status = 0;
360 u32 words;
361 u8 bpw;
362 u32 val;
363
364 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
365 val &= ~SYNQUACER_HSSPI_DMSTOP_STOP;
366 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
367
368 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
369 val |= SYNQUACER_HSSPI_FIFOCFG_RX_FLUSH;
370 val |= SYNQUACER_HSSPI_FIFOCFG_TX_FLUSH;
371 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
372
373 /*
374 * See if we can transfer 4-bytes as 1 word
375 * to maximize the FIFO buffer efficiency.
376 */
377 bpw = xfer->bits_per_word;
378 if (bpw == 8 && !(xfer->len % 4) && !(spi->mode & SPI_LSB_FIRST))
379 xfer->bits_per_word = 32;
380
381 ret = synquacer_spi_config(master, spi, xfer);
382
383 /* restore */
384 xfer->bits_per_word = bpw;
385
386 if (ret)
387 return ret;
388
389 reinit_completion(&sspi->transfer_done);
390
391 sspi->tx_buf = xfer->tx_buf;
392 sspi->rx_buf = xfer->rx_buf;
393
394 switch (sspi->bpw) {
395 case 8:
396 words = xfer->len;
397 break;
398 case 16:
399 words = xfer->len / 2;
400 break;
401 case 24:
402 /* fallthrough, should use 32-bits access */
403 case 32:
404 words = xfer->len / 4;
405 break;
406 default:
407 dev_err(sspi->dev, "unsupported bpw: %d\n", sspi->bpw);
408 return -EINVAL;
409 }
410
411 if (xfer->tx_buf)
412 sspi->tx_words = words;
413 else
414 sspi->tx_words = 0;
415
416 if (xfer->rx_buf)
417 sspi->rx_words = words;
418 else
419 sspi->rx_words = 0;
420
421 if (xfer->tx_buf) {
422 status = write_fifo(sspi);
423 if (status < 0) {
424 dev_err(sspi->dev, "failed write_fifo. status: 0x%x\n",
425 status);
426 return status;
427 }
428 }
429
430 if (xfer->rx_buf) {
431 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
432 val &= ~(SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_MASK <<
433 SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
434 val |= ((sspi->rx_words > SYNQUACER_HSSPI_FIFO_DEPTH ?
435 SYNQUACER_HSSPI_FIFO_RX_THRESHOLD : sspi->rx_words) <<
436 SYNQUACER_HSSPI_FIFOCFG_RX_THRESHOLD_SHIFT);
437 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_FIFOCFG);
438 }
439
440 writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
441 writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
442
443 /* Trigger */
444 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
445 val |= SYNQUACER_HSSPI_DMSTART_START;
446 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
447
448 if (xfer->tx_buf) {
449 val = SYNQUACER_HSSPI_TXE_FIFO_EMPTY;
450 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
451 status = wait_for_completion_timeout(&sspi->transfer_done,
452 msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
453 writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
454 }
455
456 if (xfer->rx_buf) {
457 u32 buf[SYNQUACER_HSSPI_FIFO_DEPTH];
458
459 val = SYNQUACER_HSSPI_RXE_FIFO_MORE_THAN_THRESHOLD |
460 SYNQUACER_HSSPI_RXE_SLAVE_RELEASED;
461 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
462 status = wait_for_completion_timeout(&sspi->transfer_done,
463 msecs_to_jiffies(SYNQUACER_HSSPI_TRANSFER_TMOUT_MSEC));
464 writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
465
466 /* stop RX and clean RXFIFO */
467 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
468 val |= SYNQUACER_HSSPI_DMSTOP_STOP;
469 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
470 sspi->rx_buf = buf;
471 sspi->rx_words = SYNQUACER_HSSPI_FIFO_DEPTH;
472 read_fifo(sspi);
473 }
474
475 if (status < 0) {
476 dev_err(sspi->dev, "failed to transfer. status: 0x%x\n",
477 status);
478 return status;
479 }
480
481 return 0;
482}
483
484static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
485{
486 struct synquacer_spi *sspi = spi_master_get_devdata(spi->master);
487 u32 val;
488
489 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
490 val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
491 SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
492 val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
493 writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
494}
495
496static int synquacer_spi_wait_status_update(struct synquacer_spi *sspi,
497 bool enable)
498{
499 u32 val;
500 unsigned long timeout = jiffies +
501 msecs_to_jiffies(SYNQUACER_HSSPI_ENABLE_TMOUT_MSEC);
502
503 /* wait MES(Module Enable Status) is updated */
504 do {
505 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL) &
506 SYNQUACER_HSSPI_MCTRL_MES;
507 if (enable && val)
508 return 0;
509 if (!enable && !val)
510 return 0;
511 } while (time_before(jiffies, timeout));
512
513 dev_err(sspi->dev, "timeout occurs in updating Module Enable Status\n");
514 return -EBUSY;
515}
516
/*
 * Reset and (re-)enable the HS-SPI module.
 *
 * The module is first disabled, all interrupts are masked and pending
 * status/fault bits cleared, then the DM configuration and the serial
 * clock source are programmed before the module is enabled again.
 * Each disable/enable step waits for the MES status bit to update.
 *
 * Returns 0 on success or a negative error code if the module status
 * does not change within the timeout.
 */
static int synquacer_spi_enable(struct spi_master *master)
{
	u32 val;
	int status;
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	/* Disable module */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, false);
	if (status < 0)
		return status;

	/* mask all interrupt sources and clear pending status/faults */
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
	writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_TXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_RXC);
	writel(~0, sspi->regs + SYNQUACER_HSSPI_REG_FAULTC);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);
	val &= ~SYNQUACER_HSSPI_DMCFG_SSDC;
	val &= ~SYNQUACER_HSSPI_DMCFG_MSTARTEN;
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMCFG);

	val = readl(sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	/* CDSS selects iPCLK; cleared it selects iHCLK */
	if (sspi->clk_src_type == SYNQUACER_HSSPI_CLOCK_SRC_IPCLK)
		val |= SYNQUACER_HSSPI_MCTRL_CDSS;
	else
		val &= ~SYNQUACER_HSSPI_MCTRL_CDSS;

	val &= ~SYNQUACER_HSSPI_MCTRL_COMMAND_SEQUENCE_EN;
	val |= SYNQUACER_HSSPI_MCTRL_MEN;
	val |= SYNQUACER_HSSPI_MCTRL_SYNCON;

	/* Enable module */
	writel(val, sspi->regs + SYNQUACER_HSSPI_REG_MCTRL);
	status = synquacer_spi_wait_status_update(sspi, true);
	if (status < 0)
		return status;

	return 0;
}
558
559static irqreturn_t sq_spi_rx_handler(int irq, void *priv)
560{
561 uint32_t val;
562 struct synquacer_spi *sspi = priv;
563
564 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_RXF);
565 if ((val & SYNQUACER_HSSPI_RXF_SLAVE_RELEASED) ||
566 (val & SYNQUACER_HSSPI_RXF_FIFO_MORE_THAN_THRESHOLD)) {
567 read_fifo(sspi);
568
569 if (sspi->rx_words == 0) {
570 writel(0, sspi->regs + SYNQUACER_HSSPI_REG_RXE);
571 complete(&sspi->transfer_done);
572 }
573 return IRQ_HANDLED;
574 }
575
576 return IRQ_NONE;
577}
578
579static irqreturn_t sq_spi_tx_handler(int irq, void *priv)
580{
581 uint32_t val;
582 struct synquacer_spi *sspi = priv;
583
584 val = readl(sspi->regs + SYNQUACER_HSSPI_REG_TXF);
585 if (val & SYNQUACER_HSSPI_TXF_FIFO_EMPTY) {
586 if (sspi->tx_words == 0) {
587 writel(0, sspi->regs + SYNQUACER_HSSPI_REG_TXE);
588 complete(&sspi->transfer_done);
589 } else {
590 write_fifo(sspi);
591 }
592 return IRQ_HANDLED;
593 }
594
595 return IRQ_NONE;
596}
597
598static int synquacer_spi_probe(struct platform_device *pdev)
599{
600 struct device_node *np = pdev->dev.of_node;
601 struct spi_master *master;
602 struct synquacer_spi *sspi;
603 int ret;
604 int rx_irq, tx_irq;
605
606 master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
607 if (!master)
608 return -ENOMEM;
609
610 platform_set_drvdata(pdev, master);
611
612 sspi = spi_master_get_devdata(master);
613 sspi->dev = &pdev->dev;
614
615 init_completion(&sspi->transfer_done);
616
617 sspi->regs = devm_platform_ioremap_resource(pdev, 0);
618 if (IS_ERR(sspi->regs)) {
619 ret = PTR_ERR(sspi->regs);
620 goto put_spi;
621 }
622
623 sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK; /* Default */
624 device_property_read_u32(&pdev->dev, "socionext,ihclk-rate",
625 &master->max_speed_hz); /* for ACPI */
626
627 if (dev_of_node(&pdev->dev)) {
628 if (device_property_match_string(&pdev->dev,
629 "clock-names", "iHCLK") >= 0) {
630 sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IHCLK;
631 sspi->clk = devm_clk_get(sspi->dev, "iHCLK");
632 } else if (device_property_match_string(&pdev->dev,
633 "clock-names", "iPCLK") >= 0) {
634 sspi->clk_src_type = SYNQUACER_HSSPI_CLOCK_SRC_IPCLK;
635 sspi->clk = devm_clk_get(sspi->dev, "iPCLK");
636 } else {
637 dev_err(&pdev->dev, "specified wrong clock source\n");
638 ret = -EINVAL;
639 goto put_spi;
640 }
641
642 if (IS_ERR(sspi->clk)) {
643 if (!(PTR_ERR(sspi->clk) == -EPROBE_DEFER))
644 dev_err(&pdev->dev, "clock not found\n");
645 ret = PTR_ERR(sspi->clk);
646 goto put_spi;
647 }
648
649 ret = clk_prepare_enable(sspi->clk);
650 if (ret) {
651 dev_err(&pdev->dev, "failed to enable clock (%d)\n",
652 ret);
653 goto put_spi;
654 }
655
656 master->max_speed_hz = clk_get_rate(sspi->clk);
657 }
658
659 if (!master->max_speed_hz) {
660 dev_err(&pdev->dev, "missing clock source\n");
661 return -EINVAL;
662 }
663 master->min_speed_hz = master->max_speed_hz / 254;
664
665 sspi->aces = device_property_read_bool(&pdev->dev,
666 "socionext,set-aces");
667 sspi->rtm = device_property_read_bool(&pdev->dev, "socionext,use-rtm");
668
669 master->num_chipselect = SYNQUACER_HSSPI_NUM_CHIP_SELECT;
670
671 rx_irq = platform_get_irq(pdev, 0);
672 if (rx_irq <= 0) {
673 dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
674 ret = rx_irq;
675 goto put_spi;
676 }
677 snprintf(sspi->rx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-rx",
678 dev_name(&pdev->dev));
679 ret = devm_request_irq(&pdev->dev, rx_irq, sq_spi_rx_handler,
680 0, sspi->rx_irq_name, sspi);
681 if (ret) {
682 dev_err(&pdev->dev, "request rx_irq failed (%d)\n", ret);
683 goto put_spi;
684 }
685
686 tx_irq = platform_get_irq(pdev, 1);
687 if (tx_irq <= 0) {
688 dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
689 ret = tx_irq;
690 goto put_spi;
691 }
692 snprintf(sspi->tx_irq_name, SYNQUACER_HSSPI_IRQ_NAME_MAX, "%s-tx",
693 dev_name(&pdev->dev));
694 ret = devm_request_irq(&pdev->dev, tx_irq, sq_spi_tx_handler,
695 0, sspi->tx_irq_name, sspi);
696 if (ret) {
697 dev_err(&pdev->dev, "request tx_irq failed (%d)\n", ret);
698 goto put_spi;
699 }
700
701 master->dev.of_node = np;
702 master->dev.fwnode = pdev->dev.fwnode;
703 master->auto_runtime_pm = true;
704 master->bus_num = pdev->id;
705
706 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL |
707 SPI_TX_QUAD | SPI_RX_QUAD;
708 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(24) |
709 SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
710
711 master->set_cs = synquacer_spi_set_cs;
712 master->transfer_one = synquacer_spi_transfer_one;
713
714 ret = synquacer_spi_enable(master);
715 if (ret)
716 goto fail_enable;
717
718 pm_runtime_set_active(sspi->dev);
719 pm_runtime_enable(sspi->dev);
720
721 ret = devm_spi_register_master(sspi->dev, master);
722 if (ret)
723 goto disable_pm;
724
725 return 0;
726
727disable_pm:
728 pm_runtime_disable(sspi->dev);
729fail_enable:
730 clk_disable_unprepare(sspi->clk);
731put_spi:
732 spi_master_put(master);
733
734 return ret;
735}
736
/*
 * Remove: the master and IRQs are devm-managed, so only runtime PM and
 * the controller clock need explicit unwinding here.
 */
static int synquacer_spi_remove(struct platform_device *pdev)
{
	struct spi_master *master = platform_get_drvdata(pdev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);

	pm_runtime_disable(sspi->dev);

	clk_disable_unprepare(sspi->clk);

	return 0;
}
748
/*
 * System suspend: quiesce the SPI core queue first, then gate the clock
 * unless runtime PM has already suspended the device.
 */
static int __maybe_unused synquacer_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	if (!pm_runtime_suspended(dev))
		clk_disable_unprepare(sspi->clk);

	return ret;
}
764
/*
 * System resume: if the device was active before suspend, re-enable the
 * clock and reinitialise the controller (the register state was lost),
 * then restart the SPI core queue.
 */
static int __maybe_unused synquacer_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	struct synquacer_spi *sspi = spi_master_get_devdata(master);
	int ret;

	if (!pm_runtime_suspended(dev)) {
		/* Ensure reconfigure during next xfer */
		sspi->speed = 0;

		ret = clk_prepare_enable(sspi->clk);
		if (ret < 0) {
			dev_err(dev, "failed to enable clk (%d)\n",
				ret);
			return ret;
		}

		ret = synquacer_spi_enable(master);
		if (ret) {
			dev_err(dev, "failed to enable spi (%d)\n", ret);
			return ret;
		}
	}

	ret = spi_master_resume(master);
	if (ret < 0)
		clk_disable_unprepare(sspi->clk);

	return ret;
}
795
/* System sleep hooks; runtime PM uses the SPI core's auto_runtime_pm. */
static SIMPLE_DEV_PM_OPS(synquacer_spi_pm_ops, synquacer_spi_suspend,
			 synquacer_spi_resume);

/* Device-tree match table */
static const struct of_device_id synquacer_spi_of_match[] = {
	{.compatible = "socionext,synquacer-spi"},
	{}
};
MODULE_DEVICE_TABLE(of, synquacer_spi_of_match);

#ifdef CONFIG_ACPI
/* ACPI match table (SynQuacer HS-SPI _HID) */
static const struct acpi_device_id synquacer_hsspi_acpi_ids[] = {
	{ "SCX0004" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(acpi, synquacer_hsspi_acpi_ids);
#endif

static struct platform_driver synquacer_spi_driver = {
	.driver = {
		.name = "synquacer-spi",
		.pm = &synquacer_spi_pm_ops,
		.of_match_table = synquacer_spi_of_match,
		.acpi_match_table = ACPI_PTR(synquacer_hsspi_acpi_ids),
	},
	.probe = synquacer_spi_probe,
	.remove = synquacer_spi_remove,
};
module_platform_driver(synquacer_spi_driver);

MODULE_DESCRIPTION("Socionext Synquacer HS-SPI controller driver");
MODULE_AUTHOR("Masahisa Kojima <masahisa.kojima@linaro.org>");
MODULE_AUTHOR("Jassi Brar <jaswinder.singh@linaro.org>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index d22f4d10413f..39374c2edcf3 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -84,8 +84,10 @@
84 (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \ 84 (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
85 ((reg) & ~(1 << ((cs) * 8 + 5)))) 85 ((reg) & ~(1 << ((cs) * 8 + 5))))
86#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \ 86#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
87 (reg = (((val) & 0xF) << ((cs) * 8)) | \ 87 (reg = (((val) & 0x1F) << ((cs) * 8)) | \
88 ((reg) & ~(0xF << ((cs) * 8)))) 88 ((reg) & ~(0x1F << ((cs) * 8))))
89#define MAX_SETUP_HOLD_CYCLES 16
90#define MAX_INACTIVE_CYCLES 32
89 91
90#define SPI_TRANS_STATUS 0x010 92#define SPI_TRANS_STATUS 0x010
91#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF) 93#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
@@ -156,6 +158,11 @@ struct tegra_spi_soc_data {
156 bool has_intr_mask_reg; 158 bool has_intr_mask_reg;
157}; 159};
158 160
161struct tegra_spi_client_data {
162 int tx_clk_tap_delay;
163 int rx_clk_tap_delay;
164};
165
159struct tegra_spi_data { 166struct tegra_spi_data {
160 struct device *dev; 167 struct device *dev;
161 struct spi_master *master; 168 struct spi_master *master;
@@ -182,6 +189,7 @@ struct tegra_spi_data {
182 unsigned dma_buf_size; 189 unsigned dma_buf_size;
183 unsigned max_buf_size; 190 unsigned max_buf_size;
184 bool is_curr_dma_xfer; 191 bool is_curr_dma_xfer;
192 bool use_hw_based_cs;
185 193
186 struct completion rx_dma_complete; 194 struct completion rx_dma_complete;
187 struct completion tx_dma_complete; 195 struct completion tx_dma_complete;
@@ -194,6 +202,10 @@ struct tegra_spi_data {
194 u32 command1_reg; 202 u32 command1_reg;
195 u32 dma_control_reg; 203 u32 dma_control_reg;
196 u32 def_command1_reg; 204 u32 def_command1_reg;
205 u32 def_command2_reg;
206 u32 spi_cs_timing1;
207 u32 spi_cs_timing2;
208 u8 last_used_cs;
197 209
198 struct completion xfer_completion; 210 struct completion xfer_completion;
199 struct spi_transfer *curr_xfer; 211 struct spi_transfer *curr_xfer;
@@ -711,14 +723,55 @@ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
711 dma_release_channel(dma_chan); 723 dma_release_channel(dma_chan);
712} 724}
713 725
726static void tegra_spi_set_hw_cs_timing(struct spi_device *spi, u8 setup_dly,
727 u8 hold_dly, u8 inactive_dly)
728{
729 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
730 u32 setup_hold;
731 u32 spi_cs_timing;
732 u32 inactive_cycles;
733 u8 cs_state;
734
735 setup_dly = min_t(u8, setup_dly, MAX_SETUP_HOLD_CYCLES);
736 hold_dly = min_t(u8, hold_dly, MAX_SETUP_HOLD_CYCLES);
737 if (setup_dly && hold_dly) {
738 setup_hold = SPI_SETUP_HOLD(setup_dly - 1, hold_dly - 1);
739 spi_cs_timing = SPI_CS_SETUP_HOLD(tspi->spi_cs_timing1,
740 spi->chip_select,
741 setup_hold);
742 if (tspi->spi_cs_timing1 != spi_cs_timing) {
743 tspi->spi_cs_timing1 = spi_cs_timing;
744 tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING1);
745 }
746 }
747
748 inactive_cycles = min_t(u8, inactive_dly, MAX_INACTIVE_CYCLES);
749 if (inactive_cycles)
750 inactive_cycles--;
751 cs_state = inactive_cycles ? 0 : 1;
752 spi_cs_timing = tspi->spi_cs_timing2;
753 SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
754 cs_state);
755 SPI_SET_CYCLES_BETWEEN_PACKETS(spi_cs_timing, spi->chip_select,
756 inactive_cycles);
757 if (tspi->spi_cs_timing2 != spi_cs_timing) {
758 tspi->spi_cs_timing2 = spi_cs_timing;
759 tegra_spi_writel(tspi, spi_cs_timing, SPI_CS_TIMING2);
760 }
761}
762
714static u32 tegra_spi_setup_transfer_one(struct spi_device *spi, 763static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
715 struct spi_transfer *t, bool is_first_of_msg) 764 struct spi_transfer *t,
765 bool is_first_of_msg,
766 bool is_single_xfer)
716{ 767{
717 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); 768 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
769 struct tegra_spi_client_data *cdata = spi->controller_data;
718 u32 speed = t->speed_hz; 770 u32 speed = t->speed_hz;
719 u8 bits_per_word = t->bits_per_word; 771 u8 bits_per_word = t->bits_per_word;
720 u32 command1; 772 u32 command1, command2;
721 int req_mode; 773 int req_mode;
774 u32 tx_tap = 0, rx_tap = 0;
722 775
723 if (speed != tspi->cur_speed) { 776 if (speed != tspi->cur_speed) {
724 clk_set_rate(tspi->clk, speed); 777 clk_set_rate(tspi->clk, speed);
@@ -765,13 +818,34 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
765 } else 818 } else
766 tegra_spi_writel(tspi, command1, SPI_COMMAND1); 819 tegra_spi_writel(tspi, command1, SPI_COMMAND1);
767 820
768 command1 |= SPI_CS_SW_HW; 821 /* GPIO based chip select control */
769 if (spi->mode & SPI_CS_HIGH) 822 if (spi->cs_gpiod)
770 command1 |= SPI_CS_SW_VAL; 823 gpiod_set_value(spi->cs_gpiod, 1);
771 else 824
772 command1 &= ~SPI_CS_SW_VAL; 825 if (is_single_xfer && !(t->cs_change)) {
826 tspi->use_hw_based_cs = true;
827 command1 &= ~(SPI_CS_SW_HW | SPI_CS_SW_VAL);
828 } else {
829 tspi->use_hw_based_cs = false;
830 command1 |= SPI_CS_SW_HW;
831 if (spi->mode & SPI_CS_HIGH)
832 command1 |= SPI_CS_SW_VAL;
833 else
834 command1 &= ~SPI_CS_SW_VAL;
835 }
836
837 if (tspi->last_used_cs != spi->chip_select) {
838 if (cdata && cdata->tx_clk_tap_delay)
839 tx_tap = cdata->tx_clk_tap_delay;
840 if (cdata && cdata->rx_clk_tap_delay)
841 rx_tap = cdata->rx_clk_tap_delay;
842 command2 = SPI_TX_TAP_DELAY(tx_tap) |
843 SPI_RX_TAP_DELAY(rx_tap);
844 if (command2 != tspi->def_command2_reg)
845 tegra_spi_writel(tspi, command2, SPI_COMMAND2);
846 tspi->last_used_cs = spi->chip_select;
847 }
773 848
774 tegra_spi_writel(tspi, 0, SPI_COMMAND2);
775 } else { 849 } else {
776 command1 = tspi->command1_reg; 850 command1 = tspi->command1_reg;
777 command1 &= ~SPI_BIT_LENGTH(~0); 851 command1 &= ~SPI_BIT_LENGTH(~0);
@@ -827,9 +901,42 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
827 return ret; 901 return ret;
828} 902}
829 903
904static struct tegra_spi_client_data
905 *tegra_spi_parse_cdata_dt(struct spi_device *spi)
906{
907 struct tegra_spi_client_data *cdata;
908 struct device_node *slave_np;
909
910 slave_np = spi->dev.of_node;
911 if (!slave_np) {
912 dev_dbg(&spi->dev, "device node not found\n");
913 return NULL;
914 }
915
916 cdata = kzalloc(sizeof(*cdata), GFP_KERNEL);
917 if (!cdata)
918 return NULL;
919
920 of_property_read_u32(slave_np, "nvidia,tx-clk-tap-delay",
921 &cdata->tx_clk_tap_delay);
922 of_property_read_u32(slave_np, "nvidia,rx-clk-tap-delay",
923 &cdata->rx_clk_tap_delay);
924 return cdata;
925}
926
927static void tegra_spi_cleanup(struct spi_device *spi)
928{
929 struct tegra_spi_client_data *cdata = spi->controller_data;
930
931 spi->controller_data = NULL;
932 if (spi->dev.of_node)
933 kfree(cdata);
934}
935
830static int tegra_spi_setup(struct spi_device *spi) 936static int tegra_spi_setup(struct spi_device *spi)
831{ 937{
832 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); 938 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
939 struct tegra_spi_client_data *cdata = spi->controller_data;
833 u32 val; 940 u32 val;
834 unsigned long flags; 941 unsigned long flags;
835 int ret; 942 int ret;
@@ -840,9 +947,16 @@ static int tegra_spi_setup(struct spi_device *spi)
840 spi->mode & SPI_CPHA ? "" : "~", 947 spi->mode & SPI_CPHA ? "" : "~",
841 spi->max_speed_hz); 948 spi->max_speed_hz);
842 949
950 if (!cdata) {
951 cdata = tegra_spi_parse_cdata_dt(spi);
952 spi->controller_data = cdata;
953 }
954
843 ret = pm_runtime_get_sync(tspi->dev); 955 ret = pm_runtime_get_sync(tspi->dev);
844 if (ret < 0) { 956 if (ret < 0) {
845 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); 957 dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
958 if (cdata)
959 tegra_spi_cleanup(spi);
846 return ret; 960 return ret;
847 } 961 }
848 962
@@ -853,6 +967,10 @@ static int tegra_spi_setup(struct spi_device *spi)
853 } 967 }
854 968
855 spin_lock_irqsave(&tspi->lock, flags); 969 spin_lock_irqsave(&tspi->lock, flags);
970 /* GPIO based chip select control */
971 if (spi->cs_gpiod)
972 gpiod_set_value(spi->cs_gpiod, 0);
973
856 val = tspi->def_command1_reg; 974 val = tspi->def_command1_reg;
857 if (spi->mode & SPI_CS_HIGH) 975 if (spi->mode & SPI_CS_HIGH)
858 val &= ~SPI_CS_POL_INACTIVE(spi->chip_select); 976 val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
@@ -882,11 +1000,18 @@ static void tegra_spi_transfer_end(struct spi_device *spi)
882 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); 1000 struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
883 int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1; 1001 int cs_val = (spi->mode & SPI_CS_HIGH) ? 0 : 1;
884 1002
885 if (cs_val) 1003 /* GPIO based chip select control */
886 tspi->command1_reg |= SPI_CS_SW_VAL; 1004 if (spi->cs_gpiod)
887 else 1005 gpiod_set_value(spi->cs_gpiod, 0);
888 tspi->command1_reg &= ~SPI_CS_SW_VAL; 1006
889 tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1); 1007 if (!tspi->use_hw_based_cs) {
1008 if (cs_val)
1009 tspi->command1_reg |= SPI_CS_SW_VAL;
1010 else
1011 tspi->command1_reg &= ~SPI_CS_SW_VAL;
1012 tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1013 }
1014
890 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); 1015 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
891} 1016}
892 1017
@@ -913,16 +1038,19 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
913 struct spi_device *spi = msg->spi; 1038 struct spi_device *spi = msg->spi;
914 int ret; 1039 int ret;
915 bool skip = false; 1040 bool skip = false;
1041 int single_xfer;
916 1042
917 msg->status = 0; 1043 msg->status = 0;
918 msg->actual_length = 0; 1044 msg->actual_length = 0;
919 1045
1046 single_xfer = list_is_singular(&msg->transfers);
920 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 1047 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
921 u32 cmd1; 1048 u32 cmd1;
922 1049
923 reinit_completion(&tspi->xfer_completion); 1050 reinit_completion(&tspi->xfer_completion);
924 1051
925 cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg); 1052 cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg,
1053 single_xfer);
926 1054
927 if (!xfer->len) { 1055 if (!xfer->len) {
928 ret = 0; 1056 ret = 0;
@@ -955,6 +1083,7 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
955 reset_control_assert(tspi->rst); 1083 reset_control_assert(tspi->rst);
956 udelay(2); 1084 udelay(2);
957 reset_control_deassert(tspi->rst); 1085 reset_control_deassert(tspi->rst);
1086 tspi->last_used_cs = master->num_chipselect + 1;
958 goto complete_xfer; 1087 goto complete_xfer;
959 } 1088 }
960 1089
@@ -1188,11 +1317,14 @@ static int tegra_spi_probe(struct platform_device *pdev)
1188 master->max_speed_hz = 25000000; /* 25MHz */ 1317 master->max_speed_hz = 25000000; /* 25MHz */
1189 1318
1190 /* the spi->mode bits understood by this driver: */ 1319 /* the spi->mode bits understood by this driver: */
1320 master->use_gpio_descriptors = true;
1191 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST | 1321 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST |
1192 SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE; 1322 SPI_TX_DUAL | SPI_RX_DUAL | SPI_3WIRE;
1193 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1323 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1194 master->setup = tegra_spi_setup; 1324 master->setup = tegra_spi_setup;
1325 master->cleanup = tegra_spi_cleanup;
1195 master->transfer_one_message = tegra_spi_transfer_one_message; 1326 master->transfer_one_message = tegra_spi_transfer_one_message;
1327 master->set_cs_timing = tegra_spi_set_hw_cs_timing;
1196 master->num_chipselect = MAX_CHIP_SELECT; 1328 master->num_chipselect = MAX_CHIP_SELECT;
1197 master->auto_runtime_pm = true; 1329 master->auto_runtime_pm = true;
1198 bus_num = of_alias_get_id(pdev->dev.of_node, "spi"); 1330 bus_num = of_alias_get_id(pdev->dev.of_node, "spi");
@@ -1268,6 +1400,10 @@ static int tegra_spi_probe(struct platform_device *pdev)
1268 reset_control_deassert(tspi->rst); 1400 reset_control_deassert(tspi->rst);
1269 tspi->def_command1_reg = SPI_M_S; 1401 tspi->def_command1_reg = SPI_M_S;
1270 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); 1402 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
1403 tspi->spi_cs_timing1 = tegra_spi_readl(tspi, SPI_CS_TIMING1);
1404 tspi->spi_cs_timing2 = tegra_spi_readl(tspi, SPI_CS_TIMING2);
1405 tspi->def_command2_reg = tegra_spi_readl(tspi, SPI_COMMAND2);
1406 tspi->last_used_cs = master->num_chipselect + 1;
1271 pm_runtime_put(&pdev->dev); 1407 pm_runtime_put(&pdev->dev);
1272 ret = request_threaded_irq(tspi->irq, tegra_spi_isr, 1408 ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
1273 tegra_spi_isr_thread, IRQF_ONESHOT, 1409 tegra_spi_isr_thread, IRQF_ONESHOT,
@@ -1340,6 +1476,8 @@ static int tegra_spi_resume(struct device *dev)
1340 return ret; 1476 return ret;
1341 } 1477 }
1342 tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1); 1478 tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
1479 tegra_spi_writel(tspi, tspi->def_command2_reg, SPI_COMMAND2);
1480 tspi->last_used_cs = master->num_chipselect + 1;
1343 pm_runtime_put(dev); 1481 pm_runtime_put(dev);
1344 1482
1345 return spi_master_resume(master); 1483 return spi_master_resume(master);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 5e4654032bfa..81e4d9f7c0f4 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1090,6 +1090,60 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
1090 return 0; 1090 return 0;
1091} 1091}
1092 1092
1093static void _spi_transfer_delay_ns(u32 ns)
1094{
1095 if (!ns)
1096 return;
1097 if (ns <= 1000) {
1098 ndelay(ns);
1099 } else {
1100 u32 us = DIV_ROUND_UP(ns, 1000);
1101
1102 if (us <= 10)
1103 udelay(us);
1104 else
1105 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1106 }
1107}
1108
1109static void _spi_transfer_cs_change_delay(struct spi_message *msg,
1110 struct spi_transfer *xfer)
1111{
1112 u32 delay = xfer->cs_change_delay;
1113 u32 unit = xfer->cs_change_delay_unit;
1114 u32 hz;
1115
1116 /* return early on "fast" mode - for everything but USECS */
1117 if (!delay && unit != SPI_DELAY_UNIT_USECS)
1118 return;
1119
1120 switch (unit) {
1121 case SPI_DELAY_UNIT_USECS:
1122 /* for compatibility use default of 10us */
1123 if (!delay)
1124 delay = 10000;
1125 else
1126 delay *= 1000;
1127 break;
1128 case SPI_DELAY_UNIT_NSECS: /* nothing to do here */
1129 break;
1130 case SPI_DELAY_UNIT_SCK:
 1131 /* if there is no effective speed known, then approximate
1132 * by underestimating with half the requested hz
1133 */
1134 hz = xfer->effective_speed_hz ?: xfer->speed_hz / 2;
1135 delay *= DIV_ROUND_UP(1000000000, hz);
1136 break;
1137 default:
1138 dev_err_once(&msg->spi->dev,
1139 "Use of unsupported delay unit %i, using default of 10us\n",
1140 xfer->cs_change_delay_unit);
1141 delay = 10000;
1142 }
1143 /* now sleep for the requested amount of time */
1144 _spi_transfer_delay_ns(delay);
1145}
1146
1093/* 1147/*
1094 * spi_transfer_one_message - Default implementation of transfer_one_message() 1148 * spi_transfer_one_message - Default implementation of transfer_one_message()
1095 * 1149 *
@@ -1148,14 +1202,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
1148 if (msg->status != -EINPROGRESS) 1202 if (msg->status != -EINPROGRESS)
1149 goto out; 1203 goto out;
1150 1204
1151 if (xfer->delay_usecs) { 1205 if (xfer->delay_usecs)
1152 u16 us = xfer->delay_usecs; 1206 _spi_transfer_delay_ns(xfer->delay_usecs * 1000);
1153
1154 if (us <= 10)
1155 udelay(us);
1156 else
1157 usleep_range(us, us + DIV_ROUND_UP(us, 10));
1158 }
1159 1207
1160 if (xfer->cs_change) { 1208 if (xfer->cs_change) {
1161 if (list_is_last(&xfer->transfer_list, 1209 if (list_is_last(&xfer->transfer_list,
@@ -1163,7 +1211,7 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
1163 keep_cs = true; 1211 keep_cs = true;
1164 } else { 1212 } else {
1165 spi_set_cs(msg->spi, false); 1213 spi_set_cs(msg->spi, false);
1166 udelay(10); 1214 _spi_transfer_cs_change_delay(msg, xfer);
1167 spi_set_cs(msg->spi, true); 1215 spi_set_cs(msg->spi, true);
1168 } 1216 }
1169 } 1217 }
@@ -1804,9 +1852,18 @@ static void of_register_spi_devices(struct spi_controller *ctlr) { }
1804#endif 1852#endif
1805 1853
1806#ifdef CONFIG_ACPI 1854#ifdef CONFIG_ACPI
1807static void acpi_spi_parse_apple_properties(struct spi_device *spi) 1855struct acpi_spi_lookup {
1856 struct spi_controller *ctlr;
1857 u32 max_speed_hz;
1858 u32 mode;
1859 int irq;
1860 u8 bits_per_word;
1861 u8 chip_select;
1862};
1863
1864static void acpi_spi_parse_apple_properties(struct acpi_device *dev,
1865 struct acpi_spi_lookup *lookup)
1808{ 1866{
1809 struct acpi_device *dev = ACPI_COMPANION(&spi->dev);
1810 const union acpi_object *obj; 1867 const union acpi_object *obj;
1811 1868
1812 if (!x86_apple_machine) 1869 if (!x86_apple_machine)
@@ -1814,35 +1871,46 @@ static void acpi_spi_parse_apple_properties(struct spi_device *spi)
1814 1871
1815 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj) 1872 if (!acpi_dev_get_property(dev, "spiSclkPeriod", ACPI_TYPE_BUFFER, &obj)
1816 && obj->buffer.length >= 4) 1873 && obj->buffer.length >= 4)
1817 spi->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer; 1874 lookup->max_speed_hz = NSEC_PER_SEC / *(u32 *)obj->buffer.pointer;
1818 1875
1819 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj) 1876 if (!acpi_dev_get_property(dev, "spiWordSize", ACPI_TYPE_BUFFER, &obj)
1820 && obj->buffer.length == 8) 1877 && obj->buffer.length == 8)
1821 spi->bits_per_word = *(u64 *)obj->buffer.pointer; 1878 lookup->bits_per_word = *(u64 *)obj->buffer.pointer;
1822 1879
1823 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj) 1880 if (!acpi_dev_get_property(dev, "spiBitOrder", ACPI_TYPE_BUFFER, &obj)
1824 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer) 1881 && obj->buffer.length == 8 && !*(u64 *)obj->buffer.pointer)
1825 spi->mode |= SPI_LSB_FIRST; 1882 lookup->mode |= SPI_LSB_FIRST;
1826 1883
1827 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj) 1884 if (!acpi_dev_get_property(dev, "spiSPO", ACPI_TYPE_BUFFER, &obj)
1828 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1885 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
1829 spi->mode |= SPI_CPOL; 1886 lookup->mode |= SPI_CPOL;
1830 1887
1831 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj) 1888 if (!acpi_dev_get_property(dev, "spiSPH", ACPI_TYPE_BUFFER, &obj)
1832 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer) 1889 && obj->buffer.length == 8 && *(u64 *)obj->buffer.pointer)
1833 spi->mode |= SPI_CPHA; 1890 lookup->mode |= SPI_CPHA;
1834} 1891}
1835 1892
1836static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1893static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1837{ 1894{
1838 struct spi_device *spi = data; 1895 struct acpi_spi_lookup *lookup = data;
1839 struct spi_controller *ctlr = spi->controller; 1896 struct spi_controller *ctlr = lookup->ctlr;
1840 1897
1841 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1898 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1842 struct acpi_resource_spi_serialbus *sb; 1899 struct acpi_resource_spi_serialbus *sb;
1900 acpi_handle parent_handle;
1901 acpi_status status;
1843 1902
1844 sb = &ares->data.spi_serial_bus; 1903 sb = &ares->data.spi_serial_bus;
1845 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1904 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1905
1906 status = acpi_get_handle(NULL,
1907 sb->resource_source.string_ptr,
1908 &parent_handle);
1909
1910 if (ACPI_FAILURE(status) ||
1911 ACPI_HANDLE(ctlr->dev.parent) != parent_handle)
1912 return -ENODEV;
1913
1846 /* 1914 /*
1847 * ACPI DeviceSelection numbering is handled by the 1915 * ACPI DeviceSelection numbering is handled by the
1848 * host controller driver in Windows and can vary 1916 * host controller driver in Windows and can vary
@@ -1855,25 +1923,25 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1855 sb->device_selection); 1923 sb->device_selection);
1856 if (cs < 0) 1924 if (cs < 0)
1857 return cs; 1925 return cs;
1858 spi->chip_select = cs; 1926 lookup->chip_select = cs;
1859 } else { 1927 } else {
1860 spi->chip_select = sb->device_selection; 1928 lookup->chip_select = sb->device_selection;
1861 } 1929 }
1862 1930
1863 spi->max_speed_hz = sb->connection_speed; 1931 lookup->max_speed_hz = sb->connection_speed;
1864 1932
1865 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1933 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
1866 spi->mode |= SPI_CPHA; 1934 lookup->mode |= SPI_CPHA;
1867 if (sb->clock_polarity == ACPI_SPI_START_HIGH) 1935 if (sb->clock_polarity == ACPI_SPI_START_HIGH)
1868 spi->mode |= SPI_CPOL; 1936 lookup->mode |= SPI_CPOL;
1869 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH) 1937 if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
1870 spi->mode |= SPI_CS_HIGH; 1938 lookup->mode |= SPI_CS_HIGH;
1871 } 1939 }
1872 } else if (spi->irq < 0) { 1940 } else if (lookup->irq < 0) {
1873 struct resource r; 1941 struct resource r;
1874 1942
1875 if (acpi_dev_resource_interrupt(ares, 0, &r)) 1943 if (acpi_dev_resource_interrupt(ares, 0, &r))
1876 spi->irq = r.start; 1944 lookup->irq = r.start;
1877 } 1945 }
1878 1946
1879 /* Always tell the ACPI core to skip this resource */ 1947 /* Always tell the ACPI core to skip this resource */
@@ -1883,7 +1951,9 @@ static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1883static acpi_status acpi_register_spi_device(struct spi_controller *ctlr, 1951static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1884 struct acpi_device *adev) 1952 struct acpi_device *adev)
1885{ 1953{
1954 acpi_handle parent_handle = NULL;
1886 struct list_head resource_list; 1955 struct list_head resource_list;
1956 struct acpi_spi_lookup lookup = {};
1887 struct spi_device *spi; 1957 struct spi_device *spi;
1888 int ret; 1958 int ret;
1889 1959
@@ -1891,28 +1961,42 @@ static acpi_status acpi_register_spi_device(struct spi_controller *ctlr,
1891 acpi_device_enumerated(adev)) 1961 acpi_device_enumerated(adev))
1892 return AE_OK; 1962 return AE_OK;
1893 1963
1894 spi = spi_alloc_device(ctlr); 1964 lookup.ctlr = ctlr;
1895 if (!spi) { 1965 lookup.irq = -1;
1896 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1897 dev_name(&adev->dev));
1898 return AE_NO_MEMORY;
1899 }
1900
1901 ACPI_COMPANION_SET(&spi->dev, adev);
1902 spi->irq = -1;
1903 1966
1904 INIT_LIST_HEAD(&resource_list); 1967 INIT_LIST_HEAD(&resource_list);
1905 ret = acpi_dev_get_resources(adev, &resource_list, 1968 ret = acpi_dev_get_resources(adev, &resource_list,
1906 acpi_spi_add_resource, spi); 1969 acpi_spi_add_resource, &lookup);
1907 acpi_dev_free_resource_list(&resource_list); 1970 acpi_dev_free_resource_list(&resource_list);
1908 1971
1909 acpi_spi_parse_apple_properties(spi); 1972 if (ret < 0)
1973 /* found SPI in _CRS but it points to another controller */
1974 return AE_OK;
1910 1975
1911 if (ret < 0 || !spi->max_speed_hz) { 1976 if (!lookup.max_speed_hz &&
1912 spi_dev_put(spi); 1977 !ACPI_FAILURE(acpi_get_parent(adev->handle, &parent_handle)) &&
1978 ACPI_HANDLE(ctlr->dev.parent) == parent_handle) {
1979 /* Apple does not use _CRS but nested devices for SPI slaves */
1980 acpi_spi_parse_apple_properties(adev, &lookup);
1981 }
1982
1983 if (!lookup.max_speed_hz)
1913 return AE_OK; 1984 return AE_OK;
1985
1986 spi = spi_alloc_device(ctlr);
1987 if (!spi) {
1988 dev_err(&ctlr->dev, "failed to allocate SPI device for %s\n",
1989 dev_name(&adev->dev));
1990 return AE_NO_MEMORY;
1914 } 1991 }
1915 1992
1993 ACPI_COMPANION_SET(&spi->dev, adev);
1994 spi->max_speed_hz = lookup.max_speed_hz;
1995 spi->mode = lookup.mode;
1996 spi->irq = lookup.irq;
1997 spi->bits_per_word = lookup.bits_per_word;
1998 spi->chip_select = lookup.chip_select;
1999
1916 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias, 2000 acpi_set_modalias(adev, acpi_device_hid(adev), spi->modalias,
1917 sizeof(spi->modalias)); 2001 sizeof(spi->modalias));
1918 2002
@@ -1944,6 +2028,8 @@ static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
1944 return acpi_register_spi_device(ctlr, adev); 2028 return acpi_register_spi_device(ctlr, adev);
1945} 2029}
1946 2030
2031#define SPI_ACPI_ENUMERATE_MAX_DEPTH 32
2032
1947static void acpi_register_spi_devices(struct spi_controller *ctlr) 2033static void acpi_register_spi_devices(struct spi_controller *ctlr)
1948{ 2034{
1949 acpi_status status; 2035 acpi_status status;
@@ -1953,7 +2039,8 @@ static void acpi_register_spi_devices(struct spi_controller *ctlr)
1953 if (!handle) 2039 if (!handle)
1954 return; 2040 return;
1955 2041
1956 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, 2042 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
2043 SPI_ACPI_ENUMERATE_MAX_DEPTH,
1957 acpi_spi_add_device, NULL, ctlr, NULL); 2044 acpi_spi_add_device, NULL, ctlr, NULL);
1958 if (ACPI_FAILURE(status)) 2045 if (ACPI_FAILURE(status))
1959 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n"); 2046 dev_warn(&ctlr->dev, "failed to enumerate SPI slaves\n");
@@ -2286,11 +2373,6 @@ int spi_register_controller(struct spi_controller *ctlr)
2286 if (status) 2373 if (status)
2287 return status; 2374 return status;
2288 2375
2289 /* even if it's just one always-selected device, there must
2290 * be at least one chipselect
2291 */
2292 if (ctlr->num_chipselect == 0)
2293 return -EINVAL;
2294 if (ctlr->bus_num >= 0) { 2376 if (ctlr->bus_num >= 0) {
2295 /* devices with a fixed bus num must check-in with the num */ 2377 /* devices with a fixed bus num must check-in with the num */
2296 mutex_lock(&board_lock); 2378 mutex_lock(&board_lock);
@@ -2361,6 +2443,13 @@ int spi_register_controller(struct spi_controller *ctlr)
2361 } 2443 }
2362 } 2444 }
2363 2445
2446 /*
2447 * Even if it's just one always-selected device, there must
2448 * be at least one chipselect.
2449 */
2450 if (!ctlr->num_chipselect)
2451 return -EINVAL;
2452
2364 status = device_add(&ctlr->dev); 2453 status = device_add(&ctlr->dev);
2365 if (status < 0) { 2454 if (status < 0) {
2366 /* free bus id */ 2455 /* free bus id */
@@ -2470,7 +2559,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
2470{ 2559{
2471 struct spi_controller *found; 2560 struct spi_controller *found;
2472 int id = ctlr->bus_num; 2561 int id = ctlr->bus_num;
2473 int dummy;
2474 2562
2475 /* First make sure that this controller was ever added */ 2563 /* First make sure that this controller was ever added */
2476 mutex_lock(&board_lock); 2564 mutex_lock(&board_lock);
@@ -2484,7 +2572,7 @@ void spi_unregister_controller(struct spi_controller *ctlr)
2484 list_del(&ctlr->list); 2572 list_del(&ctlr->list);
2485 mutex_unlock(&board_lock); 2573 mutex_unlock(&board_lock);
2486 2574
2487 dummy = device_for_each_child(&ctlr->dev, NULL, __unregister); 2575 device_for_each_child(&ctlr->dev, NULL, __unregister);
2488 device_unregister(&ctlr->dev); 2576 device_unregister(&ctlr->dev);
2489 /* free bus id */ 2577 /* free bus id */
2490 mutex_lock(&board_lock); 2578 mutex_lock(&board_lock);
@@ -2633,12 +2721,9 @@ EXPORT_SYMBOL_GPL(spi_res_add);
2633 */ 2721 */
2634void spi_res_release(struct spi_controller *ctlr, struct spi_message *message) 2722void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
2635{ 2723{
2636 struct spi_res *res; 2724 struct spi_res *res, *tmp;
2637
2638 while (!list_empty(&message->resources)) {
2639 res = list_last_entry(&message->resources,
2640 struct spi_res, entry);
2641 2725
2726 list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
2642 if (res->release) 2727 if (res->release)
2643 res->release(ctlr, message, res->data); 2728 res->release(ctlr, message, res->data);
2644 2729
@@ -2702,8 +2787,7 @@ struct spi_replaced_transfers *spi_replace_transfers(
2702 2787
2703 /* allocate the structure using spi_res */ 2788 /* allocate the structure using spi_res */
2704 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release, 2789 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2705 insert * sizeof(struct spi_transfer) 2790 struct_size(rxfer, inserted_transfers, insert)
2706 + sizeof(struct spi_replaced_transfers)
2707 + extradatasize, 2791 + extradatasize,
2708 gfp); 2792 gfp);
2709 if (!rxfer) 2793 if (!rxfer)
@@ -3083,6 +3167,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
3083 */ 3167 */
3084 message->frame_length = 0; 3168 message->frame_length = 0;
3085 list_for_each_entry(xfer, &message->transfers, transfer_list) { 3169 list_for_each_entry(xfer, &message->transfers, transfer_list) {
3170 xfer->effective_speed_hz = 0;
3086 message->frame_length += xfer->len; 3171 message->frame_length += xfer->len;
3087 if (!xfer->bits_per_word) 3172 if (!xfer->bits_per_word)
3088 xfer->bits_per_word = spi->bits_per_word; 3173 xfer->bits_per_word = spi->bits_per_word;
@@ -3762,4 +3847,3 @@ err0:
3762 * include needing to have boardinfo data structures be much more public. 3847 * include needing to have boardinfo data structures be much more public.
3763 */ 3848 */
3764postcore_initcall(spi_init); 3849postcore_initcall(spi_init);
3765
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 422bac8cc3e0..255786f2e844 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -663,6 +663,8 @@ static const struct of_device_id spidev_dt_ids[] = {
663 { .compatible = "ge,achc" }, 663 { .compatible = "ge,achc" },
664 { .compatible = "semtech,sx1301" }, 664 { .compatible = "semtech,sx1301" },
665 { .compatible = "lwn,bk4" }, 665 { .compatible = "lwn,bk4" },
666 { .compatible = "dh,dhcom-board" },
667 { .compatible = "menlo,m53cpld" },
666 {}, 668 {},
667}; 669};
668MODULE_DEVICE_TABLE(of, spidev_dt_ids); 670MODULE_DEVICE_TABLE(of, spidev_dt_ids);