aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/spi')
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile3
-rw-r--r--drivers/spi/spi-ath79.c34
-rw-r--r--drivers/spi/spi-atmel.c292
-rw-r--r--drivers/spi/spi-bcm2835.c392
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-fsl-dspi.c307
-rw-r--r--drivers/spi/spi-fsl-espi.c6
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c7
-rw-r--r--drivers/spi/spi-omap2-mcspi.c280
-rw-r--r--drivers/spi/spi-orion.c70
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c8
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c487
-rw-r--r--drivers/spi/spi-pxa2xx.c159
-rw-r--r--drivers/spi/spi-pxa2xx.h6
-rw-r--r--drivers/spi/spi-rb4xx.c210
-rw-r--r--drivers/spi/spi-rspi.c23
-rw-r--r--drivers/spi/spi-s3c64xx.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c2
-rw-r--r--drivers/spi/spi-sirf.c877
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1123
-rw-r--r--drivers/spi/spi.c45
-rw-r--r--drivers/spi/spidev.c34
24 files changed, 3218 insertions, 1179 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 72b059081559..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -77,6 +77,7 @@ config SPI_ATMEL
77 77
78config SPI_BCM2835 78config SPI_BCM2835
79 tristate "BCM2835 SPI controller" 79 tristate "BCM2835 SPI controller"
80 depends on GPIOLIB
80 depends on ARCH_BCM2835 || COMPILE_TEST 81 depends on ARCH_BCM2835 || COMPILE_TEST
81 depends on GPIOLIB 82 depends on GPIOLIB
82 help 83 help
@@ -221,7 +222,7 @@ config SPI_FALCON
221 222
222config SPI_GPIO 223config SPI_GPIO
223 tristate "GPIO-based bitbanging SPI Master" 224 tristate "GPIO-based bitbanging SPI Master"
224 depends on GPIOLIB 225 depends on GPIOLIB || COMPILE_TEST
225 select SPI_BITBANG 226 select SPI_BITBANG
226 help 227 help
227 This simple GPIO bitbanging SPI master uses the arch-neutral GPIO 228 This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
@@ -327,7 +328,7 @@ config SPI_MESON_SPIFC
327 328
328config SPI_OC_TINY 329config SPI_OC_TINY
329 tristate "OpenCores tiny SPI" 330 tristate "OpenCores tiny SPI"
330 depends on GPIOLIB 331 depends on GPIOLIB || COMPILE_TEST
331 select SPI_BITBANG 332 select SPI_BITBANG
332 help 333 help
333 This is the driver for OpenCores tiny SPI master controller. 334 This is the driver for OpenCores tiny SPI master controller.
@@ -394,16 +395,9 @@ config SPI_PPC4xx
394 help 395 help
395 This selects a driver for the PPC4xx SPI Controller. 396 This selects a driver for the PPC4xx SPI Controller.
396 397
397config SPI_PXA2XX_PXADMA
398 bool "PXA2xx SSP legacy PXA DMA API support"
399 depends on SPI_PXA2XX && ARCH_PXA
400 help
401 Enable PXA private legacy DMA API support. Note that this is
402 deprecated in favor of generic DMA engine API.
403
404config SPI_PXA2XX_DMA 398config SPI_PXA2XX_DMA
405 def_bool y 399 def_bool y
406 depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA 400 depends on SPI_PXA2XX
407 401
408config SPI_PXA2XX 402config SPI_PXA2XX
409 tristate "PXA2xx SSP SPI master" 403 tristate "PXA2xx SSP SPI master"
@@ -429,6 +423,12 @@ config SPI_ROCKCHIP
429 The main usecase of this controller is to use spi flash as boot 423 The main usecase of this controller is to use spi flash as boot
430 device. 424 device.
431 425
426config SPI_RB4XX
427 tristate "Mikrotik RB4XX SPI master"
428 depends on SPI_MASTER && ATH79
429 help
430 SPI controller driver for the Mikrotik RB4xx series boards.
431
432config SPI_RSPI 432config SPI_RSPI
433 tristate "Renesas RSPI/QSPI controller" 433 tristate "Renesas RSPI/QSPI controller"
434 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST 434 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
@@ -610,6 +610,12 @@ config SPI_XTENSA_XTFPGA
610 16 bit words in SPI mode 0, automatically asserting CS on transfer 610 16 bit words in SPI mode 0, automatically asserting CS on transfer
611 start and deasserting on end. 611 start and deasserting on end.
612 612
613config SPI_ZYNQMP_GQSPI
614 tristate "Xilinx ZynqMP GQSPI controller"
615 depends on SPI_MASTER && HAS_DMA
616 help
617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
618
613config SPI_NUC900 619config SPI_NUC900
614 tristate "Nuvoton NUC900 series SPI" 620 tristate "Nuvoton NUC900 series SPI"
615 depends on ARCH_W90X900 621 depends on ARCH_W90X900
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index d8cbf654976b..1154dbac8f2c 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -60,12 +60,12 @@ obj-$(CONFIG_SPI_ORION) += spi-orion.o
60obj-$(CONFIG_SPI_PL022) += spi-pl022.o 60obj-$(CONFIG_SPI_PL022) += spi-pl022.o
61obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o 61obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
62spi-pxa2xx-platform-objs := spi-pxa2xx.o 62spi-pxa2xx-platform-objs := spi-pxa2xx.o
63spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
64spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o 63spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
65obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o 64obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
66obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o 65obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
67obj-$(CONFIG_SPI_QUP) += spi-qup.o 66obj-$(CONFIG_SPI_QUP) += spi-qup.o
68obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o 67obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
68obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
69obj-$(CONFIG_SPI_RSPI) += spi-rspi.o 69obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
70obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o 70obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
71spi-s3c24xx-hw-y := spi-s3c24xx.o 71spi-s3c24xx-hw-y := spi-s3c24xx.o
@@ -89,3 +89,4 @@ obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
89obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o 89obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
90obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o 90obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
91obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o 91obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
92obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index b02eb4ac0218..bf1f9b32c597 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -79,10 +79,8 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
79 } 79 }
80 80
81 if (spi->chip_select) { 81 if (spi->chip_select) {
82 struct ath79_spi_controller_data *cdata = spi->controller_data;
83
84 /* SPI is normally active-low */ 82 /* SPI is normally active-low */
85 gpio_set_value(cdata->gpio, cs_high); 83 gpio_set_value(spi->cs_gpio, cs_high);
86 } else { 84 } else {
87 if (cs_high) 85 if (cs_high)
88 sp->ioc_base |= AR71XX_SPI_IOC_CS0; 86 sp->ioc_base |= AR71XX_SPI_IOC_CS0;
@@ -117,11 +115,10 @@ static void ath79_spi_disable(struct ath79_spi *sp)
117 115
118static int ath79_spi_setup_cs(struct spi_device *spi) 116static int ath79_spi_setup_cs(struct spi_device *spi)
119{ 117{
120 struct ath79_spi_controller_data *cdata; 118 struct ath79_spi *sp = ath79_spidev_to_sp(spi);
121 int status; 119 int status;
122 120
123 cdata = spi->controller_data; 121 if (spi->chip_select && !gpio_is_valid(spi->cs_gpio))
124 if (spi->chip_select && !cdata)
125 return -EINVAL; 122 return -EINVAL;
126 123
127 status = 0; 124 status = 0;
@@ -134,8 +131,15 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
134 else 131 else
135 flags |= GPIOF_INIT_HIGH; 132 flags |= GPIOF_INIT_HIGH;
136 133
137 status = gpio_request_one(cdata->gpio, flags, 134 status = gpio_request_one(spi->cs_gpio, flags,
138 dev_name(&spi->dev)); 135 dev_name(&spi->dev));
136 } else {
137 if (spi->mode & SPI_CS_HIGH)
138 sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
139 else
140 sp->ioc_base |= AR71XX_SPI_IOC_CS0;
141
142 ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
139 } 143 }
140 144
141 return status; 145 return status;
@@ -144,8 +148,7 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
144static void ath79_spi_cleanup_cs(struct spi_device *spi) 148static void ath79_spi_cleanup_cs(struct spi_device *spi)
145{ 149{
146 if (spi->chip_select) { 150 if (spi->chip_select) {
147 struct ath79_spi_controller_data *cdata = spi->controller_data; 151 gpio_free(spi->cs_gpio);
148 gpio_free(cdata->gpio);
149 } 152 }
150} 153}
151 154
@@ -217,6 +220,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
217 } 220 }
218 221
219 sp = spi_master_get_devdata(master); 222 sp = spi_master_get_devdata(master);
223 master->dev.of_node = pdev->dev.of_node;
220 platform_set_drvdata(pdev, sp); 224 platform_set_drvdata(pdev, sp);
221 225
222 pdata = dev_get_platdata(&pdev->dev); 226 pdata = dev_get_platdata(&pdev->dev);
@@ -253,7 +257,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
253 goto err_put_master; 257 goto err_put_master;
254 } 258 }
255 259
256 ret = clk_enable(sp->clk); 260 ret = clk_prepare_enable(sp->clk);
257 if (ret) 261 if (ret)
258 goto err_put_master; 262 goto err_put_master;
259 263
@@ -277,7 +281,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
277err_disable: 281err_disable:
278 ath79_spi_disable(sp); 282 ath79_spi_disable(sp);
279err_clk_disable: 283err_clk_disable:
280 clk_disable(sp->clk); 284 clk_disable_unprepare(sp->clk);
281err_put_master: 285err_put_master:
282 spi_master_put(sp->bitbang.master); 286 spi_master_put(sp->bitbang.master);
283 287
@@ -290,7 +294,7 @@ static int ath79_spi_remove(struct platform_device *pdev)
290 294
291 spi_bitbang_stop(&sp->bitbang); 295 spi_bitbang_stop(&sp->bitbang);
292 ath79_spi_disable(sp); 296 ath79_spi_disable(sp);
293 clk_disable(sp->clk); 297 clk_disable_unprepare(sp->clk);
294 spi_master_put(sp->bitbang.master); 298 spi_master_put(sp->bitbang.master);
295 299
296 return 0; 300 return 0;
@@ -301,12 +305,18 @@ static void ath79_spi_shutdown(struct platform_device *pdev)
301 ath79_spi_remove(pdev); 305 ath79_spi_remove(pdev);
302} 306}
303 307
308static const struct of_device_id ath79_spi_of_match[] = {
309 { .compatible = "qca,ar7100-spi", },
310 { },
311};
312
304static struct platform_driver ath79_spi_driver = { 313static struct platform_driver ath79_spi_driver = {
305 .probe = ath79_spi_probe, 314 .probe = ath79_spi_probe,
306 .remove = ath79_spi_remove, 315 .remove = ath79_spi_remove,
307 .shutdown = ath79_spi_shutdown, 316 .shutdown = ath79_spi_shutdown,
308 .driver = { 317 .driver = {
309 .name = DRV_NAME, 318 .name = DRV_NAME,
319 .of_match_table = ath79_spi_of_match,
310 }, 320 },
311}; 321};
312module_platform_driver(ath79_spi_driver); 322module_platform_driver(ath79_spi_driver);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index a2f40b1b2225..c9eca347787d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -41,6 +41,8 @@
41#define SPI_CSR1 0x0034 41#define SPI_CSR1 0x0034
42#define SPI_CSR2 0x0038 42#define SPI_CSR2 0x0038
43#define SPI_CSR3 0x003c 43#define SPI_CSR3 0x003c
44#define SPI_FMR 0x0040
45#define SPI_FLR 0x0044
44#define SPI_VERSION 0x00fc 46#define SPI_VERSION 0x00fc
45#define SPI_RPR 0x0100 47#define SPI_RPR 0x0100
46#define SPI_RCR 0x0104 48#define SPI_RCR 0x0104
@@ -62,6 +64,14 @@
62#define SPI_SWRST_SIZE 1 64#define SPI_SWRST_SIZE 1
63#define SPI_LASTXFER_OFFSET 24 65#define SPI_LASTXFER_OFFSET 24
64#define SPI_LASTXFER_SIZE 1 66#define SPI_LASTXFER_SIZE 1
67#define SPI_TXFCLR_OFFSET 16
68#define SPI_TXFCLR_SIZE 1
69#define SPI_RXFCLR_OFFSET 17
70#define SPI_RXFCLR_SIZE 1
71#define SPI_FIFOEN_OFFSET 30
72#define SPI_FIFOEN_SIZE 1
73#define SPI_FIFODIS_OFFSET 31
74#define SPI_FIFODIS_SIZE 1
65 75
66/* Bitfields in MR */ 76/* Bitfields in MR */
67#define SPI_MSTR_OFFSET 0 77#define SPI_MSTR_OFFSET 0
@@ -114,6 +124,22 @@
114#define SPI_TXEMPTY_SIZE 1 124#define SPI_TXEMPTY_SIZE 1
115#define SPI_SPIENS_OFFSET 16 125#define SPI_SPIENS_OFFSET 16
116#define SPI_SPIENS_SIZE 1 126#define SPI_SPIENS_SIZE 1
127#define SPI_TXFEF_OFFSET 24
128#define SPI_TXFEF_SIZE 1
129#define SPI_TXFFF_OFFSET 25
130#define SPI_TXFFF_SIZE 1
131#define SPI_TXFTHF_OFFSET 26
132#define SPI_TXFTHF_SIZE 1
133#define SPI_RXFEF_OFFSET 27
134#define SPI_RXFEF_SIZE 1
135#define SPI_RXFFF_OFFSET 28
136#define SPI_RXFFF_SIZE 1
137#define SPI_RXFTHF_OFFSET 29
138#define SPI_RXFTHF_SIZE 1
139#define SPI_TXFPTEF_OFFSET 30
140#define SPI_TXFPTEF_SIZE 1
141#define SPI_RXFPTEF_OFFSET 31
142#define SPI_RXFPTEF_SIZE 1
117 143
118/* Bitfields in CSR0 */ 144/* Bitfields in CSR0 */
119#define SPI_CPOL_OFFSET 0 145#define SPI_CPOL_OFFSET 0
@@ -157,6 +183,22 @@
157#define SPI_TXTDIS_OFFSET 9 183#define SPI_TXTDIS_OFFSET 9
158#define SPI_TXTDIS_SIZE 1 184#define SPI_TXTDIS_SIZE 1
159 185
186/* Bitfields in FMR */
187#define SPI_TXRDYM_OFFSET 0
188#define SPI_TXRDYM_SIZE 2
189#define SPI_RXRDYM_OFFSET 4
190#define SPI_RXRDYM_SIZE 2
191#define SPI_TXFTHRES_OFFSET 16
192#define SPI_TXFTHRES_SIZE 6
193#define SPI_RXFTHRES_OFFSET 24
194#define SPI_RXFTHRES_SIZE 6
195
196/* Bitfields in FLR */
197#define SPI_TXFL_OFFSET 0
198#define SPI_TXFL_SIZE 6
199#define SPI_RXFL_OFFSET 16
200#define SPI_RXFL_SIZE 6
201
160/* Constants for BITS */ 202/* Constants for BITS */
161#define SPI_BITS_8_BPT 0 203#define SPI_BITS_8_BPT 0
162#define SPI_BITS_9_BPT 1 204#define SPI_BITS_9_BPT 1
@@ -167,6 +209,9 @@
167#define SPI_BITS_14_BPT 6 209#define SPI_BITS_14_BPT 6
168#define SPI_BITS_15_BPT 7 210#define SPI_BITS_15_BPT 7
169#define SPI_BITS_16_BPT 8 211#define SPI_BITS_16_BPT 8
212#define SPI_ONE_DATA 0
213#define SPI_TWO_DATA 1
214#define SPI_FOUR_DATA 2
170 215
171/* Bit manipulation macros */ 216/* Bit manipulation macros */
172#define SPI_BIT(name) \ 217#define SPI_BIT(name) \
@@ -185,11 +230,31 @@
185 __raw_readl((port)->regs + SPI_##reg) 230 __raw_readl((port)->regs + SPI_##reg)
186#define spi_writel(port, reg, value) \ 231#define spi_writel(port, reg, value) \
187 __raw_writel((value), (port)->regs + SPI_##reg) 232 __raw_writel((value), (port)->regs + SPI_##reg)
233
234#define spi_readw(port, reg) \
235 __raw_readw((port)->regs + SPI_##reg)
236#define spi_writew(port, reg, value) \
237 __raw_writew((value), (port)->regs + SPI_##reg)
238
239#define spi_readb(port, reg) \
240 __raw_readb((port)->regs + SPI_##reg)
241#define spi_writeb(port, reg, value) \
242 __raw_writeb((value), (port)->regs + SPI_##reg)
188#else 243#else
189#define spi_readl(port, reg) \ 244#define spi_readl(port, reg) \
190 readl_relaxed((port)->regs + SPI_##reg) 245 readl_relaxed((port)->regs + SPI_##reg)
191#define spi_writel(port, reg, value) \ 246#define spi_writel(port, reg, value) \
192 writel_relaxed((value), (port)->regs + SPI_##reg) 247 writel_relaxed((value), (port)->regs + SPI_##reg)
248
249#define spi_readw(port, reg) \
250 readw_relaxed((port)->regs + SPI_##reg)
251#define spi_writew(port, reg, value) \
252 writew_relaxed((value), (port)->regs + SPI_##reg)
253
254#define spi_readb(port, reg) \
255 readb_relaxed((port)->regs + SPI_##reg)
256#define spi_writeb(port, reg, value) \
257 writeb_relaxed((value), (port)->regs + SPI_##reg)
193#endif 258#endif
194/* use PIO for small transfers, avoiding DMA setup/teardown overhead and 259/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
195 * cache operations; better heuristics consider wordsize and bitrate. 260 * cache operations; better heuristics consider wordsize and bitrate.
@@ -246,11 +311,14 @@ struct atmel_spi {
246 311
247 bool use_dma; 312 bool use_dma;
248 bool use_pdc; 313 bool use_pdc;
314 bool use_cs_gpios;
249 /* dmaengine data */ 315 /* dmaengine data */
250 struct atmel_spi_dma dma; 316 struct atmel_spi_dma dma;
251 317
252 bool keep_cs; 318 bool keep_cs;
253 bool cs_active; 319 bool cs_active;
320
321 u32 fifo_size;
254}; 322};
255 323
256/* Controller-specific per-slave state */ 324/* Controller-specific per-slave state */
@@ -321,7 +389,8 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
321 } 389 }
322 390
323 mr = spi_readl(as, MR); 391 mr = spi_readl(as, MR);
324 gpio_set_value(asd->npcs_pin, active); 392 if (as->use_cs_gpios)
393 gpio_set_value(asd->npcs_pin, active);
325 } else { 394 } else {
326 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0; 395 u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
327 int i; 396 int i;
@@ -337,7 +406,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
337 406
338 mr = spi_readl(as, MR); 407 mr = spi_readl(as, MR);
339 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr); 408 mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
340 if (spi->chip_select != 0) 409 if (as->use_cs_gpios && spi->chip_select != 0)
341 gpio_set_value(asd->npcs_pin, active); 410 gpio_set_value(asd->npcs_pin, active);
342 spi_writel(as, MR, mr); 411 spi_writel(as, MR, mr);
343 } 412 }
@@ -366,7 +435,9 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
366 asd->npcs_pin, active ? " (low)" : "", 435 asd->npcs_pin, active ? " (low)" : "",
367 mr); 436 mr);
368 437
369 if (atmel_spi_is_v2(as) || spi->chip_select != 0) 438 if (!as->use_cs_gpios)
439 spi_writel(as, CR, SPI_BIT(LASTXFER));
440 else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
370 gpio_set_value(asd->npcs_pin, !active); 441 gpio_set_value(asd->npcs_pin, !active);
371} 442}
372 443
@@ -406,6 +477,20 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
406 slave_config->dst_maxburst = 1; 477 slave_config->dst_maxburst = 1;
407 slave_config->device_fc = false; 478 slave_config->device_fc = false;
408 479
480 /*
481 * This driver uses fixed peripheral select mode (PS bit set to '0' in
482 * the Mode Register).
483 * So according to the datasheet, when FIFOs are available (and
484 * enabled), the Transmit FIFO operates in Multiple Data Mode.
485 * In this mode, up to 2 data, not 4, can be written into the Transmit
486 * Data Register in a single access.
487 * However, the first data has to be written into the lowest 16 bits and
488 * the second data into the highest 16 bits of the Transmit
489 * Data Register. For 8bit data (the most frequent case), it would
490 * require to rework tx_buf so each data would actualy fit 16 bits.
491 * So we'd rather write only one data at the time. Hence the transmit
492 * path works the same whether FIFOs are available (and enabled) or not.
493 */
409 slave_config->direction = DMA_MEM_TO_DEV; 494 slave_config->direction = DMA_MEM_TO_DEV;
410 if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) { 495 if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
411 dev_err(&as->pdev->dev, 496 dev_err(&as->pdev->dev,
@@ -413,6 +498,14 @@ static int atmel_spi_dma_slave_config(struct atmel_spi *as,
413 err = -EINVAL; 498 err = -EINVAL;
414 } 499 }
415 500
501 /*
502 * This driver configures the spi controller for master mode (MSTR bit
503 * set to '1' in the Mode Register).
504 * So according to the datasheet, when FIFOs are available (and
505 * enabled), the Receive FIFO operates in Single Data Mode.
506 * So the receive path works the same whether FIFOs are available (and
507 * enabled) or not.
508 */
416 slave_config->direction = DMA_DEV_TO_MEM; 509 slave_config->direction = DMA_DEV_TO_MEM;
417 if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) { 510 if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
418 dev_err(&as->pdev->dev, 511 dev_err(&as->pdev->dev,
@@ -502,10 +595,10 @@ static void dma_callback(void *data)
502} 595}
503 596
504/* 597/*
505 * Next transfer using PIO. 598 * Next transfer using PIO without FIFO.
506 */ 599 */
507static void atmel_spi_next_xfer_pio(struct spi_master *master, 600static void atmel_spi_next_xfer_single(struct spi_master *master,
508 struct spi_transfer *xfer) 601 struct spi_transfer *xfer)
509{ 602{
510 struct atmel_spi *as = spi_master_get_devdata(master); 603 struct atmel_spi *as = spi_master_get_devdata(master);
511 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 604 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
@@ -538,6 +631,99 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
538} 631}
539 632
540/* 633/*
634 * Next transfer using PIO with FIFO.
635 */
636static void atmel_spi_next_xfer_fifo(struct spi_master *master,
637 struct spi_transfer *xfer)
638{
639 struct atmel_spi *as = spi_master_get_devdata(master);
640 u32 current_remaining_data, num_data;
641 u32 offset = xfer->len - as->current_remaining_bytes;
642 const u16 *words = (const u16 *)((u8 *)xfer->tx_buf + offset);
643 const u8 *bytes = (const u8 *)((u8 *)xfer->tx_buf + offset);
644 u16 td0, td1;
645 u32 fifomr;
646
647 dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_fifo\n");
648
649 /* Compute the number of data to transfer in the current iteration */
650 current_remaining_data = ((xfer->bits_per_word > 8) ?
651 ((u32)as->current_remaining_bytes >> 1) :
652 (u32)as->current_remaining_bytes);
653 num_data = min(current_remaining_data, as->fifo_size);
654
655 /* Flush RX and TX FIFOs */
656 spi_writel(as, CR, SPI_BIT(RXFCLR) | SPI_BIT(TXFCLR));
657 while (spi_readl(as, FLR))
658 cpu_relax();
659
660 /* Set RX FIFO Threshold to the number of data to transfer */
661 fifomr = spi_readl(as, FMR);
662 spi_writel(as, FMR, SPI_BFINS(RXFTHRES, num_data, fifomr));
663
664 /* Clear FIFO flags in the Status Register, especially RXFTHF */
665 (void)spi_readl(as, SR);
666
667 /* Fill TX FIFO */
668 while (num_data >= 2) {
669 if (xfer->tx_buf) {
670 if (xfer->bits_per_word > 8) {
671 td0 = *words++;
672 td1 = *words++;
673 } else {
674 td0 = *bytes++;
675 td1 = *bytes++;
676 }
677 } else {
678 td0 = 0;
679 td1 = 0;
680 }
681
682 spi_writel(as, TDR, (td1 << 16) | td0);
683 num_data -= 2;
684 }
685
686 if (num_data) {
687 if (xfer->tx_buf) {
688 if (xfer->bits_per_word > 8)
689 td0 = *words++;
690 else
691 td0 = *bytes++;
692 } else {
693 td0 = 0;
694 }
695
696 spi_writew(as, TDR, td0);
697 num_data--;
698 }
699
700 dev_dbg(master->dev.parent,
701 " start fifo xfer %p: len %u tx %p rx %p bitpw %d\n",
702 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
703 xfer->bits_per_word);
704
705 /*
706 * Enable RX FIFO Threshold Flag interrupt to be notified about
707 * transfer completion.
708 */
709 spi_writel(as, IER, SPI_BIT(RXFTHF) | SPI_BIT(OVRES));
710}
711
712/*
713 * Next transfer using PIO.
714 */
715static void atmel_spi_next_xfer_pio(struct spi_master *master,
716 struct spi_transfer *xfer)
717{
718 struct atmel_spi *as = spi_master_get_devdata(master);
719
720 if (as->fifo_size)
721 atmel_spi_next_xfer_fifo(master, xfer);
722 else
723 atmel_spi_next_xfer_single(master, xfer);
724}
725
726/*
541 * Submit next transfer for DMA. 727 * Submit next transfer for DMA.
542 */ 728 */
543static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, 729static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
@@ -839,13 +1025,8 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
839 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); 1025 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
840} 1026}
841 1027
842/* Called from IRQ
843 *
844 * Must update "current_remaining_bytes" to keep track of data
845 * to transfer.
846 */
847static void 1028static void
848atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) 1029atmel_spi_pump_single_data(struct atmel_spi *as, struct spi_transfer *xfer)
849{ 1030{
850 u8 *rxp; 1031 u8 *rxp;
851 u16 *rxp16; 1032 u16 *rxp16;
@@ -872,6 +1053,57 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
872 } 1053 }
873} 1054}
874 1055
1056static void
1057atmel_spi_pump_fifo_data(struct atmel_spi *as, struct spi_transfer *xfer)
1058{
1059 u32 fifolr = spi_readl(as, FLR);
1060 u32 num_bytes, num_data = SPI_BFEXT(RXFL, fifolr);
1061 u32 offset = xfer->len - as->current_remaining_bytes;
1062 u16 *words = (u16 *)((u8 *)xfer->rx_buf + offset);
1063 u8 *bytes = (u8 *)((u8 *)xfer->rx_buf + offset);
1064 u16 rd; /* RD field is the lowest 16 bits of RDR */
1065
1066 /* Update the number of remaining bytes to transfer */
1067 num_bytes = ((xfer->bits_per_word > 8) ?
1068 (num_data << 1) :
1069 num_data);
1070
1071 if (as->current_remaining_bytes > num_bytes)
1072 as->current_remaining_bytes -= num_bytes;
1073 else
1074 as->current_remaining_bytes = 0;
1075
1076 /* Handle odd number of bytes when data are more than 8bit width */
1077 if (xfer->bits_per_word > 8)
1078 as->current_remaining_bytes &= ~0x1;
1079
1080 /* Read data */
1081 while (num_data) {
1082 rd = spi_readl(as, RDR);
1083 if (xfer->rx_buf) {
1084 if (xfer->bits_per_word > 8)
1085 *words++ = rd;
1086 else
1087 *bytes++ = rd;
1088 }
1089 num_data--;
1090 }
1091}
1092
1093/* Called from IRQ
1094 *
1095 * Must update "current_remaining_bytes" to keep track of data
1096 * to transfer.
1097 */
1098static void
1099atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
1100{
1101 if (as->fifo_size)
1102 atmel_spi_pump_fifo_data(as, xfer);
1103 else
1104 atmel_spi_pump_single_data(as, xfer);
1105}
1106
875/* Interrupt 1107/* Interrupt
876 * 1108 *
877 * No need for locking in this Interrupt handler: done_status is the 1109 * No need for locking in this Interrupt handler: done_status is the
@@ -912,7 +1144,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
912 1144
913 complete(&as->xfer_completion); 1145 complete(&as->xfer_completion);
914 1146
915 } else if (pending & SPI_BIT(RDRF)) { 1147 } else if (pending & (SPI_BIT(RDRF) | SPI_BIT(RXFTHF))) {
916 atmel_spi_lock(as); 1148 atmel_spi_lock(as);
917 1149
918 if (as->current_remaining_bytes) { 1150 if (as->current_remaining_bytes) {
@@ -996,6 +1228,8 @@ static int atmel_spi_setup(struct spi_device *spi)
996 csr |= SPI_BIT(CPOL); 1228 csr |= SPI_BIT(CPOL);
997 if (!(spi->mode & SPI_CPHA)) 1229 if (!(spi->mode & SPI_CPHA))
998 csr |= SPI_BIT(NCPHA); 1230 csr |= SPI_BIT(NCPHA);
1231 if (!as->use_cs_gpios)
1232 csr |= SPI_BIT(CSAAT);
999 1233
1000 /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs. 1234 /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
1001 * 1235 *
@@ -1009,7 +1243,9 @@ static int atmel_spi_setup(struct spi_device *spi)
1009 /* chipselect must have been muxed as GPIO (e.g. in board setup) */ 1243 /* chipselect must have been muxed as GPIO (e.g. in board setup) */
1010 npcs_pin = (unsigned long)spi->controller_data; 1244 npcs_pin = (unsigned long)spi->controller_data;
1011 1245
1012 if (gpio_is_valid(spi->cs_gpio)) 1246 if (!as->use_cs_gpios)
1247 npcs_pin = spi->chip_select;
1248 else if (gpio_is_valid(spi->cs_gpio))
1013 npcs_pin = spi->cs_gpio; 1249 npcs_pin = spi->cs_gpio;
1014 1250
1015 asd = spi->controller_state; 1251 asd = spi->controller_state;
@@ -1018,15 +1254,19 @@ static int atmel_spi_setup(struct spi_device *spi)
1018 if (!asd) 1254 if (!asd)
1019 return -ENOMEM; 1255 return -ENOMEM;
1020 1256
1021 ret = gpio_request(npcs_pin, dev_name(&spi->dev)); 1257 if (as->use_cs_gpios) {
1022 if (ret) { 1258 ret = gpio_request(npcs_pin, dev_name(&spi->dev));
1023 kfree(asd); 1259 if (ret) {
1024 return ret; 1260 kfree(asd);
1261 return ret;
1262 }
1263
1264 gpio_direction_output(npcs_pin,
1265 !(spi->mode & SPI_CS_HIGH));
1025 } 1266 }
1026 1267
1027 asd->npcs_pin = npcs_pin; 1268 asd->npcs_pin = npcs_pin;
1028 spi->controller_state = asd; 1269 spi->controller_state = asd;
1029 gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
1030 } 1270 }
1031 1271
1032 asd->csr = csr; 1272 asd->csr = csr;
@@ -1338,6 +1578,13 @@ static int atmel_spi_probe(struct platform_device *pdev)
1338 1578
1339 atmel_get_caps(as); 1579 atmel_get_caps(as);
1340 1580
1581 as->use_cs_gpios = true;
1582 if (atmel_spi_is_v2(as) &&
1583 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1584 as->use_cs_gpios = false;
1585 master->num_chipselect = 4;
1586 }
1587
1341 as->use_dma = false; 1588 as->use_dma = false;
1342 as->use_pdc = false; 1589 as->use_pdc = false;
1343 if (as->caps.has_dma_support) { 1590 if (as->caps.has_dma_support) {
@@ -1380,6 +1627,13 @@ static int atmel_spi_probe(struct platform_device *pdev)
1380 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); 1627 spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
1381 spi_writel(as, CR, SPI_BIT(SPIEN)); 1628 spi_writel(as, CR, SPI_BIT(SPIEN));
1382 1629
1630 as->fifo_size = 0;
1631 if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size",
1632 &as->fifo_size)) {
1633 dev_info(&pdev->dev, "Using FIFO (%u data)\n", as->fifo_size);
1634 spi_writel(as, CR, SPI_BIT(FIFOEN));
1635 }
1636
1383 /* go! */ 1637 /* go! */
1384 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n", 1638 dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
1385 (unsigned long)regs->start, irq); 1639 (unsigned long)regs->start, irq);
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 37875cf942f7..59705ab23577 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -20,18 +20,22 @@
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 */ 21 */
22 22
23#include <asm/page.h>
23#include <linux/clk.h> 24#include <linux/clk.h>
24#include <linux/completion.h> 25#include <linux/completion.h>
25#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/dma-mapping.h>
28#include <linux/dmaengine.h>
26#include <linux/err.h> 29#include <linux/err.h>
27#include <linux/interrupt.h> 30#include <linux/interrupt.h>
28#include <linux/io.h> 31#include <linux/io.h>
29#include <linux/kernel.h> 32#include <linux/kernel.h>
30#include <linux/module.h> 33#include <linux/module.h>
31#include <linux/of.h> 34#include <linux/of.h>
32#include <linux/of_irq.h> 35#include <linux/of_address.h>
33#include <linux/of_gpio.h>
34#include <linux/of_device.h> 36#include <linux/of_device.h>
37#include <linux/of_gpio.h>
38#include <linux/of_irq.h>
35#include <linux/spi/spi.h> 39#include <linux/spi/spi.h>
36 40
37/* SPI register offsets */ 41/* SPI register offsets */
@@ -69,7 +73,8 @@
69#define BCM2835_SPI_CS_CS_01 0x00000001 73#define BCM2835_SPI_CS_CS_01 0x00000001
70 74
71#define BCM2835_SPI_POLLING_LIMIT_US 30 75#define BCM2835_SPI_POLLING_LIMIT_US 30
72#define BCM2835_SPI_TIMEOUT_MS 30000 76#define BCM2835_SPI_POLLING_JIFFIES 2
77#define BCM2835_SPI_DMA_MIN_LENGTH 96
73#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ 78#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
74 | SPI_NO_CS | SPI_3WIRE) 79 | SPI_NO_CS | SPI_3WIRE)
75 80
@@ -83,6 +88,7 @@ struct bcm2835_spi {
83 u8 *rx_buf; 88 u8 *rx_buf;
84 int tx_len; 89 int tx_len;
85 int rx_len; 90 int rx_len;
91 bool dma_pending;
86}; 92};
87 93
88static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) 94static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -128,12 +134,15 @@ static void bcm2835_spi_reset_hw(struct spi_master *master)
128 /* Disable SPI interrupts and transfer */ 134 /* Disable SPI interrupts and transfer */
129 cs &= ~(BCM2835_SPI_CS_INTR | 135 cs &= ~(BCM2835_SPI_CS_INTR |
130 BCM2835_SPI_CS_INTD | 136 BCM2835_SPI_CS_INTD |
137 BCM2835_SPI_CS_DMAEN |
131 BCM2835_SPI_CS_TA); 138 BCM2835_SPI_CS_TA);
132 /* and reset RX/TX FIFOS */ 139 /* and reset RX/TX FIFOS */
133 cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX; 140 cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
134 141
135 /* and reset the SPI_HW */ 142 /* and reset the SPI_HW */
136 bcm2835_wr(bs, BCM2835_SPI_CS, cs); 143 bcm2835_wr(bs, BCM2835_SPI_CS, cs);
144 /* as well as DLEN */
145 bcm2835_wr(bs, BCM2835_SPI_DLEN, 0);
137} 146}
138 147
139static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) 148static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
@@ -157,42 +166,6 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
157 return IRQ_HANDLED; 166 return IRQ_HANDLED;
158} 167}
159 168
160static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
161 struct spi_device *spi,
162 struct spi_transfer *tfr,
163 u32 cs,
164 unsigned long xfer_time_us)
165{
166 struct bcm2835_spi *bs = spi_master_get_devdata(master);
167 /* set timeout to 1 second of maximum polling */
168 unsigned long timeout = jiffies + HZ;
169
170 /* enable HW block without interrupts */
171 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
172
173 /* loop until finished the transfer */
174 while (bs->rx_len) {
175 /* read from fifo as much as possible */
176 bcm2835_rd_fifo(bs);
177 /* fill in tx fifo as much as possible */
178 bcm2835_wr_fifo(bs);
179 /* if we still expect some data after the read,
180 * check for a possible timeout
181 */
182 if (bs->rx_len && time_after(jiffies, timeout)) {
183 /* Transfer complete - reset SPI HW */
184 bcm2835_spi_reset_hw(master);
185 /* and return timeout */
186 return -ETIMEDOUT;
187 }
188 }
189
190 /* Transfer complete - reset SPI HW */
191 bcm2835_spi_reset_hw(master);
192 /* and return without waiting for completion */
193 return 0;
194}
195
196static int bcm2835_spi_transfer_one_irq(struct spi_master *master, 169static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
197 struct spi_device *spi, 170 struct spi_device *spi,
198 struct spi_transfer *tfr, 171 struct spi_transfer *tfr,
@@ -229,6 +202,329 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
229 return 1; 202 return 1;
230} 203}
231 204
205/*
206 * DMA support
207 *
208 * this implementation has currently a few issues in so far as it does
209 * not work around limitations of the HW.
210 *
211 * the main one being that DMA transfers are limited to 16 bit
212 * (so 0 to 65535 bytes) by the SPI HW due to BCM2835_SPI_DLEN
213 *
214 * also we currently assume that the scatter-gather fragments are
215 * all a multiple of 4 (except the last) - otherwise we would need
216 * to reset the FIFO before subsequent transfers...
217 * this also means that tx/rx transfers sg's need to be of equal size!
218 *
219 * there may be a few more border-cases we may need to address as well
220 * but unfortunately this would mean splitting up the scatter-gather
221 * list making it slightly impractical...
222 */
223static void bcm2835_spi_dma_done(void *data)
224{
225 struct spi_master *master = data;
226 struct bcm2835_spi *bs = spi_master_get_devdata(master);
227
228 /* reset fifo and HW */
229 bcm2835_spi_reset_hw(master);
230
231 /* and terminate tx-dma as we do not have an irq for it
232 * because when the rx dma will terminate and this callback
233 * is called the tx-dma must have finished - can't get to this
234 * situation otherwise...
235 */
236 dmaengine_terminate_all(master->dma_tx);
237
238 /* mark as no longer pending */
239 bs->dma_pending = 0;
240
241 /* and mark as completed */;
242 complete(&master->xfer_completion);
243}
244
245static int bcm2835_spi_prepare_sg(struct spi_master *master,
246 struct spi_transfer *tfr,
247 bool is_tx)
248{
249 struct dma_chan *chan;
250 struct scatterlist *sgl;
251 unsigned int nents;
252 enum dma_transfer_direction dir;
253 unsigned long flags;
254
255 struct dma_async_tx_descriptor *desc;
256 dma_cookie_t cookie;
257
258 if (is_tx) {
259 dir = DMA_MEM_TO_DEV;
260 chan = master->dma_tx;
261 nents = tfr->tx_sg.nents;
262 sgl = tfr->tx_sg.sgl;
263 flags = 0 /* no tx interrupt */;
264
265 } else {
266 dir = DMA_DEV_TO_MEM;
267 chan = master->dma_rx;
268 nents = tfr->rx_sg.nents;
269 sgl = tfr->rx_sg.sgl;
270 flags = DMA_PREP_INTERRUPT;
271 }
272 /* prepare the channel */
273 desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
274 if (!desc)
275 return -EINVAL;
276
277 /* set callback for rx */
278 if (!is_tx) {
279 desc->callback = bcm2835_spi_dma_done;
280 desc->callback_param = master;
281 }
282
283 /* submit it to DMA-engine */
284 cookie = dmaengine_submit(desc);
285
286 return dma_submit_error(cookie);
287}
288
289static inline int bcm2835_check_sg_length(struct sg_table *sgt)
290{
291 int i;
292 struct scatterlist *sgl;
293
294 /* check that the sg entries are word-sized (except for last) */
295 for_each_sg(sgt->sgl, sgl, (int)sgt->nents - 1, i) {
296 if (sg_dma_len(sgl) % 4)
297 return -EFAULT;
298 }
299
300 return 0;
301}
302
303static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
304 struct spi_device *spi,
305 struct spi_transfer *tfr,
306 u32 cs)
307{
308 struct bcm2835_spi *bs = spi_master_get_devdata(master);
309 int ret;
310
311 /* check that the scatter gather segments are all a multiple of 4 */
312 if (bcm2835_check_sg_length(&tfr->tx_sg) ||
313 bcm2835_check_sg_length(&tfr->rx_sg)) {
314 dev_warn_once(&spi->dev,
315 "scatter gather segment length is not a multiple of 4 - falling back to interrupt mode\n");
316 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
317 }
318
319 /* setup tx-DMA */
320 ret = bcm2835_spi_prepare_sg(master, tfr, true);
321 if (ret)
322 return ret;
323
324 /* start TX early */
325 dma_async_issue_pending(master->dma_tx);
326
327 /* mark as dma pending */
328 bs->dma_pending = 1;
329
330 /* set the DMA length */
331 bcm2835_wr(bs, BCM2835_SPI_DLEN, tfr->len);
332
333 /* start the HW */
334 bcm2835_wr(bs, BCM2835_SPI_CS,
335 cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
336
337 /* setup rx-DMA late - to run transfers while
338 * mapping of the rx buffers still takes place
339 * this saves 10us or more.
340 */
341 ret = bcm2835_spi_prepare_sg(master, tfr, false);
342 if (ret) {
343 /* need to reset on errors */
344 dmaengine_terminate_all(master->dma_tx);
345 bcm2835_spi_reset_hw(master);
346 return ret;
347 }
348
349 /* start rx dma late */
350 dma_async_issue_pending(master->dma_rx);
351
352 /* wait for wakeup in framework */
353 return 1;
354}
355
356static bool bcm2835_spi_can_dma(struct spi_master *master,
357 struct spi_device *spi,
358 struct spi_transfer *tfr)
359{
360 /* only run for gpio_cs */
361 if (!gpio_is_valid(spi->cs_gpio))
362 return false;
363
364 /* we start DMA efforts only on bigger transfers */
365 if (tfr->len < BCM2835_SPI_DMA_MIN_LENGTH)
366 return false;
367
368 /* BCM2835_SPI_DLEN has defined a max transfer size as
369 * 16 bit, so max is 65535
370 * we can revisit this by using an alternative transfer
371 * method - ideally this would get done without any more
372 * interaction...
373 */
374 if (tfr->len > 65535) {
375 dev_warn_once(&spi->dev,
376 "transfer size of %d too big for dma-transfer\n",
377 tfr->len);
378 return false;
379 }
380
381 /* if we run rx/tx_buf with word aligned addresses then we are OK */
382 if ((((size_t)tfr->rx_buf & 3) == 0) &&
383 (((size_t)tfr->tx_buf & 3) == 0))
384 return true;
385
386 /* otherwise we only allow transfers within the same page
387 * to avoid wasting time on dma_mapping when it is not practical
388 */
389 if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
390 dev_warn_once(&spi->dev,
391 "Unaligned spi tx-transfer bridging page\n");
392 return false;
393 }
394 if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
395 dev_warn_once(&spi->dev,
396 "Unaligned spi tx-transfer bridging page\n");
397 return false;
398 }
399
400 /* return OK */
401 return true;
402}
403
404static void bcm2835_dma_release(struct spi_master *master)
405{
406 if (master->dma_tx) {
407 dmaengine_terminate_all(master->dma_tx);
408 dma_release_channel(master->dma_tx);
409 master->dma_tx = NULL;
410 }
411 if (master->dma_rx) {
412 dmaengine_terminate_all(master->dma_rx);
413 dma_release_channel(master->dma_rx);
414 master->dma_rx = NULL;
415 }
416}
417
418static void bcm2835_dma_init(struct spi_master *master, struct device *dev)
419{
420 struct dma_slave_config slave_config;
421 const __be32 *addr;
422 dma_addr_t dma_reg_base;
423 int ret;
424
425 /* base address in dma-space */
426 addr = of_get_address(master->dev.of_node, 0, NULL, NULL);
427 if (!addr) {
428 dev_err(dev, "could not get DMA-register address - not using dma mode\n");
429 goto err;
430 }
431 dma_reg_base = be32_to_cpup(addr);
432
433 /* get tx/rx dma */
434 master->dma_tx = dma_request_slave_channel(dev, "tx");
435 if (!master->dma_tx) {
436 dev_err(dev, "no tx-dma configuration found - not using dma mode\n");
437 goto err;
438 }
439 master->dma_rx = dma_request_slave_channel(dev, "rx");
440 if (!master->dma_rx) {
441 dev_err(dev, "no rx-dma configuration found - not using dma mode\n");
442 goto err_release;
443 }
444
445 /* configure DMAs */
446 slave_config.direction = DMA_MEM_TO_DEV;
447 slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
448 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
449
450 ret = dmaengine_slave_config(master->dma_tx, &slave_config);
451 if (ret)
452 goto err_config;
453
454 slave_config.direction = DMA_DEV_TO_MEM;
455 slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
456 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
457
458 ret = dmaengine_slave_config(master->dma_rx, &slave_config);
459 if (ret)
460 goto err_config;
461
462 /* all went well, so set can_dma */
463 master->can_dma = bcm2835_spi_can_dma;
464 master->max_dma_len = 65535; /* limitation by BCM2835_SPI_DLEN */
465 /* need to do TX AND RX DMA, so we need dummy buffers */
466 master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
467
468 return;
469
470err_config:
471 dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
472 ret);
473err_release:
474 bcm2835_dma_release(master);
475err:
476 return;
477}
478
479static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
480 struct spi_device *spi,
481 struct spi_transfer *tfr,
482 u32 cs,
483 unsigned long xfer_time_us)
484{
485 struct bcm2835_spi *bs = spi_master_get_devdata(master);
486 unsigned long timeout;
487
488 /* enable HW block without interrupts */
489 bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
490
491 /* fill in the fifo before timeout calculations
492 * if we are interrupted here, then the data is
493 * getting transferred by the HW while we are interrupted
494 */
495 bcm2835_wr_fifo(bs);
496
497 /* set the timeout */
498 timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES;
499
500 /* loop until finished the transfer */
501 while (bs->rx_len) {
502 /* fill in tx fifo with remaining data */
503 bcm2835_wr_fifo(bs);
504
505 /* read from fifo as much as possible */
506 bcm2835_rd_fifo(bs);
507
508 /* if there is still data pending to read
509 * then check the timeout
510 */
511 if (bs->rx_len && time_after(jiffies, timeout)) {
512 dev_dbg_ratelimited(&spi->dev,
513 "timeout period reached: jiffies: %lu remaining tx/rx: %d/%d - falling back to interrupt mode\n",
514 jiffies - timeout,
515 bs->tx_len, bs->rx_len);
516 /* fall back to interrupt mode */
517 return bcm2835_spi_transfer_one_irq(master, spi,
518 tfr, cs);
519 }
520 }
521
522 /* Transfer complete - reset SPI HW */
523 bcm2835_spi_reset_hw(master);
524 /* and return without waiting for completion */
525 return 0;
526}
527
232static int bcm2835_spi_transfer_one(struct spi_master *master, 528static int bcm2835_spi_transfer_one(struct spi_master *master,
233 struct spi_device *spi, 529 struct spi_device *spi,
234 struct spi_transfer *tfr) 530 struct spi_transfer *tfr)
@@ -288,12 +584,26 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
288 return bcm2835_spi_transfer_one_poll(master, spi, tfr, 584 return bcm2835_spi_transfer_one_poll(master, spi, tfr,
289 cs, xfer_time_us); 585 cs, xfer_time_us);
290 586
587 /* run in dma mode if conditions are right */
588 if (master->can_dma && bcm2835_spi_can_dma(master, spi, tfr))
589 return bcm2835_spi_transfer_one_dma(master, spi, tfr, cs);
590
591 /* run in interrupt-mode */
291 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs); 592 return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
292} 593}
293 594
294static void bcm2835_spi_handle_err(struct spi_master *master, 595static void bcm2835_spi_handle_err(struct spi_master *master,
295 struct spi_message *msg) 596 struct spi_message *msg)
296{ 597{
598 struct bcm2835_spi *bs = spi_master_get_devdata(master);
599
600 /* if an error occurred and we have an active dma, then terminate */
601 if (bs->dma_pending) {
602 dmaengine_terminate_all(master->dma_tx);
603 dmaengine_terminate_all(master->dma_rx);
604 bs->dma_pending = 0;
605 }
606 /* and reset */
297 bcm2835_spi_reset_hw(master); 607 bcm2835_spi_reset_hw(master);
298} 608}
299 609
@@ -463,6 +773,8 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
463 goto out_clk_disable; 773 goto out_clk_disable;
464 } 774 }
465 775
776 bcm2835_dma_init(master, &pdev->dev);
777
466 /* initialise the hardware with the default polarities */ 778 /* initialise the hardware with the default polarities */
467 bcm2835_wr(bs, BCM2835_SPI_CS, 779 bcm2835_wr(bs, BCM2835_SPI_CS,
468 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); 780 BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
@@ -493,6 +805,8 @@ static int bcm2835_spi_remove(struct platform_device *pdev)
493 805
494 clk_disable_unprepare(bs->clk); 806 clk_disable_unprepare(bs->clk);
495 807
808 bcm2835_dma_release(master);
809
496 return 0; 810 return 0;
497} 811}
498 812
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 5e991065f5b0..987afebea093 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -265,7 +265,7 @@ static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
265 265
266 ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); 266 ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
267 267
268 if (ret < 3 || ret > 256) 268 if (ret < 1 || ret > 256)
269 return -EINVAL; 269 return -EINVAL;
270 270
271 return ret - 1; 271 return ret - 1;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 5fe54cda309f..86bcdd68c1fe 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -24,6 +24,7 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/pinctrl/consumer.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
29#include <linux/regmap.h> 30#include <linux/regmap.h>
@@ -47,6 +48,7 @@
47#define SPI_MCR_CLR_RXF (1 << 10) 48#define SPI_MCR_CLR_RXF (1 << 10)
48 49
49#define SPI_TCR 0x08 50#define SPI_TCR 0x08
51#define SPI_TCR_GET_TCNT(x) (((x) & 0xffff0000) >> 16)
50 52
51#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4)) 53#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
52#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) 54#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
@@ -67,9 +69,11 @@
67 69
68#define SPI_SR 0x2c 70#define SPI_SR 0x2c
69#define SPI_SR_EOQF 0x10000000 71#define SPI_SR_EOQF 0x10000000
72#define SPI_SR_TCFQF 0x80000000
70 73
71#define SPI_RSER 0x30 74#define SPI_RSER 0x30
72#define SPI_RSER_EOQFE 0x10000000 75#define SPI_RSER_EOQFE 0x10000000
76#define SPI_RSER_TCFQE 0x80000000
73 77
74#define SPI_PUSHR 0x34 78#define SPI_PUSHR 0x34
75#define SPI_PUSHR_CONT (1 << 31) 79#define SPI_PUSHR_CONT (1 << 31)
@@ -102,12 +106,35 @@
102#define SPI_CS_ASSERT 0x02 106#define SPI_CS_ASSERT 0x02
103#define SPI_CS_DROP 0x04 107#define SPI_CS_DROP 0x04
104 108
109#define SPI_TCR_TCNT_MAX 0x10000
110
105struct chip_data { 111struct chip_data {
106 u32 mcr_val; 112 u32 mcr_val;
107 u32 ctar_val; 113 u32 ctar_val;
108 u16 void_write_data; 114 u16 void_write_data;
109}; 115};
110 116
117enum dspi_trans_mode {
118 DSPI_EOQ_MODE = 0,
119 DSPI_TCFQ_MODE,
120};
121
122struct fsl_dspi_devtype_data {
123 enum dspi_trans_mode trans_mode;
124};
125
126static const struct fsl_dspi_devtype_data vf610_data = {
127 .trans_mode = DSPI_EOQ_MODE,
128};
129
130static const struct fsl_dspi_devtype_data ls1021a_v1_data = {
131 .trans_mode = DSPI_TCFQ_MODE,
132};
133
134static const struct fsl_dspi_devtype_data ls2085a_data = {
135 .trans_mode = DSPI_TCFQ_MODE,
136};
137
111struct fsl_dspi { 138struct fsl_dspi {
112 struct spi_master *master; 139 struct spi_master *master;
113 struct platform_device *pdev; 140 struct platform_device *pdev;
@@ -128,9 +155,12 @@ struct fsl_dspi {
128 u8 cs; 155 u8 cs;
129 u16 void_write_data; 156 u16 void_write_data;
130 u32 cs_change; 157 u32 cs_change;
158 struct fsl_dspi_devtype_data *devtype_data;
131 159
132 wait_queue_head_t waitq; 160 wait_queue_head_t waitq;
133 u32 waitflags; 161 u32 waitflags;
162
163 u32 spi_tcnt;
134}; 164};
135 165
136static inline int is_double_byte_mode(struct fsl_dspi *dspi) 166static inline int is_double_byte_mode(struct fsl_dspi *dspi)
@@ -213,63 +243,60 @@ static void ns_delay_scale(char *psc, char *sc, int delay_ns,
213 } 243 }
214} 244}
215 245
216static int dspi_transfer_write(struct fsl_dspi *dspi) 246static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
217{ 247{
218 int tx_count = 0;
219 int tx_word;
220 u16 d16; 248 u16 d16;
221 u8 d8;
222 u32 dspi_pushr = 0;
223 int first = 1;
224 249
225 tx_word = is_double_byte_mode(dspi); 250 if (!(dspi->dataflags & TRAN_STATE_TX_VOID))
251 d16 = tx_word ? *(u16 *)dspi->tx : *(u8 *)dspi->tx;
252 else
253 d16 = dspi->void_write_data;
226 254
227 /* If we are in word mode, but only have a single byte to transfer 255 dspi->tx += tx_word + 1;
228 * then switch to byte mode temporarily. Will switch back at the 256 dspi->len -= tx_word + 1;
229 * end of the transfer.
230 */
231 if (tx_word && (dspi->len == 1)) {
232 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
233 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
234 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
235 tx_word = 0;
236 }
237 257
238 while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) { 258 return SPI_PUSHR_TXDATA(d16) |
239 if (tx_word) { 259 SPI_PUSHR_PCS(dspi->cs) |
240 if (dspi->len == 1) 260 SPI_PUSHR_CTAS(dspi->cs) |
241 break; 261 SPI_PUSHR_CONT;
262}
242 263
243 if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) { 264static void dspi_data_from_popr(struct fsl_dspi *dspi, int rx_word)
244 d16 = *(u16 *)dspi->tx; 265{
245 dspi->tx += 2; 266 u16 d;
246 } else { 267 unsigned int val;
247 d16 = dspi->void_write_data;
248 }
249 268
250 dspi_pushr = SPI_PUSHR_TXDATA(d16) | 269 regmap_read(dspi->regmap, SPI_POPR, &val);
251 SPI_PUSHR_PCS(dspi->cs) | 270 d = SPI_POPR_RXDATA(val);
252 SPI_PUSHR_CTAS(dspi->cs) |
253 SPI_PUSHR_CONT;
254 271
255 dspi->len -= 2; 272 if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
256 } else { 273 rx_word ? (*(u16 *)dspi->rx = d) : (*(u8 *)dspi->rx = d);
257 if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
258 274
259 d8 = *(u8 *)dspi->tx; 275 dspi->rx += rx_word + 1;
260 dspi->tx++; 276}
261 } else {
262 d8 = (u8)dspi->void_write_data;
263 }
264 277
265 dspi_pushr = SPI_PUSHR_TXDATA(d8) | 278static int dspi_eoq_write(struct fsl_dspi *dspi)
266 SPI_PUSHR_PCS(dspi->cs) | 279{
267 SPI_PUSHR_CTAS(dspi->cs) | 280 int tx_count = 0;
268 SPI_PUSHR_CONT; 281 int tx_word;
282 u32 dspi_pushr = 0;
283
284 tx_word = is_double_byte_mode(dspi);
269 285
270 dspi->len--; 286 while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
287 /* If we are in word mode, only have a single byte to transfer
288 * switch to byte mode temporarily. Will switch back at the
289 * end of the transfer.
290 */
291 if (tx_word && (dspi->len == 1)) {
292 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
293 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
294 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
295 tx_word = 0;
271 } 296 }
272 297
298 dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
299
273 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) { 300 if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
274 /* last transfer in the transfer */ 301 /* last transfer in the transfer */
275 dspi_pushr |= SPI_PUSHR_EOQ; 302 dspi_pushr |= SPI_PUSHR_EOQ;
@@ -278,11 +305,6 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
278 } else if (tx_word && (dspi->len == 1)) 305 } else if (tx_word && (dspi->len == 1))
279 dspi_pushr |= SPI_PUSHR_EOQ; 306 dspi_pushr |= SPI_PUSHR_EOQ;
280 307
281 if (first) {
282 first = 0;
283 dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
284 }
285
286 regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr); 308 regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
287 309
288 tx_count++; 310 tx_count++;
@@ -291,40 +313,55 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
291 return tx_count * (tx_word + 1); 313 return tx_count * (tx_word + 1);
292} 314}
293 315
294static int dspi_transfer_read(struct fsl_dspi *dspi) 316static int dspi_eoq_read(struct fsl_dspi *dspi)
295{ 317{
296 int rx_count = 0; 318 int rx_count = 0;
297 int rx_word = is_double_byte_mode(dspi); 319 int rx_word = is_double_byte_mode(dspi);
298 u16 d;
299 320
300 while ((dspi->rx < dspi->rx_end) 321 while ((dspi->rx < dspi->rx_end)
301 && (rx_count < DSPI_FIFO_SIZE)) { 322 && (rx_count < DSPI_FIFO_SIZE)) {
302 if (rx_word) { 323 if (rx_word && (dspi->rx_end - dspi->rx) == 1)
303 unsigned int val; 324 rx_word = 0;
304 325
305 if ((dspi->rx_end - dspi->rx) == 1) 326 dspi_data_from_popr(dspi, rx_word);
306 break; 327 rx_count++;
328 }
307 329
308 regmap_read(dspi->regmap, SPI_POPR, &val); 330 return rx_count;
309 d = SPI_POPR_RXDATA(val); 331}
310 332
311 if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) 333static int dspi_tcfq_write(struct fsl_dspi *dspi)
312 *(u16 *)dspi->rx = d; 334{
313 dspi->rx += 2; 335 int tx_word;
336 u32 dspi_pushr = 0;
314 337
315 } else { 338 tx_word = is_double_byte_mode(dspi);
316 unsigned int val;
317 339
318 regmap_read(dspi->regmap, SPI_POPR, &val); 340 if (tx_word && (dspi->len == 1)) {
319 d = SPI_POPR_RXDATA(val); 341 dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
320 if (!(dspi->dataflags & TRAN_STATE_RX_VOID)) 342 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
321 *(u8 *)dspi->rx = d; 343 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
322 dspi->rx++; 344 tx_word = 0;
323 }
324 rx_count++;
325 } 345 }
326 346
327 return rx_count; 347 dspi_pushr = dspi_data_to_pushr(dspi, tx_word);
348
349 if ((dspi->cs_change) && (!dspi->len))
350 dspi_pushr &= ~SPI_PUSHR_CONT;
351
352 regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
353
354 return tx_word + 1;
355}
356
357static void dspi_tcfq_read(struct fsl_dspi *dspi)
358{
359 int rx_word = is_double_byte_mode(dspi);
360
361 if (rx_word && (dspi->rx_end - dspi->rx) == 1)
362 rx_word = 0;
363
364 dspi_data_from_popr(dspi, rx_word);
328} 365}
329 366
330static int dspi_transfer_one_message(struct spi_master *master, 367static int dspi_transfer_one_message(struct spi_master *master,
@@ -334,6 +371,12 @@ static int dspi_transfer_one_message(struct spi_master *master,
334 struct spi_device *spi = message->spi; 371 struct spi_device *spi = message->spi;
335 struct spi_transfer *transfer; 372 struct spi_transfer *transfer;
336 int status = 0; 373 int status = 0;
374 enum dspi_trans_mode trans_mode;
375 u32 spi_tcr;
376
377 regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
378 dspi->spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
379
337 message->actual_length = 0; 380 message->actual_length = 0;
338 381
339 list_for_each_entry(transfer, &message->transfers, transfer_list) { 382 list_for_each_entry(transfer, &message->transfers, transfer_list) {
@@ -341,10 +384,10 @@ static int dspi_transfer_one_message(struct spi_master *master,
341 dspi->cur_msg = message; 384 dspi->cur_msg = message;
342 dspi->cur_chip = spi_get_ctldata(spi); 385 dspi->cur_chip = spi_get_ctldata(spi);
343 dspi->cs = spi->chip_select; 386 dspi->cs = spi->chip_select;
387 dspi->cs_change = 0;
344 if (dspi->cur_transfer->transfer_list.next 388 if (dspi->cur_transfer->transfer_list.next
345 == &dspi->cur_msg->transfers) 389 == &dspi->cur_msg->transfers)
346 transfer->cs_change = 1; 390 dspi->cs_change = 1;
347 dspi->cs_change = transfer->cs_change;
348 dspi->void_write_data = dspi->cur_chip->void_write_data; 391 dspi->void_write_data = dspi->cur_chip->void_write_data;
349 392
350 dspi->dataflags = 0; 393 dspi->dataflags = 0;
@@ -370,8 +413,22 @@ static int dspi_transfer_one_message(struct spi_master *master,
370 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), 413 regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
371 dspi->cur_chip->ctar_val); 414 dspi->cur_chip->ctar_val);
372 415
373 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE); 416 trans_mode = dspi->devtype_data->trans_mode;
374 message->actual_length += dspi_transfer_write(dspi); 417 switch (trans_mode) {
418 case DSPI_EOQ_MODE:
419 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
420 dspi_eoq_write(dspi);
421 break;
422 case DSPI_TCFQ_MODE:
423 regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_TCFQE);
424 dspi_tcfq_write(dspi);
425 break;
426 default:
427 dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
428 trans_mode);
429 status = -EINVAL;
430 goto out;
431 }
375 432
376 if (wait_event_interruptible(dspi->waitq, dspi->waitflags)) 433 if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
377 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n"); 434 dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
@@ -381,6 +438,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
381 udelay(transfer->delay_usecs); 438 udelay(transfer->delay_usecs);
382 } 439 }
383 440
441out:
384 message->status = status; 442 message->status = status;
385 spi_finalize_current_message(master); 443 spi_finalize_current_message(master);
386 444
@@ -460,27 +518,89 @@ static void dspi_cleanup(struct spi_device *spi)
460static irqreturn_t dspi_interrupt(int irq, void *dev_id) 518static irqreturn_t dspi_interrupt(int irq, void *dev_id)
461{ 519{
462 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id; 520 struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
463
464 struct spi_message *msg = dspi->cur_msg; 521 struct spi_message *msg = dspi->cur_msg;
522 enum dspi_trans_mode trans_mode;
523 u32 spi_sr, spi_tcr;
524 u32 spi_tcnt, tcnt_diff;
525 int tx_word;
465 526
466 regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF); 527 regmap_read(dspi->regmap, SPI_SR, &spi_sr);
467 dspi_transfer_read(dspi); 528 regmap_write(dspi->regmap, SPI_SR, spi_sr);
468 529
469 if (!dspi->len) { 530
531 if (spi_sr & (SPI_SR_EOQF | SPI_SR_TCFQF)) {
532 tx_word = is_double_byte_mode(dspi);
533
534 regmap_read(dspi->regmap, SPI_TCR, &spi_tcr);
535 spi_tcnt = SPI_TCR_GET_TCNT(spi_tcr);
536 /*
537 * The width of SPI Transfer Counter in SPI_TCR is 16bits,
538 * so the max couner is 65535. When the counter reach 65535,
539 * it will wrap around, counter reset to zero.
540 * spi_tcnt my be less than dspi->spi_tcnt, it means the
541 * counter already wrapped around.
542 * SPI Transfer Counter is a counter of transmitted frames.
543 * The size of frame maybe two bytes.
544 */
545 tcnt_diff = ((spi_tcnt + SPI_TCR_TCNT_MAX) - dspi->spi_tcnt)
546 % SPI_TCR_TCNT_MAX;
547 tcnt_diff *= (tx_word + 1);
470 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) 548 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
471 regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs), 549 tcnt_diff--;
472 SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16)); 550
551 msg->actual_length += tcnt_diff;
552
553 dspi->spi_tcnt = spi_tcnt;
554
555 trans_mode = dspi->devtype_data->trans_mode;
556 switch (trans_mode) {
557 case DSPI_EOQ_MODE:
558 dspi_eoq_read(dspi);
559 break;
560 case DSPI_TCFQ_MODE:
561 dspi_tcfq_read(dspi);
562 break;
563 default:
564 dev_err(&dspi->pdev->dev, "unsupported trans_mode %u\n",
565 trans_mode);
566 return IRQ_HANDLED;
567 }
473 568
474 dspi->waitflags = 1; 569 if (!dspi->len) {
475 wake_up_interruptible(&dspi->waitq); 570 if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
476 } else 571 regmap_update_bits(dspi->regmap,
477 msg->actual_length += dspi_transfer_write(dspi); 572 SPI_CTAR(dspi->cs),
573 SPI_FRAME_BITS_MASK,
574 SPI_FRAME_BITS(16));
575 dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
576 }
577
578 dspi->waitflags = 1;
579 wake_up_interruptible(&dspi->waitq);
580 } else {
581 switch (trans_mode) {
582 case DSPI_EOQ_MODE:
583 dspi_eoq_write(dspi);
584 break;
585 case DSPI_TCFQ_MODE:
586 dspi_tcfq_write(dspi);
587 break;
588 default:
589 dev_err(&dspi->pdev->dev,
590 "unsupported trans_mode %u\n",
591 trans_mode);
592 }
593 }
594 }
478 595
479 return IRQ_HANDLED; 596 return IRQ_HANDLED;
480} 597}
481 598
482static const struct of_device_id fsl_dspi_dt_ids[] = { 599static const struct of_device_id fsl_dspi_dt_ids[] = {
483 { .compatible = "fsl,vf610-dspi", .data = NULL, }, 600 { .compatible = "fsl,vf610-dspi", .data = (void *)&vf610_data, },
601 { .compatible = "fsl,ls1021a-v1.0-dspi",
602 .data = (void *)&ls1021a_v1_data, },
603 { .compatible = "fsl,ls2085a-dspi", .data = (void *)&ls2085a_data, },
484 { /* sentinel */ } 604 { /* sentinel */ }
485}; 605};
486MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids); 606MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
@@ -494,6 +614,8 @@ static int dspi_suspend(struct device *dev)
494 spi_master_suspend(master); 614 spi_master_suspend(master);
495 clk_disable_unprepare(dspi->clk); 615 clk_disable_unprepare(dspi->clk);
496 616
617 pinctrl_pm_select_sleep_state(dev);
618
497 return 0; 619 return 0;
498} 620}
499 621
@@ -502,6 +624,8 @@ static int dspi_resume(struct device *dev)
502 struct spi_master *master = dev_get_drvdata(dev); 624 struct spi_master *master = dev_get_drvdata(dev);
503 struct fsl_dspi *dspi = spi_master_get_devdata(master); 625 struct fsl_dspi *dspi = spi_master_get_devdata(master);
504 626
627 pinctrl_pm_select_default_state(dev);
628
505 clk_prepare_enable(dspi->clk); 629 clk_prepare_enable(dspi->clk);
506 spi_master_resume(master); 630 spi_master_resume(master);
507 631
@@ -526,6 +650,8 @@ static int dspi_probe(struct platform_device *pdev)
526 struct resource *res; 650 struct resource *res;
527 void __iomem *base; 651 void __iomem *base;
528 int ret = 0, cs_num, bus_num; 652 int ret = 0, cs_num, bus_num;
653 const struct of_device_id *of_id =
654 of_match_device(fsl_dspi_dt_ids, &pdev->dev);
529 655
530 master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); 656 master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
531 if (!master) 657 if (!master)
@@ -559,6 +685,13 @@ static int dspi_probe(struct platform_device *pdev)
559 } 685 }
560 master->bus_num = bus_num; 686 master->bus_num = bus_num;
561 687
688 dspi->devtype_data = (struct fsl_dspi_devtype_data *)of_id->data;
689 if (!dspi->devtype_data) {
690 dev_err(&pdev->dev, "can't get devtype_data\n");
691 ret = -EFAULT;
692 goto out_master_put;
693 }
694
562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 695 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
563 base = devm_ioremap_resource(&pdev->dev, res); 696 base = devm_ioremap_resource(&pdev->dev, res);
564 if (IS_ERR(base)) { 697 if (IS_ERR(base)) {
@@ -566,7 +699,7 @@ static int dspi_probe(struct platform_device *pdev)
566 goto out_master_put; 699 goto out_master_put;
567 } 700 }
568 701
569 dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base, 702 dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
570 &dspi_regmap_config); 703 &dspi_regmap_config);
571 if (IS_ERR(dspi->regmap)) { 704 if (IS_ERR(dspi->regmap)) {
572 dev_err(&pdev->dev, "failed to init regmap: %ld\n", 705 dev_err(&pdev->dev, "failed to init regmap: %ld\n",
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 80d245ac846f..d3f05a0525a4 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -561,9 +561,13 @@ void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
561 561
562 /* spin until TX is done */ 562 /* spin until TX is done */
563 ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( 563 ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
564 &reg_base->event)) & SPIE_NF) == 0, 1000, 0); 564 &reg_base->event)) & SPIE_NF), 1000, 0);
565 if (!ret) { 565 if (!ret) {
566 dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); 566 dev_err(mspi->dev, "tired waiting for SPIE_NF\n");
567
568 /* Clear the SPIE bits */
569 mpc8xxx_spi_write_reg(&reg_base->event, events);
570 complete(&mspi->done);
567 return; 571 return;
568 } 572 }
569 } 573 }
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
40#define SPFI_CONTROL_SOFT_RESET BIT(11) 40#define SPFI_CONTROL_SOFT_RESET BIT(11)
41#define SPFI_CONTROL_SEND_DMA BIT(10) 41#define SPFI_CONTROL_SEND_DMA BIT(10)
42#define SPFI_CONTROL_GET_DMA BIT(9) 42#define SPFI_CONTROL_GET_DMA BIT(9)
43#define SPFI_CONTROL_SE BIT(8)
43#define SPFI_CONTROL_TMODE_SHIFT 5 44#define SPFI_CONTROL_TMODE_SHIFT 5
44#define SPFI_CONTROL_TMODE_MASK 0x7 45#define SPFI_CONTROL_TMODE_MASK 0x7
45#define SPFI_CONTROL_TMODE_SINGLE 0 46#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 492 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
492 xfer->rx_nbits == SPI_NBITS_QUAD) 493 xfer->rx_nbits == SPI_NBITS_QUAD)
493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
495 val |= SPFI_CONTROL_SE;
494 spfi_writel(spfi, val, SPFI_CONTROL); 496 spfi_writel(spfi, val, SPFI_CONTROL);
495} 497}
496 498
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index f08e812b2984..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
201{ 201{
202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
203 203
204 if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 204 if (spi_imx->dma_is_inited
205 && (transfer->len > spi_imx->tx_wml)) 205 && transfer->len > spi_imx->rx_wml * sizeof(u32)
206 && transfer->len > spi_imx->tx_wml * sizeof(u32))
206 return true; 207 return true;
207 return false; 208 return false;
208} 209}
@@ -674,7 +675,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
674 .devtype = IMX51_ECSPI, 675 .devtype = IMX51_ECSPI,
675}; 676};
676 677
677static struct platform_device_id spi_imx_devtype[] = { 678static const struct platform_device_id spi_imx_devtype[] = {
678 { 679 {
679 .name = "imx1-cspi", 680 .name = "imx1-cspi",
680 .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data, 681 .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index d1a5b9fc3eba..58673841286c 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -35,6 +35,7 @@
35#include <linux/gcd.h> 35#include <linux/gcd.h>
36 36
37#include <linux/spi/spi.h> 37#include <linux/spi/spi.h>
38#include <linux/gpio.h>
38 39
39#include <linux/platform_data/spi-omap2-mcspi.h> 40#include <linux/platform_data/spi-omap2-mcspi.h>
40 41
@@ -242,17 +243,27 @@ static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
242 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0); 243 mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
243} 244}
244 245
245static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active) 246static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
246{ 247{
247 u32 l; 248 u32 l;
248 249
249 l = mcspi_cached_chconf0(spi); 250 /* The controller handles the inverted chip selects
250 if (cs_active) 251 * using the OMAP2_MCSPI_CHCONF_EPOL bit so revert
251 l |= OMAP2_MCSPI_CHCONF_FORCE; 252 * the inversion from the core spi_set_cs function.
252 else 253 */
253 l &= ~OMAP2_MCSPI_CHCONF_FORCE; 254 if (spi->mode & SPI_CS_HIGH)
255 enable = !enable;
254 256
255 mcspi_write_chconf0(spi, l); 257 if (spi->controller_state) {
258 l = mcspi_cached_chconf0(spi);
259
260 if (enable)
261 l &= ~OMAP2_MCSPI_CHCONF_FORCE;
262 else
263 l |= OMAP2_MCSPI_CHCONF_FORCE;
264
265 mcspi_write_chconf0(spi, l);
266 }
256} 267}
257 268
258static void omap2_mcspi_set_master_mode(struct spi_master *master) 269static void omap2_mcspi_set_master_mode(struct spi_master *master)
@@ -1011,6 +1022,15 @@ static int omap2_mcspi_setup(struct spi_device *spi)
1011 return ret; 1022 return ret;
1012 } 1023 }
1013 1024
1025 if (gpio_is_valid(spi->cs_gpio)) {
1026 ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
1027 if (ret) {
1028 dev_err(&spi->dev, "failed to request gpio\n");
1029 return ret;
1030 }
1031 gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
1032 }
1033
1014 ret = pm_runtime_get_sync(mcspi->dev); 1034 ret = pm_runtime_get_sync(mcspi->dev);
1015 if (ret < 0) 1035 if (ret < 0)
1016 return ret; 1036 return ret;
@@ -1050,9 +1070,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
1050 mcspi_dma->dma_tx = NULL; 1070 mcspi_dma->dma_tx = NULL;
1051 } 1071 }
1052 } 1072 }
1073
1074 if (gpio_is_valid(spi->cs_gpio))
1075 gpio_free(spi->cs_gpio);
1053} 1076}
1054 1077
1055static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) 1078static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
1079 struct spi_device *spi, struct spi_transfer *t)
1056{ 1080{
1057 1081
1058 /* We only enable one channel at a time -- the one whose message is 1082 /* We only enable one channel at a time -- the one whose message is
@@ -1062,18 +1086,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1062 * chipselect with the FORCE bit ... CS != channel enable. 1086 * chipselect with the FORCE bit ... CS != channel enable.
1063 */ 1087 */
1064 1088
1065 struct spi_device *spi;
1066 struct spi_transfer *t = NULL;
1067 struct spi_master *master; 1089 struct spi_master *master;
1068 struct omap2_mcspi_dma *mcspi_dma; 1090 struct omap2_mcspi_dma *mcspi_dma;
1069 int cs_active = 0;
1070 struct omap2_mcspi_cs *cs; 1091 struct omap2_mcspi_cs *cs;
1071 struct omap2_mcspi_device_config *cd; 1092 struct omap2_mcspi_device_config *cd;
1072 int par_override = 0; 1093 int par_override = 0;
1073 int status = 0; 1094 int status = 0;
1074 u32 chconf; 1095 u32 chconf;
1075 1096
1076 spi = m->spi;
1077 master = spi->master; 1097 master = spi->master;
1078 mcspi_dma = mcspi->dma_channels + spi->chip_select; 1098 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1079 cs = spi->controller_state; 1099 cs = spi->controller_state;
@@ -1090,103 +1110,84 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1090 par_override = 1; 1110 par_override = 1;
1091 1111
1092 omap2_mcspi_set_enable(spi, 0); 1112 omap2_mcspi_set_enable(spi, 0);
1093 list_for_each_entry(t, &m->transfers, transfer_list) {
1094 if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
1095 status = -EINVAL;
1096 break;
1097 }
1098 if (par_override ||
1099 (t->speed_hz != spi->max_speed_hz) ||
1100 (t->bits_per_word != spi->bits_per_word)) {
1101 par_override = 1;
1102 status = omap2_mcspi_setup_transfer(spi, t);
1103 if (status < 0)
1104 break;
1105 if (t->speed_hz == spi->max_speed_hz &&
1106 t->bits_per_word == spi->bits_per_word)
1107 par_override = 0;
1108 }
1109 if (cd && cd->cs_per_word) {
1110 chconf = mcspi->ctx.modulctrl;
1111 chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1112 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1113 mcspi->ctx.modulctrl =
1114 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1115 }
1116 1113
1114 if (gpio_is_valid(spi->cs_gpio))
1115 omap2_mcspi_set_cs(spi, spi->mode & SPI_CS_HIGH);
1117 1116
1118 if (!cs_active) { 1117 if (par_override ||
1119 omap2_mcspi_force_cs(spi, 1); 1118 (t->speed_hz != spi->max_speed_hz) ||
1120 cs_active = 1; 1119 (t->bits_per_word != spi->bits_per_word)) {
1121 } 1120 par_override = 1;
1122 1121 status = omap2_mcspi_setup_transfer(spi, t);
1123 chconf = mcspi_cached_chconf0(spi); 1122 if (status < 0)
1124 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK; 1123 goto out;
1125 chconf &= ~OMAP2_MCSPI_CHCONF_TURBO; 1124 if (t->speed_hz == spi->max_speed_hz &&
1125 t->bits_per_word == spi->bits_per_word)
1126 par_override = 0;
1127 }
1128 if (cd && cd->cs_per_word) {
1129 chconf = mcspi->ctx.modulctrl;
1130 chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
1131 mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
1132 mcspi->ctx.modulctrl =
1133 mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
1134 }
1126 1135
1127 if (t->tx_buf == NULL) 1136 chconf = mcspi_cached_chconf0(spi);
1128 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY; 1137 chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
1129 else if (t->rx_buf == NULL) 1138 chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
1130 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY; 1139
1131 1140 if (t->tx_buf == NULL)
1132 if (cd && cd->turbo_mode && t->tx_buf == NULL) { 1141 chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
1133 /* Turbo mode is for more than one word */ 1142 else if (t->rx_buf == NULL)
1134 if (t->len > ((cs->word_len + 7) >> 3)) 1143 chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
1135 chconf |= OMAP2_MCSPI_CHCONF_TURBO; 1144
1136 } 1145 if (cd && cd->turbo_mode && t->tx_buf == NULL) {
1146 /* Turbo mode is for more than one word */
1147 if (t->len > ((cs->word_len + 7) >> 3))
1148 chconf |= OMAP2_MCSPI_CHCONF_TURBO;
1149 }
1137 1150
1138 mcspi_write_chconf0(spi, chconf); 1151 mcspi_write_chconf0(spi, chconf);
1139 1152
1140 if (t->len) { 1153 if (t->len) {
1141 unsigned count; 1154 unsigned count;
1142 1155
1143 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1156 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1144 (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)) 1157 (t->len >= DMA_MIN_BYTES))
1145 omap2_mcspi_set_fifo(spi, t, 1); 1158 omap2_mcspi_set_fifo(spi, t, 1);
1146 1159
1147 omap2_mcspi_set_enable(spi, 1); 1160 omap2_mcspi_set_enable(spi, 1);
1148 1161
1149 /* RX_ONLY mode needs dummy data in TX reg */ 1162 /* RX_ONLY mode needs dummy data in TX reg */
1150 if (t->tx_buf == NULL) 1163 if (t->tx_buf == NULL)
1151 writel_relaxed(0, cs->base 1164 writel_relaxed(0, cs->base
1152 + OMAP2_MCSPI_TX0); 1165 + OMAP2_MCSPI_TX0);
1153 1166
1154 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && 1167 if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
1155 (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)) 1168 (t->len >= DMA_MIN_BYTES))
1156 count = omap2_mcspi_txrx_dma(spi, t); 1169 count = omap2_mcspi_txrx_dma(spi, t);
1157 else 1170 else
1158 count = omap2_mcspi_txrx_pio(spi, t); 1171 count = omap2_mcspi_txrx_pio(spi, t);
1159 m->actual_length += count;
1160 1172
1161 if (count != t->len) { 1173 if (count != t->len) {
1162 status = -EIO; 1174 status = -EIO;
1163 break; 1175 goto out;
1164 }
1165 } 1176 }
1177 }
1166 1178
1167 if (t->delay_usecs) 1179 omap2_mcspi_set_enable(spi, 0);
1168 udelay(t->delay_usecs);
1169
1170 /* ignore the "leave it on after last xfer" hint */
1171 if (t->cs_change) {
1172 omap2_mcspi_force_cs(spi, 0);
1173 cs_active = 0;
1174 }
1175 1180
1176 omap2_mcspi_set_enable(spi, 0); 1181 if (mcspi->fifo_depth > 0)
1182 omap2_mcspi_set_fifo(spi, t, 0);
1177 1183
1178 if (mcspi->fifo_depth > 0) 1184out:
1179 omap2_mcspi_set_fifo(spi, t, 0);
1180 }
1181 /* Restore defaults if they were overriden */ 1185 /* Restore defaults if they were overriden */
1182 if (par_override) { 1186 if (par_override) {
1183 par_override = 0; 1187 par_override = 0;
1184 status = omap2_mcspi_setup_transfer(spi, NULL); 1188 status = omap2_mcspi_setup_transfer(spi, NULL);
1185 } 1189 }
1186 1190
1187 if (cs_active)
1188 omap2_mcspi_force_cs(spi, 0);
1189
1190 if (cd && cd->cs_per_word) { 1191 if (cd && cd->cs_per_word) {
1191 chconf = mcspi->ctx.modulctrl; 1192 chconf = mcspi->ctx.modulctrl;
1192 chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE; 1193 chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
@@ -1197,78 +1198,64 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
1197 1198
1198 omap2_mcspi_set_enable(spi, 0); 1199 omap2_mcspi_set_enable(spi, 0);
1199 1200
1201 if (gpio_is_valid(spi->cs_gpio))
1202 omap2_mcspi_set_cs(spi, !(spi->mode & SPI_CS_HIGH));
1203
1200 if (mcspi->fifo_depth > 0 && t) 1204 if (mcspi->fifo_depth > 0 && t)
1201 omap2_mcspi_set_fifo(spi, t, 0); 1205 omap2_mcspi_set_fifo(spi, t, 0);
1202 1206
1203 m->status = status; 1207 return status;
1204} 1208}
1205 1209
1206static int omap2_mcspi_transfer_one_message(struct spi_master *master, 1210static int omap2_mcspi_transfer_one(struct spi_master *master,
1207 struct spi_message *m) 1211 struct spi_device *spi, struct spi_transfer *t)
1208{ 1212{
1209 struct spi_device *spi;
1210 struct omap2_mcspi *mcspi; 1213 struct omap2_mcspi *mcspi;
1211 struct omap2_mcspi_dma *mcspi_dma; 1214 struct omap2_mcspi_dma *mcspi_dma;
1212 struct spi_transfer *t; 1215 const void *tx_buf = t->tx_buf;
1213 int status; 1216 void *rx_buf = t->rx_buf;
1217 unsigned len = t->len;
1214 1218
1215 spi = m->spi;
1216 mcspi = spi_master_get_devdata(master); 1219 mcspi = spi_master_get_devdata(master);
1217 mcspi_dma = mcspi->dma_channels + spi->chip_select; 1220 mcspi_dma = mcspi->dma_channels + spi->chip_select;
1218 m->actual_length = 0;
1219 m->status = 0;
1220
1221 list_for_each_entry(t, &m->transfers, transfer_list) {
1222 const void *tx_buf = t->tx_buf;
1223 void *rx_buf = t->rx_buf;
1224 unsigned len = t->len;
1225
1226 if ((len && !(rx_buf || tx_buf))) {
1227 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1228 t->speed_hz,
1229 len,
1230 tx_buf ? "tx" : "",
1231 rx_buf ? "rx" : "",
1232 t->bits_per_word);
1233 status = -EINVAL;
1234 goto out;
1235 }
1236 1221
1237 if (m->is_dma_mapped || len < DMA_MIN_BYTES) 1222 if ((len && !(rx_buf || tx_buf))) {
1238 continue; 1223 dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
1239 1224 t->speed_hz,
1240 if (mcspi_dma->dma_tx && tx_buf != NULL) { 1225 len,
1241 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf, 1226 tx_buf ? "tx" : "",
1242 len, DMA_TO_DEVICE); 1227 rx_buf ? "rx" : "",
1243 if (dma_mapping_error(mcspi->dev, t->tx_dma)) { 1228 t->bits_per_word);
1244 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 1229 return -EINVAL;
1245 'T', len); 1230 }
1246 status = -EINVAL; 1231
1247 goto out; 1232 if (len < DMA_MIN_BYTES)
1248 } 1233 goto skip_dma_map;
1234
1235 if (mcspi_dma->dma_tx && tx_buf != NULL) {
1236 t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
1237 len, DMA_TO_DEVICE);
1238 if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
1239 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1240 'T', len);
1241 return -EINVAL;
1249 } 1242 }
1250 if (mcspi_dma->dma_rx && rx_buf != NULL) { 1243 }
1251 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len, 1244 if (mcspi_dma->dma_rx && rx_buf != NULL) {
1252 DMA_FROM_DEVICE); 1245 t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
1253 if (dma_mapping_error(mcspi->dev, t->rx_dma)) { 1246 DMA_FROM_DEVICE);
1254 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n", 1247 if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
1255 'R', len); 1248 dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
1256 if (tx_buf != NULL) 1249 'R', len);
1257 dma_unmap_single(mcspi->dev, t->tx_dma, 1250 if (tx_buf != NULL)
1258 len, DMA_TO_DEVICE); 1251 dma_unmap_single(mcspi->dev, t->tx_dma,
1259 status = -EINVAL; 1252 len, DMA_TO_DEVICE);
1260 goto out; 1253 return -EINVAL;
1261 }
1262 } 1254 }
1263 } 1255 }
1264 1256
1265 omap2_mcspi_work(mcspi, m); 1257skip_dma_map:
1266 /* spi_finalize_current_message() changes the status inside the 1258 return omap2_mcspi_work_one(mcspi, spi, t);
1267 * spi_message, save the status here. */
1268 status = m->status;
1269out:
1270 spi_finalize_current_message(master);
1271 return status;
1272} 1259}
1273 1260
1274static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi) 1261static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
@@ -1347,7 +1334,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1347 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 1334 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
1348 master->setup = omap2_mcspi_setup; 1335 master->setup = omap2_mcspi_setup;
1349 master->auto_runtime_pm = true; 1336 master->auto_runtime_pm = true;
1350 master->transfer_one_message = omap2_mcspi_transfer_one_message; 1337 master->transfer_one = omap2_mcspi_transfer_one;
1338 master->set_cs = omap2_mcspi_set_cs;
1351 master->cleanup = omap2_mcspi_cleanup; 1339 master->cleanup = omap2_mcspi_cleanup;
1352 master->dev.of_node = node; 1340 master->dev.of_node = node;
1353 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ; 1341 master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 861664776672..8cad107a5b3f 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -61,6 +61,12 @@ enum orion_spi_type {
61 61
62struct orion_spi_dev { 62struct orion_spi_dev {
63 enum orion_spi_type typ; 63 enum orion_spi_type typ;
64 /*
65 * min_divisor and max_hz should be exclusive, the only we can
66 * have both is for managing the armada-370-spi case with old
67 * device tree
68 */
69 unsigned long max_hz;
64 unsigned int min_divisor; 70 unsigned int min_divisor;
65 unsigned int max_divisor; 71 unsigned int max_divisor;
66 u32 prescale_mask; 72 u32 prescale_mask;
@@ -385,16 +391,54 @@ static const struct orion_spi_dev orion_spi_dev_data = {
385 .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK, 391 .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
386}; 392};
387 393
388static const struct orion_spi_dev armada_spi_dev_data = { 394static const struct orion_spi_dev armada_370_spi_dev_data = {
389 .typ = ARMADA_SPI, 395 .typ = ARMADA_SPI,
390 .min_divisor = 1, 396 .min_divisor = 4,
397 .max_divisor = 1920,
398 .max_hz = 50000000,
399 .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
400};
401
402static const struct orion_spi_dev armada_xp_spi_dev_data = {
403 .typ = ARMADA_SPI,
404 .max_hz = 50000000,
405 .max_divisor = 1920,
406 .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
407};
408
409static const struct orion_spi_dev armada_375_spi_dev_data = {
410 .typ = ARMADA_SPI,
411 .min_divisor = 15,
391 .max_divisor = 1920, 412 .max_divisor = 1920,
392 .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK, 413 .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
393}; 414};
394 415
395static const struct of_device_id orion_spi_of_match_table[] = { 416static const struct of_device_id orion_spi_of_match_table[] = {
396 { .compatible = "marvell,orion-spi", .data = &orion_spi_dev_data, }, 417 {
397 { .compatible = "marvell,armada-370-spi", .data = &armada_spi_dev_data, }, 418 .compatible = "marvell,orion-spi",
419 .data = &orion_spi_dev_data,
420 },
421 {
422 .compatible = "marvell,armada-370-spi",
423 .data = &armada_370_spi_dev_data,
424 },
425 {
426 .compatible = "marvell,armada-375-spi",
427 .data = &armada_375_spi_dev_data,
428 },
429 {
430 .compatible = "marvell,armada-380-spi",
431 .data = &armada_xp_spi_dev_data,
432 },
433 {
434 .compatible = "marvell,armada-390-spi",
435 .data = &armada_xp_spi_dev_data,
436 },
437 {
438 .compatible = "marvell,armada-xp-spi",
439 .data = &armada_xp_spi_dev_data,
440 },
441
398 {} 442 {}
399}; 443};
400MODULE_DEVICE_TABLE(of, orion_spi_of_match_table); 444MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
@@ -454,7 +498,23 @@ static int orion_spi_probe(struct platform_device *pdev)
454 goto out; 498 goto out;
455 499
456 tclk_hz = clk_get_rate(spi->clk); 500 tclk_hz = clk_get_rate(spi->clk);
457 master->max_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->min_divisor); 501
502 /*
503 * With old device tree, armada-370-spi could be used with
504 * Armada XP, however for this SoC the maximum frequency is
505 * 50MHz instead of tclk/4. On Armada 370, tclk cannot be
506 * higher than 200MHz. So, in order to be able to handle both
507 * SoCs, we can take the minimum of 50MHz and tclk/4.
508 */
509 if (of_device_is_compatible(pdev->dev.of_node,
510 "marvell,armada-370-spi"))
511 master->max_speed_hz = min(devdata->max_hz,
512 DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
513 else if (devdata->min_divisor)
514 master->max_speed_hz =
515 DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
516 else
517 master->max_speed_hz = devdata->max_hz;
458 master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor); 518 master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
459 519
460 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 520 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index fa7399e84bbb..3cfd4357489a 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -62,7 +62,7 @@ static struct pxa_spi_info spi_info_configs[] = {
62 .max_clk_rate = 3686400, 62 .max_clk_rate = 3686400,
63 }, 63 },
64 [PORT_BYT] = { 64 [PORT_BYT] = {
65 .type = LPSS_SSP, 65 .type = LPSS_BYT_SSP,
66 .port_id = 0, 66 .port_id = 0,
67 .num_chipselect = 1, 67 .num_chipselect = 1,
68 .max_clk_rate = 50000000, 68 .max_clk_rate = 50000000,
@@ -70,7 +70,7 @@ static struct pxa_spi_info spi_info_configs[] = {
70 .rx_param = &byt_rx_param, 70 .rx_param = &byt_rx_param,
71 }, 71 },
72 [PORT_BSW0] = { 72 [PORT_BSW0] = {
73 .type = LPSS_SSP, 73 .type = LPSS_BYT_SSP,
74 .port_id = 0, 74 .port_id = 0,
75 .num_chipselect = 1, 75 .num_chipselect = 1,
76 .max_clk_rate = 50000000, 76 .max_clk_rate = 50000000,
@@ -78,7 +78,7 @@ static struct pxa_spi_info spi_info_configs[] = {
78 .rx_param = &bsw0_rx_param, 78 .rx_param = &bsw0_rx_param,
79 }, 79 },
80 [PORT_BSW1] = { 80 [PORT_BSW1] = {
81 .type = LPSS_SSP, 81 .type = LPSS_BYT_SSP,
82 .port_id = 1, 82 .port_id = 1,
83 .num_chipselect = 1, 83 .num_chipselect = 1,
84 .max_clk_rate = 50000000, 84 .max_clk_rate = 50000000,
@@ -86,7 +86,7 @@ static struct pxa_spi_info spi_info_configs[] = {
86 .rx_param = &bsw1_rx_param, 86 .rx_param = &bsw1_rx_param,
87 }, 87 },
88 [PORT_BSW2] = { 88 [PORT_BSW2] = {
89 .type = LPSS_SSP, 89 .type = LPSS_BYT_SSP,
90 .port_id = 2, 90 .port_id = 2,
91 .num_chipselect = 1, 91 .num_chipselect = 1,
92 .max_clk_rate = 50000000, 92 .max_clk_rate = 50000000,
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
deleted file mode 100644
index 2e0796a0003f..000000000000
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ /dev/null
@@ -1,487 +0,0 @@
1/*
2 * PXA2xx SPI private DMA support.
3 *
4 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <linux/delay.h>
18#include <linux/device.h>
19#include <linux/dma-mapping.h>
20#include <linux/pxa2xx_ssp.h>
21#include <linux/spi/spi.h>
22#include <linux/spi/pxa2xx_spi.h>
23
24#include <mach/dma.h>
25#include "spi-pxa2xx.h"
26
27#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
28#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
29
30bool pxa2xx_spi_dma_is_possible(size_t len)
31{
32 /* Try to map dma buffer and do a dma transfer if successful, but
33 * only if the length is non-zero and less than MAX_DMA_LEN.
34 *
35 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
36 * of PIO instead. Care is needed above because the transfer may
37 * have have been passed with buffers that are already dma mapped.
38 * A zero-length transfer in PIO mode will not try to write/read
39 * to/from the buffers
40 *
41 * REVISIT large transfers are exactly where we most want to be
42 * using DMA. If this happens much, split those transfers into
43 * multiple DMA segments rather than forcing PIO.
44 */
45 return len > 0 && len <= MAX_DMA_LEN;
46}
47
48int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
49{
50 struct spi_message *msg = drv_data->cur_msg;
51 struct device *dev = &msg->spi->dev;
52
53 if (!drv_data->cur_chip->enable_dma)
54 return 0;
55
56 if (msg->is_dma_mapped)
57 return drv_data->rx_dma && drv_data->tx_dma;
58
59 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
60 return 0;
61
62 /* Modify setup if rx buffer is null */
63 if (drv_data->rx == NULL) {
64 *drv_data->null_dma_buf = 0;
65 drv_data->rx = drv_data->null_dma_buf;
66 drv_data->rx_map_len = 4;
67 } else
68 drv_data->rx_map_len = drv_data->len;
69
70
71 /* Modify setup if tx buffer is null */
72 if (drv_data->tx == NULL) {
73 *drv_data->null_dma_buf = 0;
74 drv_data->tx = drv_data->null_dma_buf;
75 drv_data->tx_map_len = 4;
76 } else
77 drv_data->tx_map_len = drv_data->len;
78
79 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
80 * so we flush the cache *before* invalidating it, in case
81 * the tx and rx buffers overlap.
82 */
83 drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
84 drv_data->tx_map_len, DMA_TO_DEVICE);
85 if (dma_mapping_error(dev, drv_data->tx_dma))
86 return 0;
87
88 /* Stream map the rx buffer */
89 drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
90 drv_data->rx_map_len, DMA_FROM_DEVICE);
91 if (dma_mapping_error(dev, drv_data->rx_dma)) {
92 dma_unmap_single(dev, drv_data->tx_dma,
93 drv_data->tx_map_len, DMA_TO_DEVICE);
94 return 0;
95 }
96
97 return 1;
98}
99
100static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
101{
102 struct device *dev;
103
104 if (!drv_data->dma_mapped)
105 return;
106
107 if (!drv_data->cur_msg->is_dma_mapped) {
108 dev = &drv_data->cur_msg->spi->dev;
109 dma_unmap_single(dev, drv_data->rx_dma,
110 drv_data->rx_map_len, DMA_FROM_DEVICE);
111 dma_unmap_single(dev, drv_data->tx_dma,
112 drv_data->tx_map_len, DMA_TO_DEVICE);
113 }
114
115 drv_data->dma_mapped = 0;
116}
117
118static int wait_ssp_rx_stall(struct driver_data *drv_data)
119{
120 unsigned long limit = loops_per_jiffy << 1;
121
122 while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
123 cpu_relax();
124
125 return limit;
126}
127
128static int wait_dma_channel_stop(int channel)
129{
130 unsigned long limit = loops_per_jiffy << 1;
131
132 while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
133 cpu_relax();
134
135 return limit;
136}
137
138static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
139 const char *msg)
140{
141 /* Stop and reset */
142 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
143 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
144 write_SSSR_CS(drv_data, drv_data->clear_sr);
145 pxa2xx_spi_write(drv_data, SSCR1,
146 pxa2xx_spi_read(drv_data, SSCR1)
147 & ~drv_data->dma_cr1);
148 if (!pxa25x_ssp_comp(drv_data))
149 pxa2xx_spi_write(drv_data, SSTO, 0);
150 pxa2xx_spi_flush(drv_data);
151 pxa2xx_spi_write(drv_data, SSCR0,
152 pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
153
154 pxa2xx_spi_unmap_dma_buffers(drv_data);
155
156 dev_err(&drv_data->pdev->dev, "%s\n", msg);
157
158 drv_data->cur_msg->state = ERROR_STATE;
159 tasklet_schedule(&drv_data->pump_transfers);
160}
161
162static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
163{
164 struct spi_message *msg = drv_data->cur_msg;
165
166 /* Clear and disable interrupts on SSP and DMA channels*/
167 pxa2xx_spi_write(drv_data, SSCR1,
168 pxa2xx_spi_read(drv_data, SSCR1)
169 & ~drv_data->dma_cr1);
170 write_SSSR_CS(drv_data, drv_data->clear_sr);
171 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
172 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
173
174 if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
175 dev_err(&drv_data->pdev->dev,
176 "dma_handler: dma rx channel stop failed\n");
177
178 if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
179 dev_err(&drv_data->pdev->dev,
180 "dma_transfer: ssp rx stall failed\n");
181
182 pxa2xx_spi_unmap_dma_buffers(drv_data);
183
184 /* update the buffer pointer for the amount completed in dma */
185 drv_data->rx += drv_data->len -
186 (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
187
188 /* read trailing data from fifo, it does not matter how many
189 * bytes are in the fifo just read until buffer is full
190 * or fifo is empty, which ever occurs first */
191 drv_data->read(drv_data);
192
193 /* return count of what was actually read */
194 msg->actual_length += drv_data->len -
195 (drv_data->rx_end - drv_data->rx);
196
197 /* Transfer delays and chip select release are
198 * handled in pump_transfers or giveback
199 */
200
201 /* Move to next transfer */
202 msg->state = pxa2xx_spi_next_transfer(drv_data);
203
204 /* Schedule transfer tasklet */
205 tasklet_schedule(&drv_data->pump_transfers);
206}
207
208void pxa2xx_spi_dma_handler(int channel, void *data)
209{
210 struct driver_data *drv_data = data;
211 u32 irq_status = DCSR(channel) & DMA_INT_MASK;
212
213 if (irq_status & DCSR_BUSERR) {
214
215 if (channel == drv_data->tx_channel)
216 pxa2xx_spi_dma_error_stop(drv_data,
217 "dma_handler: bad bus address on tx channel");
218 else
219 pxa2xx_spi_dma_error_stop(drv_data,
220 "dma_handler: bad bus address on rx channel");
221 return;
222 }
223
224 /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */
225 if ((channel == drv_data->tx_channel)
226 && (irq_status & DCSR_ENDINTR)
227 && (drv_data->ssp_type == PXA25x_SSP)) {
228
229 /* Wait for rx to stall */
230 if (wait_ssp_rx_stall(drv_data) == 0)
231 dev_err(&drv_data->pdev->dev,
232 "dma_handler: ssp rx stall failed\n");
233
234 /* finish this transfer, start the next */
235 pxa2xx_spi_dma_transfer_complete(drv_data);
236 }
237}
238
239irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
240{
241 u32 irq_status;
242
243 irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
244 if (irq_status & SSSR_ROR) {
245 pxa2xx_spi_dma_error_stop(drv_data,
246 "dma_transfer: fifo overrun");
247 return IRQ_HANDLED;
248 }
249
250 /* Check for false positive timeout */
251 if ((irq_status & SSSR_TINT)
252 && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
253 pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
254 return IRQ_HANDLED;
255 }
256
257 if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
258
259 /* Clear and disable timeout interrupt, do the rest in
260 * dma_transfer_complete */
261 if (!pxa25x_ssp_comp(drv_data))
262 pxa2xx_spi_write(drv_data, SSTO, 0);
263
264 /* finish this transfer, start the next */
265 pxa2xx_spi_dma_transfer_complete(drv_data);
266
267 return IRQ_HANDLED;
268 }
269
270 /* Opps problem detected */
271 return IRQ_NONE;
272}
273
274int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
275{
276 u32 dma_width;
277
278 switch (drv_data->n_bytes) {
279 case 1:
280 dma_width = DCMD_WIDTH1;
281 break;
282 case 2:
283 dma_width = DCMD_WIDTH2;
284 break;
285 default:
286 dma_width = DCMD_WIDTH4;
287 break;
288 }
289
290 /* Setup rx DMA Channel */
291 DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
292 DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
293 DTADR(drv_data->rx_channel) = drv_data->rx_dma;
294 if (drv_data->rx == drv_data->null_dma_buf)
295 /* No target address increment */
296 DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
297 | dma_width
298 | dma_burst
299 | drv_data->len;
300 else
301 DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
302 | DCMD_FLOWSRC
303 | dma_width
304 | dma_burst
305 | drv_data->len;
306
307 /* Setup tx DMA Channel */
308 DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
309 DSADR(drv_data->tx_channel) = drv_data->tx_dma;
310 DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
311 if (drv_data->tx == drv_data->null_dma_buf)
312 /* No source address increment */
313 DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
314 | dma_width
315 | dma_burst
316 | drv_data->len;
317 else
318 DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
319 | DCMD_FLOWTRG
320 | dma_width
321 | dma_burst
322 | drv_data->len;
323
324 /* Enable dma end irqs on SSP to detect end of transfer */
325 if (drv_data->ssp_type == PXA25x_SSP)
326 DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
327
328 return 0;
329}
330
331void pxa2xx_spi_dma_start(struct driver_data *drv_data)
332{
333 DCSR(drv_data->rx_channel) |= DCSR_RUN;
334 DCSR(drv_data->tx_channel) |= DCSR_RUN;
335}
336
337int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
338{
339 struct device *dev = &drv_data->pdev->dev;
340 struct ssp_device *ssp = drv_data->ssp;
341
342 /* Get two DMA channels (rx and tx) */
343 drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
344 DMA_PRIO_HIGH,
345 pxa2xx_spi_dma_handler,
346 drv_data);
347 if (drv_data->rx_channel < 0) {
348 dev_err(dev, "problem (%d) requesting rx channel\n",
349 drv_data->rx_channel);
350 return -ENODEV;
351 }
352 drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
353 DMA_PRIO_MEDIUM,
354 pxa2xx_spi_dma_handler,
355 drv_data);
356 if (drv_data->tx_channel < 0) {
357 dev_err(dev, "problem (%d) requesting tx channel\n",
358 drv_data->tx_channel);
359 pxa_free_dma(drv_data->rx_channel);
360 return -ENODEV;
361 }
362
363 DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
364 DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
365
366 return 0;
367}
368
369void pxa2xx_spi_dma_release(struct driver_data *drv_data)
370{
371 struct ssp_device *ssp = drv_data->ssp;
372
373 DRCMR(ssp->drcmr_rx) = 0;
374 DRCMR(ssp->drcmr_tx) = 0;
375
376 if (drv_data->tx_channel != 0)
377 pxa_free_dma(drv_data->tx_channel);
378 if (drv_data->rx_channel != 0)
379 pxa_free_dma(drv_data->rx_channel);
380}
381
382void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
383{
384 if (drv_data->rx_channel != -1)
385 DRCMR(drv_data->ssp->drcmr_rx) =
386 DRCMR_MAPVLD | drv_data->rx_channel;
387 if (drv_data->tx_channel != -1)
388 DRCMR(drv_data->ssp->drcmr_tx) =
389 DRCMR_MAPVLD | drv_data->tx_channel;
390}
391
392int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
393 struct spi_device *spi,
394 u8 bits_per_word, u32 *burst_code,
395 u32 *threshold)
396{
397 struct pxa2xx_spi_chip *chip_info =
398 (struct pxa2xx_spi_chip *)spi->controller_data;
399 int bytes_per_word;
400 int burst_bytes;
401 int thresh_words;
402 int req_burst_size;
403 int retval = 0;
404
405 /* Set the threshold (in registers) to equal the same amount of data
406 * as represented by burst size (in bytes). The computation below
407 * is (burst_size rounded up to nearest 8 byte, word or long word)
408 * divided by (bytes/register); the tx threshold is the inverse of
409 * the rx, so that there will always be enough data in the rx fifo
410 * to satisfy a burst, and there will always be enough space in the
411 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
412 * there is not enough space), there must always remain enough empty
413 * space in the rx fifo for any data loaded to the tx fifo.
414 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
415 * will be 8, or half the fifo;
416 * The threshold can only be set to 2, 4 or 8, but not 16, because
417 * to burst 16 to the tx fifo, the fifo would have to be empty;
418 * however, the minimum fifo trigger level is 1, and the tx will
419 * request service when the fifo is at this level, with only 15 spaces.
420 */
421
422 /* find bytes/word */
423 if (bits_per_word <= 8)
424 bytes_per_word = 1;
425 else if (bits_per_word <= 16)
426 bytes_per_word = 2;
427 else
428 bytes_per_word = 4;
429
430 /* use struct pxa2xx_spi_chip->dma_burst_size if available */
431 if (chip_info)
432 req_burst_size = chip_info->dma_burst_size;
433 else {
434 switch (chip->dma_burst_size) {
435 default:
436 /* if the default burst size is not set,
437 * do it now */
438 chip->dma_burst_size = DCMD_BURST8;
439 case DCMD_BURST8:
440 req_burst_size = 8;
441 break;
442 case DCMD_BURST16:
443 req_burst_size = 16;
444 break;
445 case DCMD_BURST32:
446 req_burst_size = 32;
447 break;
448 }
449 }
450 if (req_burst_size <= 8) {
451 *burst_code = DCMD_BURST8;
452 burst_bytes = 8;
453 } else if (req_burst_size <= 16) {
454 if (bytes_per_word == 1) {
455 /* don't burst more than 1/2 the fifo */
456 *burst_code = DCMD_BURST8;
457 burst_bytes = 8;
458 retval = 1;
459 } else {
460 *burst_code = DCMD_BURST16;
461 burst_bytes = 16;
462 }
463 } else {
464 if (bytes_per_word == 1) {
465 /* don't burst more than 1/2 the fifo */
466 *burst_code = DCMD_BURST8;
467 burst_bytes = 8;
468 retval = 1;
469 } else if (bytes_per_word == 2) {
470 /* don't burst more than 1/2 the fifo */
471 *burst_code = DCMD_BURST16;
472 burst_bytes = 16;
473 retval = 1;
474 } else {
475 *burst_code = DCMD_BURST32;
476 burst_bytes = 32;
477 }
478 }
479
480 thresh_words = burst_bytes / bytes_per_word;
481
482 /* thresh_words will be between 2 and 8 */
483 *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
484 | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
485
486 return retval;
487}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index e3223ac75a7c..7293d6d875c5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -60,21 +60,60 @@ MODULE_ALIAS("platform:pxa2xx-spi");
60 | QUARK_X1000_SSCR1_TFT \ 60 | QUARK_X1000_SSCR1_TFT \
61 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) 61 | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
62 62
63#define LPSS_RX_THRESH_DFLT 64
64#define LPSS_TX_LOTHRESH_DFLT 160
65#define LPSS_TX_HITHRESH_DFLT 224
66
67/* Offset from drv_data->lpss_base */
68#define GENERAL_REG 0x08
69#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) 63#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
70#define SSP_REG 0x0c
71#define SPI_CS_CONTROL 0x18
72#define SPI_CS_CONTROL_SW_MODE BIT(0) 64#define SPI_CS_CONTROL_SW_MODE BIT(0)
73#define SPI_CS_CONTROL_CS_HIGH BIT(1) 65#define SPI_CS_CONTROL_CS_HIGH BIT(1)
74 66
67struct lpss_config {
68 /* LPSS offset from drv_data->ioaddr */
69 unsigned offset;
70 /* Register offsets from drv_data->lpss_base or -1 */
71 int reg_general;
72 int reg_ssp;
73 int reg_cs_ctrl;
74 /* FIFO thresholds */
75 u32 rx_threshold;
76 u32 tx_threshold_lo;
77 u32 tx_threshold_hi;
78};
79
80/* Keep these sorted with enum pxa_ssp_type */
81static const struct lpss_config lpss_platforms[] = {
82 { /* LPSS_LPT_SSP */
83 .offset = 0x800,
84 .reg_general = 0x08,
85 .reg_ssp = 0x0c,
86 .reg_cs_ctrl = 0x18,
87 .rx_threshold = 64,
88 .tx_threshold_lo = 160,
89 .tx_threshold_hi = 224,
90 },
91 { /* LPSS_BYT_SSP */
92 .offset = 0x400,
93 .reg_general = 0x08,
94 .reg_ssp = 0x0c,
95 .reg_cs_ctrl = 0x18,
96 .rx_threshold = 64,
97 .tx_threshold_lo = 160,
98 .tx_threshold_hi = 224,
99 },
100};
101
102static inline const struct lpss_config
103*lpss_get_config(const struct driver_data *drv_data)
104{
105 return &lpss_platforms[drv_data->ssp_type - LPSS_LPT_SSP];
106}
107
75static bool is_lpss_ssp(const struct driver_data *drv_data) 108static bool is_lpss_ssp(const struct driver_data *drv_data)
76{ 109{
77 return drv_data->ssp_type == LPSS_SSP; 110 switch (drv_data->ssp_type) {
111 case LPSS_LPT_SSP:
112 case LPSS_BYT_SSP:
113 return true;
114 default:
115 return false;
116 }
78} 117}
79 118
80static bool is_quark_x1000_ssp(const struct driver_data *drv_data) 119static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
@@ -192,63 +231,43 @@ static void __lpss_ssp_write_priv(struct driver_data *drv_data,
192 */ 231 */
193static void lpss_ssp_setup(struct driver_data *drv_data) 232static void lpss_ssp_setup(struct driver_data *drv_data)
194{ 233{
195 unsigned offset = 0x400; 234 const struct lpss_config *config;
196 u32 value, orig; 235 u32 value;
197
198 /*
199 * Perform auto-detection of the LPSS SSP private registers. They
200 * can be either at 1k or 2k offset from the base address.
201 */
202 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
203
204 /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
205 value = orig | SPI_CS_CONTROL_SW_MODE;
206 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
207 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
208 if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
209 offset = 0x800;
210 goto detection_done;
211 }
212
213 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
214
215 /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
216 value = orig & ~SPI_CS_CONTROL_SW_MODE;
217 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
218 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
219 if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
220 offset = 0x800;
221 goto detection_done;
222 }
223 236
224detection_done: 237 config = lpss_get_config(drv_data);
225 /* Now set the LPSS base */ 238 drv_data->lpss_base = drv_data->ioaddr + config->offset;
226 drv_data->lpss_base = drv_data->ioaddr + offset;
227 239
228 /* Enable software chip select control */ 240 /* Enable software chip select control */
229 value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH; 241 value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
230 __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); 242 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
231 243
232 /* Enable multiblock DMA transfers */ 244 /* Enable multiblock DMA transfers */
233 if (drv_data->master_info->enable_dma) { 245 if (drv_data->master_info->enable_dma) {
234 __lpss_ssp_write_priv(drv_data, SSP_REG, 1); 246 __lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
235 247
236 value = __lpss_ssp_read_priv(drv_data, GENERAL_REG); 248 if (config->reg_general >= 0) {
237 value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE; 249 value = __lpss_ssp_read_priv(drv_data,
238 __lpss_ssp_write_priv(drv_data, GENERAL_REG, value); 250 config->reg_general);
251 value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
252 __lpss_ssp_write_priv(drv_data,
253 config->reg_general, value);
254 }
239 } 255 }
240} 256}
241 257
242static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) 258static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
243{ 259{
260 const struct lpss_config *config;
244 u32 value; 261 u32 value;
245 262
246 value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL); 263 config = lpss_get_config(drv_data);
264
265 value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
247 if (enable) 266 if (enable)
248 value &= ~SPI_CS_CONTROL_CS_HIGH; 267 value &= ~SPI_CS_CONTROL_CS_HIGH;
249 else 268 else
250 value |= SPI_CS_CONTROL_CS_HIGH; 269 value |= SPI_CS_CONTROL_CS_HIGH;
251 __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); 270 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
252} 271}
253 272
254static void cs_assert(struct driver_data *drv_data) 273static void cs_assert(struct driver_data *drv_data)
@@ -1075,6 +1094,7 @@ static int setup(struct spi_device *spi)
1075{ 1094{
1076 struct pxa2xx_spi_chip *chip_info = NULL; 1095 struct pxa2xx_spi_chip *chip_info = NULL;
1077 struct chip_data *chip; 1096 struct chip_data *chip;
1097 const struct lpss_config *config;
1078 struct driver_data *drv_data = spi_master_get_devdata(spi->master); 1098 struct driver_data *drv_data = spi_master_get_devdata(spi->master);
1079 unsigned int clk_div; 1099 unsigned int clk_div;
1080 uint tx_thres, tx_hi_thres, rx_thres; 1100 uint tx_thres, tx_hi_thres, rx_thres;
@@ -1085,10 +1105,12 @@ static int setup(struct spi_device *spi)
1085 tx_hi_thres = 0; 1105 tx_hi_thres = 0;
1086 rx_thres = RX_THRESH_QUARK_X1000_DFLT; 1106 rx_thres = RX_THRESH_QUARK_X1000_DFLT;
1087 break; 1107 break;
1088 case LPSS_SSP: 1108 case LPSS_LPT_SSP:
1089 tx_thres = LPSS_TX_LOTHRESH_DFLT; 1109 case LPSS_BYT_SSP:
1090 tx_hi_thres = LPSS_TX_HITHRESH_DFLT; 1110 config = lpss_get_config(drv_data);
1091 rx_thres = LPSS_RX_THRESH_DFLT; 1111 tx_thres = config->tx_threshold_lo;
1112 tx_hi_thres = config->tx_threshold_hi;
1113 rx_thres = config->rx_threshold;
1092 break; 1114 break;
1093 default: 1115 default:
1094 tx_thres = TX_THRESH_DFLT; 1116 tx_thres = TX_THRESH_DFLT;
@@ -1242,6 +1264,18 @@ static void cleanup(struct spi_device *spi)
1242} 1264}
1243 1265
1244#ifdef CONFIG_ACPI 1266#ifdef CONFIG_ACPI
1267
1268static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1269 { "INT33C0", LPSS_LPT_SSP },
1270 { "INT33C1", LPSS_LPT_SSP },
1271 { "INT3430", LPSS_LPT_SSP },
1272 { "INT3431", LPSS_LPT_SSP },
1273 { "80860F0E", LPSS_BYT_SSP },
1274 { "8086228E", LPSS_BYT_SSP },
1275 { },
1276};
1277MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
1278
1245static struct pxa2xx_spi_master * 1279static struct pxa2xx_spi_master *
1246pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) 1280pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1247{ 1281{
@@ -1249,12 +1283,19 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1249 struct acpi_device *adev; 1283 struct acpi_device *adev;
1250 struct ssp_device *ssp; 1284 struct ssp_device *ssp;
1251 struct resource *res; 1285 struct resource *res;
1252 int devid; 1286 const struct acpi_device_id *id;
1287 int devid, type;
1253 1288
1254 if (!ACPI_HANDLE(&pdev->dev) || 1289 if (!ACPI_HANDLE(&pdev->dev) ||
1255 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev)) 1290 acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
1256 return NULL; 1291 return NULL;
1257 1292
1293 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
1294 if (id)
1295 type = (int)id->driver_data;
1296 else
1297 return NULL;
1298
1258 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1299 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1259 if (!pdata) 1300 if (!pdata)
1260 return NULL; 1301 return NULL;
@@ -1272,7 +1313,7 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1272 1313
1273 ssp->clk = devm_clk_get(&pdev->dev, NULL); 1314 ssp->clk = devm_clk_get(&pdev->dev, NULL);
1274 ssp->irq = platform_get_irq(pdev, 0); 1315 ssp->irq = platform_get_irq(pdev, 0);
1275 ssp->type = LPSS_SSP; 1316 ssp->type = type;
1276 ssp->pdev = pdev; 1317 ssp->pdev = pdev;
1277 1318
1278 ssp->port_id = -1; 1319 ssp->port_id = -1;
@@ -1285,16 +1326,6 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1285 return pdata; 1326 return pdata;
1286} 1327}
1287 1328
1288static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1289 { "INT33C0", 0 },
1290 { "INT33C1", 0 },
1291 { "INT3430", 0 },
1292 { "INT3431", 0 },
1293 { "80860F0E", 0 },
1294 { "8086228E", 0 },
1295 { },
1296};
1297MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
1298#else 1329#else
1299static inline struct pxa2xx_spi_master * 1330static inline struct pxa2xx_spi_master *
1300pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) 1331pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 85a58c906869..9f01e9c9aa75 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -162,11 +162,7 @@ extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
162/* 162/*
163 * Select the right DMA implementation. 163 * Select the right DMA implementation.
164 */ 164 */
165#if defined(CONFIG_SPI_PXA2XX_PXADMA) 165#if defined(CONFIG_SPI_PXA2XX_DMA)
166#define SPI_PXA2XX_USE_DMA 1
167#define MAX_DMA_LEN 8191
168#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
169#elif defined(CONFIG_SPI_PXA2XX_DMA)
170#define SPI_PXA2XX_USE_DMA 1 166#define SPI_PXA2XX_USE_DMA 1
171#define MAX_DMA_LEN SZ_64K 167#define MAX_DMA_LEN SZ_64K
172#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL) 168#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
diff --git a/drivers/spi/spi-rb4xx.c b/drivers/spi/spi-rb4xx.c
new file mode 100644
index 000000000000..3641d0e20135
--- /dev/null
+++ b/drivers/spi/spi-rb4xx.c
@@ -0,0 +1,210 @@
1/*
2 * SPI controller driver for the Mikrotik RB4xx boards
3 *
4 * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2015 Bert Vermeulen <bert@biot.com>
6 *
7 * This file was based on the patches for Linux 2.6.27.39 published by
8 * MikroTik for their RouterBoard 4xx series devices.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/clk.h>
20#include <linux/spi/spi.h>
21
22#include <asm/mach-ath79/ar71xx_regs.h>
23
24struct rb4xx_spi {
25 void __iomem *base;
26 struct clk *clk;
27};
28
29static inline u32 rb4xx_read(struct rb4xx_spi *rbspi, u32 reg)
30{
31 return __raw_readl(rbspi->base + reg);
32}
33
34static inline void rb4xx_write(struct rb4xx_spi *rbspi, u32 reg, u32 value)
35{
36 __raw_writel(value, rbspi->base + reg);
37}
38
39static inline void do_spi_clk(struct rb4xx_spi *rbspi, u32 spi_ioc, int value)
40{
41 u32 regval;
42
43 regval = spi_ioc;
44 if (value & BIT(0))
45 regval |= AR71XX_SPI_IOC_DO;
46
47 rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
48 rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
49}
50
51static void do_spi_byte(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
52{
53 int i;
54
55 for (i = 7; i >= 0; i--)
56 do_spi_clk(rbspi, spi_ioc, byte >> i);
57}
58
59/* The CS2 pin is used to clock in a second bit per clock cycle. */
60static inline void do_spi_clk_two(struct rb4xx_spi *rbspi, u32 spi_ioc,
61 u8 value)
62{
63 u32 regval;
64
65 regval = spi_ioc;
66 if (value & BIT(1))
67 regval |= AR71XX_SPI_IOC_DO;
68 if (value & BIT(0))
69 regval |= AR71XX_SPI_IOC_CS2;
70
71 rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval);
72 rb4xx_write(rbspi, AR71XX_SPI_REG_IOC, regval | AR71XX_SPI_IOC_CLK);
73}
74
75/* Two bits at a time, msb first */
76static void do_spi_byte_two(struct rb4xx_spi *rbspi, u32 spi_ioc, u8 byte)
77{
78 do_spi_clk_two(rbspi, spi_ioc, byte >> 6);
79 do_spi_clk_two(rbspi, spi_ioc, byte >> 4);
80 do_spi_clk_two(rbspi, spi_ioc, byte >> 2);
81 do_spi_clk_two(rbspi, spi_ioc, byte >> 0);
82}
83
84static void rb4xx_set_cs(struct spi_device *spi, bool enable)
85{
86 struct rb4xx_spi *rbspi = spi_master_get_devdata(spi->master);
87
88 /*
89 * Setting CS is done along with bitbanging the actual values,
90 * since it's all on the same hardware register. However the
91 * CPLD needs CS deselected after every command.
92 */
93 if (enable)
94 rb4xx_write(rbspi, AR71XX_SPI_REG_IOC,
95 AR71XX_SPI_IOC_CS0 | AR71XX_SPI_IOC_CS1);
96}
97
98static int rb4xx_transfer_one(struct spi_master *master,
99 struct spi_device *spi, struct spi_transfer *t)
100{
101 struct rb4xx_spi *rbspi = spi_master_get_devdata(master);
102 int i;
103 u32 spi_ioc;
104 u8 *rx_buf;
105 const u8 *tx_buf;
106
107 /*
108 * Prime the SPI register with the SPI device selected. The m25p80 boot
109 * flash and CPLD share the CS0 pin. This works because the CPLD's
110 * command set was designed to almost not clash with that of the
111 * boot flash.
112 */
113 if (spi->chip_select == 2)
114 /* MMC */
115 spi_ioc = AR71XX_SPI_IOC_CS0;
116 else
117 /* Boot flash and CPLD */
118 spi_ioc = AR71XX_SPI_IOC_CS1;
119
120 tx_buf = t->tx_buf;
121 rx_buf = t->rx_buf;
122 for (i = 0; i < t->len; ++i) {
123 if (t->tx_nbits == SPI_NBITS_DUAL)
124 /* CPLD can use two-wire transfers */
125 do_spi_byte_two(rbspi, spi_ioc, tx_buf[i]);
126 else
127 do_spi_byte(rbspi, spi_ioc, tx_buf[i]);
128 if (!rx_buf)
129 continue;
130 rx_buf[i] = rb4xx_read(rbspi, AR71XX_SPI_REG_RDS);
131 }
132 spi_finalize_current_transfer(master);
133
134 return 0;
135}
136
137static int rb4xx_spi_probe(struct platform_device *pdev)
138{
139 struct spi_master *master;
140 struct clk *ahb_clk;
141 struct rb4xx_spi *rbspi;
142 struct resource *r;
143 int err;
144 void __iomem *spi_base;
145
146 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
147 spi_base = devm_ioremap_resource(&pdev->dev, r);
148 if (IS_ERR(spi_base))
149 return PTR_ERR(spi_base);
150
151 master = spi_alloc_master(&pdev->dev, sizeof(*rbspi));
152 if (!master)
153 return -ENOMEM;
154
155 ahb_clk = devm_clk_get(&pdev->dev, "ahb");
156 if (IS_ERR(ahb_clk))
157 return PTR_ERR(ahb_clk);
158
159 master->bus_num = 0;
160 master->num_chipselect = 3;
161 master->mode_bits = SPI_TX_DUAL;
162 master->bits_per_word_mask = BIT(7);
163 master->flags = SPI_MASTER_MUST_TX;
164 master->transfer_one = rb4xx_transfer_one;
165 master->set_cs = rb4xx_set_cs;
166
167 err = devm_spi_register_master(&pdev->dev, master);
168 if (err) {
169 dev_err(&pdev->dev, "failed to register SPI master\n");
170 return err;
171 }
172
173 err = clk_prepare_enable(ahb_clk);
174 if (err)
175 return err;
176
177 rbspi = spi_master_get_devdata(master);
178 rbspi->base = spi_base;
179 rbspi->clk = ahb_clk;
180 platform_set_drvdata(pdev, rbspi);
181
182 /* Enable SPI */
183 rb4xx_write(rbspi, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
184
185 return 0;
186}
187
188static int rb4xx_spi_remove(struct platform_device *pdev)
189{
190 struct rb4xx_spi *rbspi = platform_get_drvdata(pdev);
191
192 clk_disable_unprepare(rbspi->clk);
193
194 return 0;
195}
196
197static struct platform_driver rb4xx_spi_drv = {
198 .probe = rb4xx_spi_probe,
199 .remove = rb4xx_spi_remove,
200 .driver = {
201 .name = "rb4xx-spi",
202 },
203};
204
205module_platform_driver(rb4xx_spi_drv);
206
207MODULE_DESCRIPTION("Mikrotik RB4xx SPI controller driver");
208MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
209MODULE_AUTHOR("Bert Vermeulen <bert@biot.com>");
210MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index f6bac9e77d06..f9189a0c8cec 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -665,15 +665,12 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
665static int rspi_dma_check_then_transfer(struct rspi_data *rspi, 665static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
666 struct spi_transfer *xfer) 666 struct spi_transfer *xfer)
667{ 667{
668 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) { 668 if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
669 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */ 669 return -EAGAIN;
670 int ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
671 xfer->rx_buf ? &xfer->rx_sg : NULL);
672 if (ret != -EAGAIN)
673 return 0;
674 }
675 670
676 return -EAGAIN; 671 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
672 return rspi_dma_transfer(rspi, &xfer->tx_sg,
673 xfer->rx_buf ? &xfer->rx_sg : NULL);
677} 674}
678 675
679static int rspi_common_transfer(struct rspi_data *rspi, 676static int rspi_common_transfer(struct rspi_data *rspi,
@@ -724,7 +721,7 @@ static int rspi_rz_transfer_one(struct spi_master *master,
724 return rspi_common_transfer(rspi, xfer); 721 return rspi_common_transfer(rspi, xfer);
725} 722}
726 723
727static int qspi_trigger_transfer_out_int(struct rspi_data *rspi, const u8 *tx, 724static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
728 u8 *rx, unsigned int len) 725 u8 *rx, unsigned int len)
729{ 726{
730 int i, n, ret; 727 int i, n, ret;
@@ -771,12 +768,8 @@ static int qspi_transfer_out_in(struct rspi_data *rspi,
771 if (ret != -EAGAIN) 768 if (ret != -EAGAIN)
772 return ret; 769 return ret;
773 770
774 ret = qspi_trigger_transfer_out_int(rspi, xfer->tx_buf, 771 return qspi_trigger_transfer_out_in(rspi, xfer->tx_buf,
775 xfer->rx_buf, xfer->len); 772 xfer->rx_buf, xfer->len);
776 if (ret < 0)
777 return ret;
778
779 return 0;
780} 773}
781 774
782static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) 775static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
@@ -1300,7 +1293,7 @@ error1:
1300 return ret; 1293 return ret;
1301} 1294}
1302 1295
1303static struct platform_device_id spi_driver_ids[] = { 1296static const struct platform_device_id spi_driver_ids[] = {
1304 { "rspi", (kernel_ulong_t)&rspi_ops }, 1297 { "rspi", (kernel_ulong_t)&rspi_ops },
1305 { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops }, 1298 { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
1306 { "qspi", (kernel_ulong_t)&qspi_ops }, 1299 { "qspi", (kernel_ulong_t)&qspi_ops },
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index b1c6731fbf27..2a8c513c4d07 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -1347,7 +1347,7 @@ static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
1347 .quirks = S3C64XX_SPI_QUIRK_CS_AUTO, 1347 .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
1348}; 1348};
1349 1349
1350static struct platform_device_id s3c64xx_spi_driver_ids[] = { 1350static const struct platform_device_id s3c64xx_spi_driver_ids[] = {
1351 { 1351 {
1352 .name = "s3c2443-spi", 1352 .name = "s3c2443-spi",
1353 .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config, 1353 .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index bcc7c635d8e7..d3370a612d84 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1263,7 +1263,7 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
1263 return 0; 1263 return 0;
1264} 1264}
1265 1265
1266static struct platform_device_id spi_driver_ids[] = { 1266static const struct platform_device_id spi_driver_ids[] = {
1267 { "spi_sh_msiof", (kernel_ulong_t)&sh_data }, 1267 { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
1268 { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data }, 1268 { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
1269 { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data }, 1269 { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index f5715c9f68b0..7072276ad354 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -26,28 +26,6 @@
26#include <linux/reset.h> 26#include <linux/reset.h>
27 27
28#define DRIVER_NAME "sirfsoc_spi" 28#define DRIVER_NAME "sirfsoc_spi"
29
30#define SIRFSOC_SPI_CTRL 0x0000
31#define SIRFSOC_SPI_CMD 0x0004
32#define SIRFSOC_SPI_TX_RX_EN 0x0008
33#define SIRFSOC_SPI_INT_EN 0x000C
34#define SIRFSOC_SPI_INT_STATUS 0x0010
35#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
36#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
37#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
38#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
39#define SIRFSOC_SPI_TXFIFO_OP 0x0110
40#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
41#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
42#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
43#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
44#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
45#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
46#define SIRFSOC_SPI_RXFIFO_OP 0x0130
47#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
48#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
49#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
50
51/* SPI CTRL register defines */ 29/* SPI CTRL register defines */
52#define SIRFSOC_SPI_SLV_MODE BIT(16) 30#define SIRFSOC_SPI_SLV_MODE BIT(16)
53#define SIRFSOC_SPI_CMD_MODE BIT(17) 31#define SIRFSOC_SPI_CMD_MODE BIT(17)
@@ -80,8 +58,6 @@
80#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9) 58#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
81#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10) 59#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
82 60
83#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
84
85/* Interrupt status */ 61/* Interrupt status */
86#define SIRFSOC_SPI_RX_DONE BIT(0) 62#define SIRFSOC_SPI_RX_DONE BIT(0)
87#define SIRFSOC_SPI_TX_DONE BIT(1) 63#define SIRFSOC_SPI_TX_DONE BIT(1)
@@ -110,20 +86,66 @@
110#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0) 86#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
111#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0) 87#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
112#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0) 88#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
113 89/* USP related */
114/* FIFO Status */ 90#define SIRFSOC_USP_SYNC_MODE BIT(0)
115#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF 91#define SIRFSOC_USP_SLV_MODE BIT(1)
116#define SIRFSOC_SPI_FIFO_FULL BIT(8) 92#define SIRFSOC_USP_LSB BIT(4)
117#define SIRFSOC_SPI_FIFO_EMPTY BIT(9) 93#define SIRFSOC_USP_EN BIT(5)
118 94#define SIRFSOC_USP_RXD_FALLING_EDGE BIT(6)
119/* 256 bytes rx/tx FIFO */ 95#define SIRFSOC_USP_TXD_FALLING_EDGE BIT(7)
120#define SIRFSOC_SPI_FIFO_SIZE 256 96#define SIRFSOC_USP_CS_HIGH_VALID BIT(9)
121#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024) 97#define SIRFSOC_USP_SCLK_IDLE_STAT BIT(11)
122 98#define SIRFSOC_USP_TFS_IO_MODE BIT(14)
123#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F) 99#define SIRFSOC_USP_TFS_IO_INPUT BIT(19)
124#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10) 100
125#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20) 101#define SIRFSOC_USP_RXD_DELAY_LEN_MASK 0xFF
126#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2) 102#define SIRFSOC_USP_TXD_DELAY_LEN_MASK 0xFF
103#define SIRFSOC_USP_RXD_DELAY_OFFSET 0
104#define SIRFSOC_USP_TXD_DELAY_OFFSET 8
105#define SIRFSOC_USP_RXD_DELAY_LEN 1
106#define SIRFSOC_USP_TXD_DELAY_LEN 1
107#define SIRFSOC_USP_CLK_DIVISOR_OFFSET 21
108#define SIRFSOC_USP_CLK_DIVISOR_MASK 0x3FF
109#define SIRFSOC_USP_CLK_10_11_MASK 0x3
110#define SIRFSOC_USP_CLK_10_11_OFFSET 30
111#define SIRFSOC_USP_CLK_12_15_MASK 0xF
112#define SIRFSOC_USP_CLK_12_15_OFFSET 24
113
114#define SIRFSOC_USP_TX_DATA_OFFSET 0
115#define SIRFSOC_USP_TX_SYNC_OFFSET 8
116#define SIRFSOC_USP_TX_FRAME_OFFSET 16
117#define SIRFSOC_USP_TX_SHIFTER_OFFSET 24
118
119#define SIRFSOC_USP_TX_DATA_MASK 0xFF
120#define SIRFSOC_USP_TX_SYNC_MASK 0xFF
121#define SIRFSOC_USP_TX_FRAME_MASK 0xFF
122#define SIRFSOC_USP_TX_SHIFTER_MASK 0x1F
123
124#define SIRFSOC_USP_RX_DATA_OFFSET 0
125#define SIRFSOC_USP_RX_FRAME_OFFSET 8
126#define SIRFSOC_USP_RX_SHIFTER_OFFSET 16
127
128#define SIRFSOC_USP_RX_DATA_MASK 0xFF
129#define SIRFSOC_USP_RX_FRAME_MASK 0xFF
130#define SIRFSOC_USP_RX_SHIFTER_MASK 0x1F
131#define SIRFSOC_USP_CS_HIGH_VALUE BIT(1)
132
133#define SIRFSOC_SPI_FIFO_SC_OFFSET 0
134#define SIRFSOC_SPI_FIFO_LC_OFFSET 10
135#define SIRFSOC_SPI_FIFO_HC_OFFSET 20
136
137#define SIRFSOC_SPI_FIFO_FULL_MASK(s) (1 << ((s)->fifo_full_offset))
138#define SIRFSOC_SPI_FIFO_EMPTY_MASK(s) (1 << ((s)->fifo_full_offset + 1))
139#define SIRFSOC_SPI_FIFO_THD_MASK(s) ((s)->fifo_size - 1)
140#define SIRFSOC_SPI_FIFO_THD_OFFSET 2
141#define SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(s, val) \
142 ((val) & (s)->fifo_level_chk_mask)
143
144enum sirf_spi_type {
145 SIRF_REAL_SPI,
146 SIRF_USP_SPI_P2,
147 SIRF_USP_SPI_A7,
148};
127 149
128/* 150/*
129 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma 151 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma
@@ -137,6 +159,95 @@
137#define SIRFSOC_MAX_CMD_BYTES 4 159#define SIRFSOC_MAX_CMD_BYTES 4
138#define SIRFSOC_SPI_DEFAULT_FRQ 1000000 160#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
139 161
162struct sirf_spi_register {
163 /*SPI and USP-SPI common*/
164 u32 tx_rx_en;
165 u32 int_en;
166 u32 int_st;
167 u32 tx_dma_io_ctrl;
168 u32 tx_dma_io_len;
169 u32 txfifo_ctrl;
170 u32 txfifo_level_chk;
171 u32 txfifo_op;
172 u32 txfifo_st;
173 u32 txfifo_data;
174 u32 rx_dma_io_ctrl;
175 u32 rx_dma_io_len;
176 u32 rxfifo_ctrl;
177 u32 rxfifo_level_chk;
178 u32 rxfifo_op;
179 u32 rxfifo_st;
180 u32 rxfifo_data;
181 /*SPI self*/
182 u32 spi_ctrl;
183 u32 spi_cmd;
184 u32 spi_dummy_delay_ctrl;
185 /*USP-SPI self*/
186 u32 usp_mode1;
187 u32 usp_mode2;
188 u32 usp_tx_frame_ctrl;
189 u32 usp_rx_frame_ctrl;
190 u32 usp_pin_io_data;
191 u32 usp_risc_dsp_mode;
192 u32 usp_async_param_reg;
193 u32 usp_irda_x_mode_div;
194 u32 usp_sm_cfg;
195 u32 usp_int_en_clr;
196};
197
198static const struct sirf_spi_register real_spi_register = {
199 .tx_rx_en = 0x8,
200 .int_en = 0xc,
201 .int_st = 0x10,
202 .tx_dma_io_ctrl = 0x100,
203 .tx_dma_io_len = 0x104,
204 .txfifo_ctrl = 0x108,
205 .txfifo_level_chk = 0x10c,
206 .txfifo_op = 0x110,
207 .txfifo_st = 0x114,
208 .txfifo_data = 0x118,
209 .rx_dma_io_ctrl = 0x120,
210 .rx_dma_io_len = 0x124,
211 .rxfifo_ctrl = 0x128,
212 .rxfifo_level_chk = 0x12c,
213 .rxfifo_op = 0x130,
214 .rxfifo_st = 0x134,
215 .rxfifo_data = 0x138,
216 .spi_ctrl = 0x0,
217 .spi_cmd = 0x4,
218 .spi_dummy_delay_ctrl = 0x144,
219};
220
221static const struct sirf_spi_register usp_spi_register = {
222 .tx_rx_en = 0x10,
223 .int_en = 0x14,
224 .int_st = 0x18,
225 .tx_dma_io_ctrl = 0x100,
226 .tx_dma_io_len = 0x104,
227 .txfifo_ctrl = 0x108,
228 .txfifo_level_chk = 0x10c,
229 .txfifo_op = 0x110,
230 .txfifo_st = 0x114,
231 .txfifo_data = 0x118,
232 .rx_dma_io_ctrl = 0x120,
233 .rx_dma_io_len = 0x124,
234 .rxfifo_ctrl = 0x128,
235 .rxfifo_level_chk = 0x12c,
236 .rxfifo_op = 0x130,
237 .rxfifo_st = 0x134,
238 .rxfifo_data = 0x138,
239 .usp_mode1 = 0x0,
240 .usp_mode2 = 0x4,
241 .usp_tx_frame_ctrl = 0x8,
242 .usp_rx_frame_ctrl = 0xc,
243 .usp_pin_io_data = 0x1c,
244 .usp_risc_dsp_mode = 0x20,
245 .usp_async_param_reg = 0x24,
246 .usp_irda_x_mode_div = 0x28,
247 .usp_sm_cfg = 0x2c,
248 .usp_int_en_clr = 0x140,
249};
250
140struct sirfsoc_spi { 251struct sirfsoc_spi {
141 struct spi_bitbang bitbang; 252 struct spi_bitbang bitbang;
142 struct completion rx_done; 253 struct completion rx_done;
@@ -164,7 +275,6 @@ struct sirfsoc_spi {
164 struct dma_chan *tx_chan; 275 struct dma_chan *tx_chan;
165 dma_addr_t src_start; 276 dma_addr_t src_start;
166 dma_addr_t dst_start; 277 dma_addr_t dst_start;
167 void *dummypage;
168 int word_width; /* in bytes */ 278 int word_width; /* in bytes */
169 279
170 /* 280 /*
@@ -173,14 +283,39 @@ struct sirfsoc_spi {
173 */ 283 */
174 bool tx_by_cmd; 284 bool tx_by_cmd;
175 bool hw_cs; 285 bool hw_cs;
286 enum sirf_spi_type type;
287 const struct sirf_spi_register *regs;
288 unsigned int fifo_size;
289 /* fifo empty offset is (fifo full offset + 1)*/
290 unsigned int fifo_full_offset;
291 /* fifo_level_chk_mask is (fifo_size/4 - 1) */
292 unsigned int fifo_level_chk_mask;
293 unsigned int dat_max_frm_len;
294};
295
296struct sirf_spi_comp_data {
297 const struct sirf_spi_register *regs;
298 enum sirf_spi_type type;
299 unsigned int dat_max_frm_len;
300 unsigned int fifo_size;
301 void (*hwinit)(struct sirfsoc_spi *sspi);
176}; 302};
177 303
304static void sirfsoc_usp_hwinit(struct sirfsoc_spi *sspi)
305{
306 /* reset USP and let USP can operate */
307 writel(readl(sspi->base + sspi->regs->usp_mode1) &
308 ~SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
309 writel(readl(sspi->base + sspi->regs->usp_mode1) |
310 SIRFSOC_USP_EN, sspi->base + sspi->regs->usp_mode1);
311}
312
178static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi) 313static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
179{ 314{
180 u32 data; 315 u32 data;
181 u8 *rx = sspi->rx; 316 u8 *rx = sspi->rx;
182 317
183 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); 318 data = readl(sspi->base + sspi->regs->rxfifo_data);
184 319
185 if (rx) { 320 if (rx) {
186 *rx++ = (u8) data; 321 *rx++ = (u8) data;
@@ -199,8 +334,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
199 data = *tx++; 334 data = *tx++;
200 sspi->tx = tx; 335 sspi->tx = tx;
201 } 336 }
202 337 writel(data, sspi->base + sspi->regs->txfifo_data);
203 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
204 sspi->left_tx_word--; 338 sspi->left_tx_word--;
205} 339}
206 340
@@ -209,7 +343,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
209 u32 data; 343 u32 data;
210 u16 *rx = sspi->rx; 344 u16 *rx = sspi->rx;
211 345
212 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); 346 data = readl(sspi->base + sspi->regs->rxfifo_data);
213 347
214 if (rx) { 348 if (rx) {
215 *rx++ = (u16) data; 349 *rx++ = (u16) data;
@@ -229,7 +363,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
229 sspi->tx = tx; 363 sspi->tx = tx;
230 } 364 }
231 365
232 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); 366 writel(data, sspi->base + sspi->regs->txfifo_data);
233 sspi->left_tx_word--; 367 sspi->left_tx_word--;
234} 368}
235 369
@@ -238,7 +372,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
238 u32 data; 372 u32 data;
239 u32 *rx = sspi->rx; 373 u32 *rx = sspi->rx;
240 374
241 data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA); 375 data = readl(sspi->base + sspi->regs->rxfifo_data);
242 376
243 if (rx) { 377 if (rx) {
244 *rx++ = (u32) data; 378 *rx++ = (u32) data;
@@ -259,41 +393,59 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
259 sspi->tx = tx; 393 sspi->tx = tx;
260 } 394 }
261 395
262 writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA); 396 writel(data, sspi->base + sspi->regs->txfifo_data);
263 sspi->left_tx_word--; 397 sspi->left_tx_word--;
264} 398}
265 399
266static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id) 400static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
267{ 401{
268 struct sirfsoc_spi *sspi = dev_id; 402 struct sirfsoc_spi *sspi = dev_id;
269 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS); 403 u32 spi_stat;
270 if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) { 404
405 spi_stat = readl(sspi->base + sspi->regs->int_st);
406 if (sspi->tx_by_cmd && sspi->type == SIRF_REAL_SPI
407 && (spi_stat & SIRFSOC_SPI_FRM_END)) {
271 complete(&sspi->tx_done); 408 complete(&sspi->tx_done);
272 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 409 writel(0x0, sspi->base + sspi->regs->int_en);
273 writel(SIRFSOC_SPI_INT_MASK_ALL, 410 writel(readl(sspi->base + sspi->regs->int_st),
274 sspi->base + SIRFSOC_SPI_INT_STATUS); 411 sspi->base + sspi->regs->int_st);
275 return IRQ_HANDLED; 412 return IRQ_HANDLED;
276 } 413 }
277
278 /* Error Conditions */ 414 /* Error Conditions */
279 if (spi_stat & SIRFSOC_SPI_RX_OFLOW || 415 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
280 spi_stat & SIRFSOC_SPI_TX_UFLOW) { 416 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
281 complete(&sspi->tx_done); 417 complete(&sspi->tx_done);
282 complete(&sspi->rx_done); 418 complete(&sspi->rx_done);
283 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 419 switch (sspi->type) {
284 writel(SIRFSOC_SPI_INT_MASK_ALL, 420 case SIRF_REAL_SPI:
285 sspi->base + SIRFSOC_SPI_INT_STATUS); 421 case SIRF_USP_SPI_P2:
422 writel(0x0, sspi->base + sspi->regs->int_en);
423 break;
424 case SIRF_USP_SPI_A7:
425 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
426 break;
427 }
428 writel(readl(sspi->base + sspi->regs->int_st),
429 sspi->base + sspi->regs->int_st);
286 return IRQ_HANDLED; 430 return IRQ_HANDLED;
287 } 431 }
288 if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY) 432 if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
289 complete(&sspi->tx_done); 433 complete(&sspi->tx_done);
290 while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) & 434 while (!(readl(sspi->base + sspi->regs->int_st) &
291 SIRFSOC_SPI_RX_IO_DMA)) 435 SIRFSOC_SPI_RX_IO_DMA))
292 cpu_relax(); 436 cpu_relax();
293 complete(&sspi->rx_done); 437 complete(&sspi->rx_done);
294 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 438 switch (sspi->type) {
295 writel(SIRFSOC_SPI_INT_MASK_ALL, 439 case SIRF_REAL_SPI:
296 sspi->base + SIRFSOC_SPI_INT_STATUS); 440 case SIRF_USP_SPI_P2:
441 writel(0x0, sspi->base + sspi->regs->int_en);
442 break;
443 case SIRF_USP_SPI_A7:
444 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
445 break;
446 }
447 writel(readl(sspi->base + sspi->regs->int_st),
448 sspi->base + sspi->regs->int_st);
297 449
298 return IRQ_HANDLED; 450 return IRQ_HANDLED;
299} 451}
@@ -313,8 +465,8 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
313 u32 cmd; 465 u32 cmd;
314 466
315 sspi = spi_master_get_devdata(spi->master); 467 sspi = spi_master_get_devdata(spi->master);
316 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 468 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
317 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 469 writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
318 memcpy(&cmd, sspi->tx, t->len); 470 memcpy(&cmd, sspi->tx, t->len);
319 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) 471 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
320 cmd = cpu_to_be32(cmd) >> 472 cmd = cpu_to_be32(cmd) >>
@@ -322,11 +474,11 @@ static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
322 if (sspi->word_width == 2 && t->len == 4 && 474 if (sspi->word_width == 2 && t->len == 4 &&
323 (!(spi->mode & SPI_LSB_FIRST))) 475 (!(spi->mode & SPI_LSB_FIRST)))
324 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16); 476 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
325 writel(cmd, sspi->base + SIRFSOC_SPI_CMD); 477 writel(cmd, sspi->base + sspi->regs->spi_cmd);
326 writel(SIRFSOC_SPI_FRM_END_INT_EN, 478 writel(SIRFSOC_SPI_FRM_END_INT_EN,
327 sspi->base + SIRFSOC_SPI_INT_EN); 479 sspi->base + sspi->regs->int_en);
328 writel(SIRFSOC_SPI_CMD_TX_EN, 480 writel(SIRFSOC_SPI_CMD_TX_EN,
329 sspi->base + SIRFSOC_SPI_TX_RX_EN); 481 sspi->base + sspi->regs->tx_rx_en);
330 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 482 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
331 dev_err(&spi->dev, "cmd transfer timeout\n"); 483 dev_err(&spi->dev, "cmd transfer timeout\n");
332 return; 484 return;
@@ -342,25 +494,56 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
342 int timeout = t->len * 10; 494 int timeout = t->len * 10;
343 495
344 sspi = spi_master_get_devdata(spi->master); 496 sspi = spi_master_get_devdata(spi->master);
345 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 497 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
346 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 498 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
347 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 499 switch (sspi->type) {
348 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 500 case SIRF_REAL_SPI:
349 writel(0, sspi->base + SIRFSOC_SPI_INT_EN); 501 writel(SIRFSOC_SPI_FIFO_START,
350 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); 502 sspi->base + sspi->regs->rxfifo_op);
351 if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) { 503 writel(SIRFSOC_SPI_FIFO_START,
352 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 504 sspi->base + sspi->regs->txfifo_op);
353 SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE, 505 writel(0, sspi->base + sspi->regs->int_en);
354 sspi->base + SIRFSOC_SPI_CTRL); 506 break;
355 writel(sspi->left_tx_word - 1, 507 case SIRF_USP_SPI_P2:
356 sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 508 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
357 writel(sspi->left_tx_word - 1, 509 writel(0x0, sspi->base + sspi->regs->txfifo_op);
358 sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 510 writel(0, sspi->base + sspi->regs->int_en);
511 break;
512 case SIRF_USP_SPI_A7:
513 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
514 writel(0x0, sspi->base + sspi->regs->txfifo_op);
515 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
516 break;
517 }
518 writel(readl(sspi->base + sspi->regs->int_st),
519 sspi->base + sspi->regs->int_st);
520 if (sspi->left_tx_word < sspi->dat_max_frm_len) {
521 switch (sspi->type) {
522 case SIRF_REAL_SPI:
523 writel(readl(sspi->base + sspi->regs->spi_ctrl) |
524 SIRFSOC_SPI_ENA_AUTO_CLR |
525 SIRFSOC_SPI_MUL_DAT_MODE,
526 sspi->base + sspi->regs->spi_ctrl);
527 writel(sspi->left_tx_word - 1,
528 sspi->base + sspi->regs->tx_dma_io_len);
529 writel(sspi->left_tx_word - 1,
530 sspi->base + sspi->regs->rx_dma_io_len);
531 break;
532 case SIRF_USP_SPI_P2:
533 case SIRF_USP_SPI_A7:
534 /*USP simulate SPI, tx/rx_dma_io_len indicates bytes*/
535 writel(sspi->left_tx_word * sspi->word_width,
536 sspi->base + sspi->regs->tx_dma_io_len);
537 writel(sspi->left_tx_word * sspi->word_width,
538 sspi->base + sspi->regs->rx_dma_io_len);
539 break;
540 }
359 } else { 541 } else {
360 writel(readl(sspi->base + SIRFSOC_SPI_CTRL), 542 if (sspi->type == SIRF_REAL_SPI)
361 sspi->base + SIRFSOC_SPI_CTRL); 543 writel(readl(sspi->base + sspi->regs->spi_ctrl),
362 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 544 sspi->base + sspi->regs->spi_ctrl);
363 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 545 writel(0, sspi->base + sspi->regs->tx_dma_io_len);
546 writel(0, sspi->base + sspi->regs->rx_dma_io_len);
364 } 547 }
365 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, 548 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
366 (t->tx_buf != t->rx_buf) ? 549 (t->tx_buf != t->rx_buf) ?
@@ -385,7 +568,14 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
385 dma_async_issue_pending(sspi->tx_chan); 568 dma_async_issue_pending(sspi->tx_chan);
386 dma_async_issue_pending(sspi->rx_chan); 569 dma_async_issue_pending(sspi->rx_chan);
387 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, 570 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
388 sspi->base + SIRFSOC_SPI_TX_RX_EN); 571 sspi->base + sspi->regs->tx_rx_en);
572 if (sspi->type == SIRF_USP_SPI_P2 ||
573 sspi->type == SIRF_USP_SPI_A7) {
574 writel(SIRFSOC_SPI_FIFO_START,
575 sspi->base + sspi->regs->rxfifo_op);
576 writel(SIRFSOC_SPI_FIFO_START,
577 sspi->base + sspi->regs->txfifo_op);
578 }
389 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) { 579 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
390 dev_err(&spi->dev, "transfer timeout\n"); 580 dev_err(&spi->dev, "transfer timeout\n");
391 dmaengine_terminate_all(sspi->rx_chan); 581 dmaengine_terminate_all(sspi->rx_chan);
@@ -398,15 +588,21 @@ static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
398 */ 588 */
399 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 589 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
400 dev_err(&spi->dev, "transfer timeout\n"); 590 dev_err(&spi->dev, "transfer timeout\n");
591 if (sspi->type == SIRF_USP_SPI_P2 ||
592 sspi->type == SIRF_USP_SPI_A7)
593 writel(0, sspi->base + sspi->regs->tx_rx_en);
401 dmaengine_terminate_all(sspi->tx_chan); 594 dmaengine_terminate_all(sspi->tx_chan);
402 } 595 }
403 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE); 596 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
404 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE); 597 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
405 /* TX, RX FIFO stop */ 598 /* TX, RX FIFO stop */
406 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 599 writel(0, sspi->base + sspi->regs->rxfifo_op);
407 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 600 writel(0, sspi->base + sspi->regs->txfifo_op);
408 if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX) 601 if (sspi->left_tx_word >= sspi->dat_max_frm_len)
409 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); 602 writel(0, sspi->base + sspi->regs->tx_rx_en);
603 if (sspi->type == SIRF_USP_SPI_P2 ||
604 sspi->type == SIRF_USP_SPI_A7)
605 writel(0, sspi->base + sspi->regs->tx_rx_en);
410} 606}
411 607
412static void spi_sirfsoc_pio_transfer(struct spi_device *spi, 608static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
@@ -414,57 +610,105 @@ static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
414{ 610{
415 struct sirfsoc_spi *sspi; 611 struct sirfsoc_spi *sspi;
416 int timeout = t->len * 10; 612 int timeout = t->len * 10;
613 unsigned int data_units;
417 614
418 sspi = spi_master_get_devdata(spi->master); 615 sspi = spi_master_get_devdata(spi->master);
419 do { 616 do {
420 writel(SIRFSOC_SPI_FIFO_RESET, 617 writel(SIRFSOC_SPI_FIFO_RESET,
421 sspi->base + SIRFSOC_SPI_RXFIFO_OP); 618 sspi->base + sspi->regs->rxfifo_op);
422 writel(SIRFSOC_SPI_FIFO_RESET, 619 writel(SIRFSOC_SPI_FIFO_RESET,
423 sspi->base + SIRFSOC_SPI_TXFIFO_OP); 620 sspi->base + sspi->regs->txfifo_op);
424 writel(SIRFSOC_SPI_FIFO_START, 621 switch (sspi->type) {
425 sspi->base + SIRFSOC_SPI_RXFIFO_OP); 622 case SIRF_USP_SPI_P2:
426 writel(SIRFSOC_SPI_FIFO_START, 623 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
427 sspi->base + SIRFSOC_SPI_TXFIFO_OP); 624 writel(0x0, sspi->base + sspi->regs->txfifo_op);
428 writel(0, sspi->base + SIRFSOC_SPI_INT_EN); 625 writel(0, sspi->base + sspi->regs->int_en);
429 writel(SIRFSOC_SPI_INT_MASK_ALL, 626 writel(readl(sspi->base + sspi->regs->int_st),
430 sspi->base + SIRFSOC_SPI_INT_STATUS); 627 sspi->base + sspi->regs->int_st);
431 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 628 writel(min((sspi->left_tx_word * sspi->word_width),
432 SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR, 629 sspi->fifo_size),
433 sspi->base + SIRFSOC_SPI_CTRL); 630 sspi->base + sspi->regs->tx_dma_io_len);
434 writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width)) 631 writel(min((sspi->left_rx_word * sspi->word_width),
435 - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 632 sspi->fifo_size),
436 writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width)) 633 sspi->base + sspi->regs->rx_dma_io_len);
437 - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 634 break;
438 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) 635 case SIRF_USP_SPI_A7:
439 & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word) 636 writel(0x0, sspi->base + sspi->regs->rxfifo_op);
637 writel(0x0, sspi->base + sspi->regs->txfifo_op);
638 writel(~0UL, sspi->base + sspi->regs->usp_int_en_clr);
639 writel(readl(sspi->base + sspi->regs->int_st),
640 sspi->base + sspi->regs->int_st);
641 writel(min((sspi->left_tx_word * sspi->word_width),
642 sspi->fifo_size),
643 sspi->base + sspi->regs->tx_dma_io_len);
644 writel(min((sspi->left_rx_word * sspi->word_width),
645 sspi->fifo_size),
646 sspi->base + sspi->regs->rx_dma_io_len);
647 break;
648 case SIRF_REAL_SPI:
649 writel(SIRFSOC_SPI_FIFO_START,
650 sspi->base + sspi->regs->rxfifo_op);
651 writel(SIRFSOC_SPI_FIFO_START,
652 sspi->base + sspi->regs->txfifo_op);
653 writel(0, sspi->base + sspi->regs->int_en);
654 writel(readl(sspi->base + sspi->regs->int_st),
655 sspi->base + sspi->regs->int_st);
656 writel(readl(sspi->base + sspi->regs->spi_ctrl) |
657 SIRFSOC_SPI_MUL_DAT_MODE |
658 SIRFSOC_SPI_ENA_AUTO_CLR,
659 sspi->base + sspi->regs->spi_ctrl);
660 data_units = sspi->fifo_size / sspi->word_width;
661 writel(min(sspi->left_tx_word, data_units) - 1,
662 sspi->base + sspi->regs->tx_dma_io_len);
663 writel(min(sspi->left_rx_word, data_units) - 1,
664 sspi->base + sspi->regs->rx_dma_io_len);
665 break;
666 }
667 while (!((readl(sspi->base + sspi->regs->txfifo_st)
668 & SIRFSOC_SPI_FIFO_FULL_MASK(sspi))) &&
669 sspi->left_tx_word)
440 sspi->tx_word(sspi); 670 sspi->tx_word(sspi);
441 writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN | 671 writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
442 SIRFSOC_SPI_TX_UFLOW_INT_EN | 672 SIRFSOC_SPI_TX_UFLOW_INT_EN |
443 SIRFSOC_SPI_RX_OFLOW_INT_EN | 673 SIRFSOC_SPI_RX_OFLOW_INT_EN |
444 SIRFSOC_SPI_RX_IO_DMA_INT_EN, 674 SIRFSOC_SPI_RX_IO_DMA_INT_EN,
445 sspi->base + SIRFSOC_SPI_INT_EN); 675 sspi->base + sspi->regs->int_en);
446 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, 676 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
447 sspi->base + SIRFSOC_SPI_TX_RX_EN); 677 sspi->base + sspi->regs->tx_rx_en);
678 if (sspi->type == SIRF_USP_SPI_P2 ||
679 sspi->type == SIRF_USP_SPI_A7) {
680 writel(SIRFSOC_SPI_FIFO_START,
681 sspi->base + sspi->regs->rxfifo_op);
682 writel(SIRFSOC_SPI_FIFO_START,
683 sspi->base + sspi->regs->txfifo_op);
684 }
448 if (!wait_for_completion_timeout(&sspi->tx_done, timeout) || 685 if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
449 !wait_for_completion_timeout(&sspi->rx_done, timeout)) { 686 !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
450 dev_err(&spi->dev, "transfer timeout\n"); 687 dev_err(&spi->dev, "transfer timeout\n");
688 if (sspi->type == SIRF_USP_SPI_P2 ||
689 sspi->type == SIRF_USP_SPI_A7)
690 writel(0, sspi->base + sspi->regs->tx_rx_en);
451 break; 691 break;
452 } 692 }
453 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS) 693 while (!((readl(sspi->base + sspi->regs->rxfifo_st)
454 & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word) 694 & SIRFSOC_SPI_FIFO_EMPTY_MASK(sspi))) &&
695 sspi->left_rx_word)
455 sspi->rx_word(sspi); 696 sspi->rx_word(sspi);
456 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 697 if (sspi->type == SIRF_USP_SPI_P2 ||
457 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 698 sspi->type == SIRF_USP_SPI_A7)
699 writel(0, sspi->base + sspi->regs->tx_rx_en);
700 writel(0, sspi->base + sspi->regs->rxfifo_op);
701 writel(0, sspi->base + sspi->regs->txfifo_op);
458 } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0); 702 } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
459} 703}
460 704
461static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) 705static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
462{ 706{
463 struct sirfsoc_spi *sspi; 707 struct sirfsoc_spi *sspi;
464 sspi = spi_master_get_devdata(spi->master);
465 708
466 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; 709 sspi = spi_master_get_devdata(spi->master);
467 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; 710 sspi->tx = t->tx_buf;
711 sspi->rx = t->rx_buf;
468 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; 712 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
469 reinit_completion(&sspi->rx_done); 713 reinit_completion(&sspi->rx_done);
470 reinit_completion(&sspi->tx_done); 714 reinit_completion(&sspi->tx_done);
@@ -473,7 +717,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
473 * null, just fill command data into command register and wait for its 717 * null, just fill command data into command register and wait for its
474 * completion. 718 * completion.
475 */ 719 */
476 if (sspi->tx_by_cmd) 720 if (sspi->type == SIRF_REAL_SPI && sspi->tx_by_cmd)
477 spi_sirfsoc_cmd_transfer(spi, t); 721 spi_sirfsoc_cmd_transfer(spi, t);
478 else if (IS_DMA_VALID(t)) 722 else if (IS_DMA_VALID(t))
479 spi_sirfsoc_dma_transfer(spi, t); 723 spi_sirfsoc_dma_transfer(spi, t);
@@ -488,22 +732,49 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
488 struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master); 732 struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
489 733
490 if (sspi->hw_cs) { 734 if (sspi->hw_cs) {
491 u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL); 735 u32 regval;
492 switch (value) { 736
493 case BITBANG_CS_ACTIVE: 737 switch (sspi->type) {
494 if (spi->mode & SPI_CS_HIGH) 738 case SIRF_REAL_SPI:
495 regval |= SIRFSOC_SPI_CS_IO_OUT; 739 regval = readl(sspi->base + sspi->regs->spi_ctrl);
496 else 740 switch (value) {
497 regval &= ~SIRFSOC_SPI_CS_IO_OUT; 741 case BITBANG_CS_ACTIVE:
742 if (spi->mode & SPI_CS_HIGH)
743 regval |= SIRFSOC_SPI_CS_IO_OUT;
744 else
745 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
746 break;
747 case BITBANG_CS_INACTIVE:
748 if (spi->mode & SPI_CS_HIGH)
749 regval &= ~SIRFSOC_SPI_CS_IO_OUT;
750 else
751 regval |= SIRFSOC_SPI_CS_IO_OUT;
752 break;
753 }
754 writel(regval, sspi->base + sspi->regs->spi_ctrl);
498 break; 755 break;
499 case BITBANG_CS_INACTIVE: 756 case SIRF_USP_SPI_P2:
500 if (spi->mode & SPI_CS_HIGH) 757 case SIRF_USP_SPI_A7:
501 regval &= ~SIRFSOC_SPI_CS_IO_OUT; 758 regval = readl(sspi->base +
502 else 759 sspi->regs->usp_pin_io_data);
503 regval |= SIRFSOC_SPI_CS_IO_OUT; 760 switch (value) {
761 case BITBANG_CS_ACTIVE:
762 if (spi->mode & SPI_CS_HIGH)
763 regval |= SIRFSOC_USP_CS_HIGH_VALUE;
764 else
765 regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
766 break;
767 case BITBANG_CS_INACTIVE:
768 if (spi->mode & SPI_CS_HIGH)
769 regval &= ~(SIRFSOC_USP_CS_HIGH_VALUE);
770 else
771 regval |= SIRFSOC_USP_CS_HIGH_VALUE;
772 break;
773 }
774 writel(regval,
775 sspi->base + sspi->regs->usp_pin_io_data);
504 break; 776 break;
505 } 777 }
506 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
507 } else { 778 } else {
508 switch (value) { 779 switch (value) {
509 case BITBANG_CS_ACTIVE: 780 case BITBANG_CS_ACTIVE:
@@ -518,27 +789,102 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
518 } 789 }
519} 790}
520 791
792static int spi_sirfsoc_config_mode(struct spi_device *spi)
793{
794 struct sirfsoc_spi *sspi;
795 u32 regval, usp_mode1;
796
797 sspi = spi_master_get_devdata(spi->master);
798 regval = readl(sspi->base + sspi->regs->spi_ctrl);
799 usp_mode1 = readl(sspi->base + sspi->regs->usp_mode1);
800 if (!(spi->mode & SPI_CS_HIGH)) {
801 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
802 usp_mode1 &= ~SIRFSOC_USP_CS_HIGH_VALID;
803 } else {
804 regval &= ~SIRFSOC_SPI_CS_IDLE_STAT;
805 usp_mode1 |= SIRFSOC_USP_CS_HIGH_VALID;
806 }
807 if (!(spi->mode & SPI_LSB_FIRST)) {
808 regval |= SIRFSOC_SPI_TRAN_MSB;
809 usp_mode1 &= ~SIRFSOC_USP_LSB;
810 } else {
811 regval &= ~SIRFSOC_SPI_TRAN_MSB;
812 usp_mode1 |= SIRFSOC_USP_LSB;
813 }
814 if (spi->mode & SPI_CPOL) {
815 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
816 usp_mode1 |= SIRFSOC_USP_SCLK_IDLE_STAT;
817 } else {
818 regval &= ~SIRFSOC_SPI_CLK_IDLE_STAT;
819 usp_mode1 &= ~SIRFSOC_USP_SCLK_IDLE_STAT;
820 }
821 /*
822 * Data should be driven at least 1/2 cycle before the fetch edge
823 * to make sure that data gets stable at the fetch edge.
824 */
825 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
826 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) {
827 regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
828 usp_mode1 |= (SIRFSOC_USP_TXD_FALLING_EDGE |
829 SIRFSOC_USP_RXD_FALLING_EDGE);
830 } else {
831 regval |= SIRFSOC_SPI_DRV_POS_EDGE;
832 usp_mode1 &= ~(SIRFSOC_USP_RXD_FALLING_EDGE |
833 SIRFSOC_USP_TXD_FALLING_EDGE);
834 }
835 writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
836 SIRFSOC_SPI_FIFO_SC_OFFSET) |
837 (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
838 SIRFSOC_SPI_FIFO_LC_OFFSET) |
839 (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
840 SIRFSOC_SPI_FIFO_HC_OFFSET),
841 sspi->base + sspi->regs->txfifo_level_chk);
842 writel((SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, 2) <<
843 SIRFSOC_SPI_FIFO_SC_OFFSET) |
844 (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size / 2) <<
845 SIRFSOC_SPI_FIFO_LC_OFFSET) |
846 (SIRFSOC_SPI_FIFO_LEVEL_CHK_MASK(sspi, sspi->fifo_size - 2) <<
847 SIRFSOC_SPI_FIFO_HC_OFFSET),
848 sspi->base + sspi->regs->rxfifo_level_chk);
849 /*
850 * it should never set to hardware cs mode because in hardware cs mode,
851 * cs signal can't controlled by driver.
852 */
853 switch (sspi->type) {
854 case SIRF_REAL_SPI:
855 regval |= SIRFSOC_SPI_CS_IO_MODE;
856 writel(regval, sspi->base + sspi->regs->spi_ctrl);
857 break;
858 case SIRF_USP_SPI_P2:
859 case SIRF_USP_SPI_A7:
860 usp_mode1 |= SIRFSOC_USP_SYNC_MODE;
861 usp_mode1 |= SIRFSOC_USP_TFS_IO_MODE;
862 usp_mode1 &= ~SIRFSOC_USP_TFS_IO_INPUT;
863 writel(usp_mode1, sspi->base + sspi->regs->usp_mode1);
864 break;
865 }
866
867 return 0;
868}
869
521static int 870static int
522spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t) 871spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
523{ 872{
524 struct sirfsoc_spi *sspi; 873 struct sirfsoc_spi *sspi;
525 u8 bits_per_word = 0; 874 u8 bits_per_word = 0;
526 int hz = 0; 875 int hz = 0;
527 u32 regval; 876 u32 regval, txfifo_ctrl, rxfifo_ctrl, tx_frm_ctl, rx_frm_ctl, usp_mode2;
528 u32 txfifo_ctrl, rxfifo_ctrl;
529 u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
530 877
531 sspi = spi_master_get_devdata(spi->master); 878 sspi = spi_master_get_devdata(spi->master);
532 879
533 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; 880 bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
534 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz; 881 hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
535 882
536 regval = (sspi->ctrl_freq / (2 * hz)) - 1; 883 usp_mode2 = regval = (sspi->ctrl_freq / (2 * hz)) - 1;
537 if (regval > 0xFFFF || regval < 0) { 884 if (regval > 0xFFFF || regval < 0) {
538 dev_err(&spi->dev, "Speed %d not supported\n", hz); 885 dev_err(&spi->dev, "Speed %d not supported\n", hz);
539 return -EINVAL; 886 return -EINVAL;
540 } 887 }
541
542 switch (bits_per_word) { 888 switch (bits_per_word) {
543 case 8: 889 case 8:
544 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8; 890 regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
@@ -559,94 +905,177 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
559 sspi->tx_word = spi_sirfsoc_tx_word_u32; 905 sspi->tx_word = spi_sirfsoc_tx_word_u32;
560 break; 906 break;
561 default: 907 default:
562 BUG(); 908 dev_err(&spi->dev, "bpw %d not supported\n", bits_per_word);
909 return -EINVAL;
563 } 910 }
564
565 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); 911 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
566 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 912 txfifo_ctrl = (((sspi->fifo_size / 2) &
567 (sspi->word_width >> 1); 913 SIRFSOC_SPI_FIFO_THD_MASK(sspi))
568 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 914 << SIRFSOC_SPI_FIFO_THD_OFFSET) |
569 (sspi->word_width >> 1); 915 (sspi->word_width >> 1);
570 916 rxfifo_ctrl = (((sspi->fifo_size / 2) &
571 if (!(spi->mode & SPI_CS_HIGH)) 917 SIRFSOC_SPI_FIFO_THD_MASK(sspi))
572 regval |= SIRFSOC_SPI_CS_IDLE_STAT; 918 << SIRFSOC_SPI_FIFO_THD_OFFSET) |
573 if (!(spi->mode & SPI_LSB_FIRST)) 919 (sspi->word_width >> 1);
574 regval |= SIRFSOC_SPI_TRAN_MSB; 920 writel(txfifo_ctrl, sspi->base + sspi->regs->txfifo_ctrl);
575 if (spi->mode & SPI_CPOL) 921 writel(rxfifo_ctrl, sspi->base + sspi->regs->rxfifo_ctrl);
576 regval |= SIRFSOC_SPI_CLK_IDLE_STAT; 922 if (sspi->type == SIRF_USP_SPI_P2 ||
577 923 sspi->type == SIRF_USP_SPI_A7) {
578 /* 924 tx_frm_ctl = 0;
579 * Data should be driven at least 1/2 cycle before the fetch edge 925 tx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_TX_DATA_MASK)
580 * to make sure that data gets stable at the fetch edge. 926 << SIRFSOC_USP_TX_DATA_OFFSET;
581 */ 927 tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
582 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || 928 - 1) & SIRFSOC_USP_TX_SYNC_MASK) <<
583 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) 929 SIRFSOC_USP_TX_SYNC_OFFSET;
584 regval &= ~SIRFSOC_SPI_DRV_POS_EDGE; 930 tx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_TXD_DELAY_LEN
585 else 931 + 2 - 1) & SIRFSOC_USP_TX_FRAME_MASK) <<
586 regval |= SIRFSOC_SPI_DRV_POS_EDGE; 932 SIRFSOC_USP_TX_FRAME_OFFSET;
587 933 tx_frm_ctl |= ((bits_per_word - 1) &
588 writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) | 934 SIRFSOC_USP_TX_SHIFTER_MASK) <<
589 SIRFSOC_SPI_FIFO_LC(fifo_size / 2) | 935 SIRFSOC_USP_TX_SHIFTER_OFFSET;
590 SIRFSOC_SPI_FIFO_HC(2), 936 rx_frm_ctl = 0;
591 sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK); 937 rx_frm_ctl |= ((bits_per_word - 1) & SIRFSOC_USP_RX_DATA_MASK)
592 writel(SIRFSOC_SPI_FIFO_SC(2) | 938 << SIRFSOC_USP_RX_DATA_OFFSET;
593 SIRFSOC_SPI_FIFO_LC(fifo_size / 2) | 939 rx_frm_ctl |= ((bits_per_word + 1 + SIRFSOC_USP_RXD_DELAY_LEN
594 SIRFSOC_SPI_FIFO_HC(fifo_size - 2), 940 + 2 - 1) & SIRFSOC_USP_RX_FRAME_MASK) <<
595 sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK); 941 SIRFSOC_USP_RX_FRAME_OFFSET;
596 writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL); 942 rx_frm_ctl |= ((bits_per_word - 1)
597 writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL); 943 & SIRFSOC_USP_RX_SHIFTER_MASK) <<
598 944 SIRFSOC_USP_RX_SHIFTER_OFFSET;
599 if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) { 945 writel(tx_frm_ctl | (((usp_mode2 >> 10) &
600 regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) | 946 SIRFSOC_USP_CLK_10_11_MASK) <<
601 SIRFSOC_SPI_CMD_MODE); 947 SIRFSOC_USP_CLK_10_11_OFFSET),
602 sspi->tx_by_cmd = true; 948 sspi->base + sspi->regs->usp_tx_frame_ctrl);
603 } else { 949 writel(rx_frm_ctl | (((usp_mode2 >> 12) &
604 regval &= ~SIRFSOC_SPI_CMD_MODE; 950 SIRFSOC_USP_CLK_12_15_MASK) <<
605 sspi->tx_by_cmd = false; 951 SIRFSOC_USP_CLK_12_15_OFFSET),
952 sspi->base + sspi->regs->usp_rx_frame_ctrl);
953 writel(readl(sspi->base + sspi->regs->usp_mode2) |
954 ((usp_mode2 & SIRFSOC_USP_CLK_DIVISOR_MASK) <<
955 SIRFSOC_USP_CLK_DIVISOR_OFFSET) |
956 (SIRFSOC_USP_RXD_DELAY_LEN <<
957 SIRFSOC_USP_RXD_DELAY_OFFSET) |
958 (SIRFSOC_USP_TXD_DELAY_LEN <<
959 SIRFSOC_USP_TXD_DELAY_OFFSET),
960 sspi->base + sspi->regs->usp_mode2);
961 }
962 if (sspi->type == SIRF_REAL_SPI)
963 writel(regval, sspi->base + sspi->regs->spi_ctrl);
964 spi_sirfsoc_config_mode(spi);
965 if (sspi->type == SIRF_REAL_SPI) {
966 if (t && t->tx_buf && !t->rx_buf &&
967 (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
968 sspi->tx_by_cmd = true;
969 writel(readl(sspi->base + sspi->regs->spi_ctrl) |
970 (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
971 SIRFSOC_SPI_CMD_MODE),
972 sspi->base + sspi->regs->spi_ctrl);
973 } else {
974 sspi->tx_by_cmd = false;
975 writel(readl(sspi->base + sspi->regs->spi_ctrl) &
976 ~SIRFSOC_SPI_CMD_MODE,
977 sspi->base + sspi->regs->spi_ctrl);
978 }
606 } 979 }
607 /*
608 * it should never set to hardware cs mode because in hardware cs mode,
609 * cs signal can't controlled by driver.
610 */
611 regval |= SIRFSOC_SPI_CS_IO_MODE;
612 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
613
614 if (IS_DMA_VALID(t)) { 980 if (IS_DMA_VALID(t)) {
615 /* Enable DMA mode for RX, TX */ 981 /* Enable DMA mode for RX, TX */
616 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 982 writel(0, sspi->base + sspi->regs->tx_dma_io_ctrl);
617 writel(SIRFSOC_SPI_RX_DMA_FLUSH, 983 writel(SIRFSOC_SPI_RX_DMA_FLUSH,
618 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 984 sspi->base + sspi->regs->rx_dma_io_ctrl);
619 } else { 985 } else {
620 /* Enable IO mode for RX, TX */ 986 /* Enable IO mode for RX, TX */
621 writel(SIRFSOC_SPI_IO_MODE_SEL, 987 writel(SIRFSOC_SPI_IO_MODE_SEL,
622 sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 988 sspi->base + sspi->regs->tx_dma_io_ctrl);
623 writel(SIRFSOC_SPI_IO_MODE_SEL, 989 writel(SIRFSOC_SPI_IO_MODE_SEL,
624 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 990 sspi->base + sspi->regs->rx_dma_io_ctrl);
625 } 991 }
626
627 return 0; 992 return 0;
628} 993}
629 994
630static int spi_sirfsoc_setup(struct spi_device *spi) 995static int spi_sirfsoc_setup(struct spi_device *spi)
631{ 996{
632 struct sirfsoc_spi *sspi; 997 struct sirfsoc_spi *sspi;
998 int ret = 0;
633 999
634 sspi = spi_master_get_devdata(spi->master); 1000 sspi = spi_master_get_devdata(spi->master);
635
636 if (spi->cs_gpio == -ENOENT) 1001 if (spi->cs_gpio == -ENOENT)
637 sspi->hw_cs = true; 1002 sspi->hw_cs = true;
638 else 1003 else {
639 sspi->hw_cs = false; 1004 sspi->hw_cs = false;
640 return spi_sirfsoc_setup_transfer(spi, NULL); 1005 if (!spi_get_ctldata(spi)) {
1006 void *cs = kmalloc(sizeof(int), GFP_KERNEL);
1007 if (!cs) {
1008 ret = -ENOMEM;
1009 goto exit;
1010 }
1011 ret = gpio_is_valid(spi->cs_gpio);
1012 if (!ret) {
1013 dev_err(&spi->dev, "no valid gpio\n");
1014 ret = -ENOENT;
1015 goto exit;
1016 }
1017 ret = gpio_request(spi->cs_gpio, DRIVER_NAME);
1018 if (ret) {
1019 dev_err(&spi->dev, "failed to request gpio\n");
1020 goto exit;
1021 }
1022 spi_set_ctldata(spi, cs);
1023 }
1024 }
1025 spi_sirfsoc_config_mode(spi);
1026 spi_sirfsoc_chipselect(spi, BITBANG_CS_INACTIVE);
1027exit:
1028 return ret;
1029}
1030
1031static void spi_sirfsoc_cleanup(struct spi_device *spi)
1032{
1033 if (spi_get_ctldata(spi)) {
1034 gpio_free(spi->cs_gpio);
1035 kfree(spi_get_ctldata(spi));
1036 }
641} 1037}
642 1038
1039static const struct sirf_spi_comp_data sirf_real_spi = {
1040 .regs = &real_spi_register,
1041 .type = SIRF_REAL_SPI,
1042 .dat_max_frm_len = 64 * 1024,
1043 .fifo_size = 256,
1044};
1045
1046static const struct sirf_spi_comp_data sirf_usp_spi_p2 = {
1047 .regs = &usp_spi_register,
1048 .type = SIRF_USP_SPI_P2,
1049 .dat_max_frm_len = 1024 * 1024,
1050 .fifo_size = 128,
1051 .hwinit = sirfsoc_usp_hwinit,
1052};
1053
1054static const struct sirf_spi_comp_data sirf_usp_spi_a7 = {
1055 .regs = &usp_spi_register,
1056 .type = SIRF_USP_SPI_A7,
1057 .dat_max_frm_len = 1024 * 1024,
1058 .fifo_size = 512,
1059 .hwinit = sirfsoc_usp_hwinit,
1060};
1061
1062static const struct of_device_id spi_sirfsoc_of_match[] = {
1063 { .compatible = "sirf,prima2-spi", .data = &sirf_real_spi},
1064 { .compatible = "sirf,prima2-usp-spi", .data = &sirf_usp_spi_p2},
1065 { .compatible = "sirf,atlas7-usp-spi", .data = &sirf_usp_spi_a7},
1066 {}
1067};
1068MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
1069
643static int spi_sirfsoc_probe(struct platform_device *pdev) 1070static int spi_sirfsoc_probe(struct platform_device *pdev)
644{ 1071{
645 struct sirfsoc_spi *sspi; 1072 struct sirfsoc_spi *sspi;
646 struct spi_master *master; 1073 struct spi_master *master;
647 struct resource *mem_res; 1074 struct resource *mem_res;
1075 struct sirf_spi_comp_data *spi_comp_data;
648 int irq; 1076 int irq;
649 int i, ret; 1077 int ret;
1078 const struct of_device_id *match;
650 1079
651 ret = device_reset(&pdev->dev); 1080 ret = device_reset(&pdev->dev);
652 if (ret) { 1081 if (ret) {
@@ -659,16 +1088,22 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
659 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 1088 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
660 return -ENOMEM; 1089 return -ENOMEM;
661 } 1090 }
1091 match = of_match_node(spi_sirfsoc_of_match, pdev->dev.of_node);
662 platform_set_drvdata(pdev, master); 1092 platform_set_drvdata(pdev, master);
663 sspi = spi_master_get_devdata(master); 1093 sspi = spi_master_get_devdata(master);
664 1094 sspi->fifo_full_offset = ilog2(sspi->fifo_size);
1095 spi_comp_data = (struct sirf_spi_comp_data *)match->data;
1096 sspi->regs = spi_comp_data->regs;
1097 sspi->type = spi_comp_data->type;
1098 sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
1099 sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
1100 sspi->fifo_size = spi_comp_data->fifo_size;
665 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1101 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
666 sspi->base = devm_ioremap_resource(&pdev->dev, mem_res); 1102 sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
667 if (IS_ERR(sspi->base)) { 1103 if (IS_ERR(sspi->base)) {
668 ret = PTR_ERR(sspi->base); 1104 ret = PTR_ERR(sspi->base);
669 goto free_master; 1105 goto free_master;
670 } 1106 }
671
672 irq = platform_get_irq(pdev, 0); 1107 irq = platform_get_irq(pdev, 0);
673 if (irq < 0) { 1108 if (irq < 0) {
674 ret = -ENXIO; 1109 ret = -ENXIO;
@@ -684,11 +1119,13 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
684 sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer; 1119 sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
685 sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer; 1120 sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
686 sspi->bitbang.master->setup = spi_sirfsoc_setup; 1121 sspi->bitbang.master->setup = spi_sirfsoc_setup;
1122 sspi->bitbang.master->cleanup = spi_sirfsoc_cleanup;
687 master->bus_num = pdev->id; 1123 master->bus_num = pdev->id;
688 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH; 1124 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
689 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) | 1125 master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
690 SPI_BPW_MASK(16) | SPI_BPW_MASK(32); 1126 SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
691 master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ; 1127 master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
1128 master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX;
692 sspi->bitbang.master->dev.of_node = pdev->dev.of_node; 1129 sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
693 1130
694 /* request DMA channels */ 1131 /* request DMA channels */
@@ -711,47 +1148,19 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
711 goto free_tx_dma; 1148 goto free_tx_dma;
712 } 1149 }
713 clk_prepare_enable(sspi->clk); 1150 clk_prepare_enable(sspi->clk);
1151 if (spi_comp_data->hwinit)
1152 spi_comp_data->hwinit(sspi);
714 sspi->ctrl_freq = clk_get_rate(sspi->clk); 1153 sspi->ctrl_freq = clk_get_rate(sspi->clk);
715 1154
716 init_completion(&sspi->rx_done); 1155 init_completion(&sspi->rx_done);
717 init_completion(&sspi->tx_done); 1156 init_completion(&sspi->tx_done);
718 1157
719 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
720 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
721 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
722 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
723 /* We are not using dummy delay between command and data */
724 writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
725
726 sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
727 if (!sspi->dummypage) {
728 ret = -ENOMEM;
729 goto free_clk;
730 }
731
732 ret = spi_bitbang_start(&sspi->bitbang); 1158 ret = spi_bitbang_start(&sspi->bitbang);
733 if (ret) 1159 if (ret)
734 goto free_dummypage; 1160 goto free_clk;
735 for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
736 if (master->cs_gpios[i] == -ENOENT)
737 continue;
738 if (!gpio_is_valid(master->cs_gpios[i])) {
739 dev_err(&pdev->dev, "no valid gpio\n");
740 ret = -EINVAL;
741 goto free_dummypage;
742 }
743 ret = devm_gpio_request(&pdev->dev,
744 master->cs_gpios[i], DRIVER_NAME);
745 if (ret) {
746 dev_err(&pdev->dev, "failed to request gpio\n");
747 goto free_dummypage;
748 }
749 }
750 dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num); 1161 dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
751 1162
752 return 0; 1163 return 0;
753free_dummypage:
754 kfree(sspi->dummypage);
755free_clk: 1164free_clk:
756 clk_disable_unprepare(sspi->clk); 1165 clk_disable_unprepare(sspi->clk);
757 clk_put(sspi->clk); 1166 clk_put(sspi->clk);
@@ -772,9 +1181,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
772 1181
773 master = platform_get_drvdata(pdev); 1182 master = platform_get_drvdata(pdev);
774 sspi = spi_master_get_devdata(master); 1183 sspi = spi_master_get_devdata(master);
775
776 spi_bitbang_stop(&sspi->bitbang); 1184 spi_bitbang_stop(&sspi->bitbang);
777 kfree(sspi->dummypage);
778 clk_disable_unprepare(sspi->clk); 1185 clk_disable_unprepare(sspi->clk);
779 clk_put(sspi->clk); 1186 clk_put(sspi->clk);
780 dma_release_channel(sspi->rx_chan); 1187 dma_release_channel(sspi->rx_chan);
@@ -804,24 +1211,17 @@ static int spi_sirfsoc_resume(struct device *dev)
804 struct sirfsoc_spi *sspi = spi_master_get_devdata(master); 1211 struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
805 1212
806 clk_enable(sspi->clk); 1213 clk_enable(sspi->clk);
807 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 1214 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->txfifo_op);
808 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 1215 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + sspi->regs->rxfifo_op);
809 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 1216 writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->txfifo_op);
810 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 1217 writel(SIRFSOC_SPI_FIFO_START, sspi->base + sspi->regs->rxfifo_op);
811 1218 return 0;
812 return spi_master_resume(master);
813} 1219}
814#endif 1220#endif
815 1221
816static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend, 1222static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
817 spi_sirfsoc_resume); 1223 spi_sirfsoc_resume);
818 1224
819static const struct of_device_id spi_sirfsoc_of_match[] = {
820 { .compatible = "sirf,prima2-spi", },
821 {}
822};
823MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
824
825static struct platform_driver spi_sirfsoc_driver = { 1225static struct platform_driver spi_sirfsoc_driver = {
826 .driver = { 1226 .driver = {
827 .name = DRIVER_NAME, 1227 .name = DRIVER_NAME,
@@ -835,4 +1235,5 @@ module_platform_driver(spi_sirfsoc_driver);
835MODULE_DESCRIPTION("SiRF SoC SPI master driver"); 1235MODULE_DESCRIPTION("SiRF SoC SPI master driver");
836MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>"); 1236MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
837MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>"); 1237MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
1238MODULE_AUTHOR("Qipan Li <Qipan.Li@csr.com>");
838MODULE_LICENSE("GPL v2"); 1239MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
new file mode 100644
index 000000000000..f23f36ebaf3d
--- /dev/null
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -0,0 +1,1123 @@
1/*
2 * Xilinx Zynq UltraScale+ MPSoC Quad-SPI (QSPI) controller driver
3 * (master mode only)
4 *
5 * Copyright (C) 2009 - 2015 Xilinx, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/dma-mapping.h>
16#include <linux/dmaengine.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/module.h>
20#include <linux/of_irq.h>
21#include <linux/of_address.h>
22#include <linux/platform_device.h>
23#include <linux/spi/spi.h>
24#include <linux/spinlock.h>
25#include <linux/workqueue.h>
26
27/* Generic QSPI register offsets */
28#define GQSPI_CONFIG_OFST 0x00000100
29#define GQSPI_ISR_OFST 0x00000104
30#define GQSPI_IDR_OFST 0x0000010C
31#define GQSPI_IER_OFST 0x00000108
32#define GQSPI_IMASK_OFST 0x00000110
33#define GQSPI_EN_OFST 0x00000114
34#define GQSPI_TXD_OFST 0x0000011C
35#define GQSPI_RXD_OFST 0x00000120
36#define GQSPI_TX_THRESHOLD_OFST 0x00000128
37#define GQSPI_RX_THRESHOLD_OFST 0x0000012C
38#define GQSPI_LPBK_DLY_ADJ_OFST 0x00000138
39#define GQSPI_GEN_FIFO_OFST 0x00000140
40#define GQSPI_SEL_OFST 0x00000144
41#define GQSPI_GF_THRESHOLD_OFST 0x00000150
42#define GQSPI_FIFO_CTRL_OFST 0x0000014C
43#define GQSPI_QSPIDMA_DST_CTRL_OFST 0x0000080C
44#define GQSPI_QSPIDMA_DST_SIZE_OFST 0x00000804
45#define GQSPI_QSPIDMA_DST_STS_OFST 0x00000808
46#define GQSPI_QSPIDMA_DST_I_STS_OFST 0x00000814
47#define GQSPI_QSPIDMA_DST_I_EN_OFST 0x00000818
48#define GQSPI_QSPIDMA_DST_I_DIS_OFST 0x0000081C
49#define GQSPI_QSPIDMA_DST_I_MASK_OFST 0x00000820
50#define GQSPI_QSPIDMA_DST_ADDR_OFST 0x00000800
51#define GQSPI_QSPIDMA_DST_ADDR_MSB_OFST 0x00000828
52
53/* GQSPI register bit masks */
54#define GQSPI_SEL_MASK 0x00000001
55#define GQSPI_EN_MASK 0x00000001
56#define GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK 0x00000020
57#define GQSPI_ISR_WR_TO_CLR_MASK 0x00000002
58#define GQSPI_IDR_ALL_MASK 0x00000FBE
59#define GQSPI_CFG_MODE_EN_MASK 0xC0000000
60#define GQSPI_CFG_GEN_FIFO_START_MODE_MASK 0x20000000
61#define GQSPI_CFG_ENDIAN_MASK 0x04000000
62#define GQSPI_CFG_EN_POLL_TO_MASK 0x00100000
63#define GQSPI_CFG_WP_HOLD_MASK 0x00080000
64#define GQSPI_CFG_BAUD_RATE_DIV_MASK 0x00000038
65#define GQSPI_CFG_CLK_PHA_MASK 0x00000004
66#define GQSPI_CFG_CLK_POL_MASK 0x00000002
67#define GQSPI_CFG_START_GEN_FIFO_MASK 0x10000000
68#define GQSPI_GENFIFO_IMM_DATA_MASK 0x000000FF
69#define GQSPI_GENFIFO_DATA_XFER 0x00000100
70#define GQSPI_GENFIFO_EXP 0x00000200
71#define GQSPI_GENFIFO_MODE_SPI 0x00000400
72#define GQSPI_GENFIFO_MODE_DUALSPI 0x00000800
73#define GQSPI_GENFIFO_MODE_QUADSPI 0x00000C00
74#define GQSPI_GENFIFO_MODE_MASK 0x00000C00
75#define GQSPI_GENFIFO_CS_LOWER 0x00001000
76#define GQSPI_GENFIFO_CS_UPPER 0x00002000
77#define GQSPI_GENFIFO_BUS_LOWER 0x00004000
78#define GQSPI_GENFIFO_BUS_UPPER 0x00008000
79#define GQSPI_GENFIFO_BUS_BOTH 0x0000C000
80#define GQSPI_GENFIFO_BUS_MASK 0x0000C000
81#define GQSPI_GENFIFO_TX 0x00010000
82#define GQSPI_GENFIFO_RX 0x00020000
83#define GQSPI_GENFIFO_STRIPE 0x00040000
84#define GQSPI_GENFIFO_POLL 0x00080000
85#define GQSPI_GENFIFO_EXP_START 0x00000100
86#define GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK 0x00000004
87#define GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK 0x00000002
88#define GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK 0x00000001
89#define GQSPI_ISR_RXEMPTY_MASK 0x00000800
90#define GQSPI_ISR_GENFIFOFULL_MASK 0x00000400
91#define GQSPI_ISR_GENFIFONOT_FULL_MASK 0x00000200
92#define GQSPI_ISR_TXEMPTY_MASK 0x00000100
93#define GQSPI_ISR_GENFIFOEMPTY_MASK 0x00000080
94#define GQSPI_ISR_RXFULL_MASK 0x00000020
95#define GQSPI_ISR_RXNEMPTY_MASK 0x00000010
96#define GQSPI_ISR_TXFULL_MASK 0x00000008
97#define GQSPI_ISR_TXNOT_FULL_MASK 0x00000004
98#define GQSPI_ISR_POLL_TIME_EXPIRE_MASK 0x00000002
99#define GQSPI_IER_TXNOT_FULL_MASK 0x00000004
100#define GQSPI_IER_RXEMPTY_MASK 0x00000800
101#define GQSPI_IER_POLL_TIME_EXPIRE_MASK 0x00000002
102#define GQSPI_IER_RXNEMPTY_MASK 0x00000010
103#define GQSPI_IER_GENFIFOEMPTY_MASK 0x00000080
104#define GQSPI_IER_TXEMPTY_MASK 0x00000100
105#define GQSPI_QSPIDMA_DST_INTR_ALL_MASK 0x000000FE
106#define GQSPI_QSPIDMA_DST_STS_WTC 0x0000E000
107#define GQSPI_CFG_MODE_EN_DMA_MASK 0x80000000
108#define GQSPI_ISR_IDR_MASK 0x00000994
109#define GQSPI_QSPIDMA_DST_I_EN_DONE_MASK 0x00000002
110#define GQSPI_QSPIDMA_DST_I_STS_DONE_MASK 0x00000002
111#define GQSPI_IRQ_MASK 0x00000980
112
113#define GQSPI_CFG_BAUD_RATE_DIV_SHIFT 3
114#define GQSPI_GENFIFO_CS_SETUP 0x4
115#define GQSPI_GENFIFO_CS_HOLD 0x3
116#define GQSPI_TXD_DEPTH 64
117#define GQSPI_RX_FIFO_THRESHOLD 32
118#define GQSPI_RX_FIFO_FILL (GQSPI_RX_FIFO_THRESHOLD * 4)
119#define GQSPI_TX_FIFO_THRESHOLD_RESET_VAL 32
120#define GQSPI_TX_FIFO_FILL (GQSPI_TXD_DEPTH -\
121 GQSPI_TX_FIFO_THRESHOLD_RESET_VAL)
122#define GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL 0X10
123#define GQSPI_QSPIDMA_DST_CTRL_RESET_VAL 0x803FFA00
124#define GQSPI_SELECT_FLASH_CS_LOWER 0x1
125#define GQSPI_SELECT_FLASH_CS_UPPER 0x2
126#define GQSPI_SELECT_FLASH_CS_BOTH 0x3
127#define GQSPI_SELECT_FLASH_BUS_LOWER 0x1
128#define GQSPI_SELECT_FLASH_BUS_UPPER 0x2
129#define GQSPI_SELECT_FLASH_BUS_BOTH 0x3
130#define GQSPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
131#define GQSPI_BAUD_DIV_SHIFT 2 /* Baud rate divisor shift */
132#define GQSPI_SELECT_MODE_SPI 0x1
133#define GQSPI_SELECT_MODE_DUALSPI 0x2
134#define GQSPI_SELECT_MODE_QUADSPI 0x4
135#define GQSPI_DMA_UNALIGN 0x3
136#define GQSPI_DEFAULT_NUM_CS 1 /* Default number of chip selects */
137
/* Transfer engine mode for the current operation: programmed I/O or DMA. */
enum mode_type {GQSPI_MODE_IO, GQSPI_MODE_DMA};

/**
 * struct zynqmp_qspi - Defines qspi driver instance
 * @regs: Virtual address of the QSPI controller registers
 * @refclk: Pointer to the peripheral clock (reference clock)
 * @pclk: Pointer to the APB clock
 * @irq: IRQ number
 * @dev: Pointer to struct device
 * @txbuf: Pointer to the TX buffer (advanced as bytes are sent)
 * @rxbuf: Pointer to the RX buffer (advanced as bytes are received)
 * @bytes_to_transfer: Number of bytes left to transfer
 * @bytes_to_receive: Number of bytes left to receive
 * @genfifocs: GENFIFO chip-select bits set by zynqmp_gqspi_selectslave()
 * @genfifobus: GENFIFO bus-select bits (upper and/or lower data bus)
 * @dma_rx_bytes: Remaining bytes to receive by DMA mode
 * @dma_addr: DMA address after mapping the kernel buffer
 * @genfifoentry: Used for storing the genfifoentry instruction.
 * @mode: Defines the mode in which QSPI is operating (IO or DMA)
 */
struct zynqmp_qspi {
	void __iomem *regs;
	struct clk *refclk;
	struct clk *pclk;
	int irq;
	struct device *dev;
	const void *txbuf;
	void *rxbuf;
	int bytes_to_transfer;
	int bytes_to_receive;
	u32 genfifocs;
	u32 genfifobus;
	u32 dma_rx_bytes;
	dma_addr_t dma_addr;
	u32 genfifoentry;
	enum mode_type mode;
};
175
176/**
177 * zynqmp_gqspi_read: For GQSPI controller read operation
178 * @xqspi: Pointer to the zynqmp_qspi structure
179 * @offset: Offset from where to read
180 */
181static u32 zynqmp_gqspi_read(struct zynqmp_qspi *xqspi, u32 offset)
182{
183 return readl_relaxed(xqspi->regs + offset);
184}
185
186/**
187 * zynqmp_gqspi_write: For GQSPI controller write operation
188 * @xqspi: Pointer to the zynqmp_qspi structure
189 * @offset: Offset where to write
190 * @val: Value to be written
191 */
192static inline void zynqmp_gqspi_write(struct zynqmp_qspi *xqspi, u32 offset,
193 u32 val)
194{
195 writel_relaxed(val, (xqspi->regs + offset));
196}
197
/**
 * zynqmp_gqspi_selectslave: For selection of slave device
 * @instanceptr: Pointer to the zynqmp_qspi structure
 * @slavecs: For chip select (GQSPI_SELECT_FLASH_CS_{LOWER,UPPER,BOTH})
 * @slavebus: To check which bus is selected - upper or lower
 *	(GQSPI_SELECT_FLASH_BUS_{LOWER,UPPER,BOTH})
 */
static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
				     u8 slavecs, u8 slavebus)
{
	/*
	 * Bus and CS lines selected here will be updated in the instance and
	 * used for subsequent GENFIFO entries during transfer.
	 */

	/* Choose slave select line */
	switch (slavecs) {
	case GQSPI_SELECT_FLASH_CS_BOTH:
		instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
			GQSPI_GENFIFO_CS_UPPER;
		break;
	case GQSPI_SELECT_FLASH_CS_UPPER:
		instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
		break;
	case GQSPI_SELECT_FLASH_CS_LOWER:
		instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER;
		break;
	default:
		/* Invalid value: warn and keep the previous CS selection */
		dev_warn(instanceptr->dev, "Invalid slave select\n");
	}

	/* Choose the bus */
	switch (slavebus) {
	case GQSPI_SELECT_FLASH_BUS_BOTH:
		instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER |
			GQSPI_GENFIFO_BUS_UPPER;
		break;
	case GQSPI_SELECT_FLASH_BUS_UPPER:
		instanceptr->genfifobus = GQSPI_GENFIFO_BUS_UPPER;
		break;
	case GQSPI_SELECT_FLASH_BUS_LOWER:
		instanceptr->genfifobus = GQSPI_GENFIFO_BUS_LOWER;
		break;
	default:
		/* Invalid value: warn and keep the previous bus selection */
		dev_warn(instanceptr->dev, "Invalid slave bus\n");
	}
}
244
/**
 * zynqmp_qspi_init_hw: Initialize the hardware
 * @xqspi: Pointer to the zynqmp_qspi structure
 *
 * The default settings of the QSPI controller's configurable parameters on
 * reset are
 *	- Master mode
 *	- TX threshold set to 1
 *	- RX threshold set to 1
 *	- Flash memory interface mode enabled
 * This function performs the following actions
 *	- Disable and clear all the interrupts
 *	- Enable manual slave select
 *	- Enable manual start
 *	- Deselect all the chip select lines
 *	- Set the little endian mode of TX FIFO and
 *	- Enable the QSPI controller
 *
 * NOTE(review): the register write sequence below is order-dependent
 * (controller must be disabled before CONFIG is reprogrammed, and is only
 * re-enabled at the very end).
 */
static void zynqmp_qspi_init_hw(struct zynqmp_qspi *xqspi)
{
	u32 config_reg;

	/* Select the GQSPI mode (as opposed to the legacy controller) */
	zynqmp_gqspi_write(xqspi, GQSPI_SEL_OFST, GQSPI_SEL_MASK);
	/* Clear and disable interrupts; ISR bits are write-to-clear */
	zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST,
			   zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST) |
			   GQSPI_ISR_WR_TO_CLR_MASK);
	/* Clear the DMA STS (status bits are write-to-clear, see _WTC mask) */
	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
			   zynqmp_gqspi_read(xqspi,
					     GQSPI_QSPIDMA_DST_I_STS_OFST));
	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_STS_OFST,
			   zynqmp_gqspi_read(xqspi,
					     GQSPI_QSPIDMA_DST_STS_OFST) |
			   GQSPI_QSPIDMA_DST_STS_WTC);
	zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_IDR_ALL_MASK);
	zynqmp_gqspi_write(xqspi,
			   GQSPI_QSPIDMA_DST_I_DIS_OFST,
			   GQSPI_QSPIDMA_DST_INTR_ALL_MASK);
	/* Disable the GQSPI while the configuration is reprogrammed */
	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
	config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
	config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
	/* Manual start */
	config_reg |= GQSPI_CFG_GEN_FIFO_START_MODE_MASK;
	/* Little endian by default */
	config_reg &= ~GQSPI_CFG_ENDIAN_MASK;
	/* Disable poll time out */
	config_reg &= ~GQSPI_CFG_EN_POLL_TO_MASK;
	/* Set hold bit */
	config_reg |= GQSPI_CFG_WP_HOLD_MASK;
	/* Clear pre-scalar by default */
	config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
	/* CPHA 0 */
	config_reg &= ~GQSPI_CFG_CLK_PHA_MASK;
	/* CPOL 0 */
	config_reg &= ~GQSPI_CFG_CLK_POL_MASK;
	zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);

	/* Clear the TX and RX FIFO */
	zynqmp_gqspi_write(xqspi, GQSPI_FIFO_CTRL_OFST,
			   GQSPI_FIFO_CTRL_RST_RX_FIFO_MASK |
			   GQSPI_FIFO_CTRL_RST_TX_FIFO_MASK |
			   GQSPI_FIFO_CTRL_RST_GEN_FIFO_MASK);
	/* Set by default to allow for high frequencies */
	zynqmp_gqspi_write(xqspi, GQSPI_LPBK_DLY_ADJ_OFST,
			   zynqmp_gqspi_read(xqspi, GQSPI_LPBK_DLY_ADJ_OFST) |
			   GQSPI_LPBK_DLY_ADJ_USE_LPBK_MASK);
	/* Reset thresholds */
	zynqmp_gqspi_write(xqspi, GQSPI_TX_THRESHOLD_OFST,
			   GQSPI_TX_FIFO_THRESHOLD_RESET_VAL);
	zynqmp_gqspi_write(xqspi, GQSPI_RX_THRESHOLD_OFST,
			   GQSPI_RX_FIFO_THRESHOLD);
	zynqmp_gqspi_write(xqspi, GQSPI_GF_THRESHOLD_OFST,
			   GQSPI_GEN_FIFO_THRESHOLD_RESET_VAL);
	/* Default to the lower CS and lower bus until a transfer says otherwise */
	zynqmp_gqspi_selectslave(xqspi,
				 GQSPI_SELECT_FLASH_CS_LOWER,
				 GQSPI_SELECT_FLASH_BUS_LOWER);
	/* Initialize DMA */
	zynqmp_gqspi_write(xqspi,
			   GQSPI_QSPIDMA_DST_CTRL_OFST,
			   GQSPI_QSPIDMA_DST_CTRL_RESET_VAL);

	/* Enable the GQSPI */
	zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
}
332
333/**
334 * zynqmp_qspi_copy_read_data: Copy data to RX buffer
335 * @xqspi: Pointer to the zynqmp_qspi structure
336 * @data: The variable where data is stored
337 * @size: Number of bytes to be copied from data to RX buffer
338 */
339static void zynqmp_qspi_copy_read_data(struct zynqmp_qspi *xqspi,
340 ulong data, u8 size)
341{
342 memcpy(xqspi->rxbuf, &data, size);
343 xqspi->rxbuf += size;
344 xqspi->bytes_to_receive -= size;
345}
346
347/**
348 * zynqmp_prepare_transfer_hardware: Prepares hardware for transfer.
349 * @master: Pointer to the spi_master structure which provides
350 * information about the controller.
351 *
352 * This function enables SPI master controller.
353 *
354 * Return: 0 on success; error value otherwise
355 */
356static int zynqmp_prepare_transfer_hardware(struct spi_master *master)
357{
358 struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
359 int ret;
360
361 ret = clk_enable(xqspi->refclk);
362 if (ret)
363 goto clk_err;
364
365 ret = clk_enable(xqspi->pclk);
366 if (ret)
367 goto clk_err;
368
369 zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, GQSPI_EN_MASK);
370 return 0;
371clk_err:
372 return ret;
373}
374
375/**
376 * zynqmp_unprepare_transfer_hardware: Relaxes hardware after transfer
377 * @master: Pointer to the spi_master structure which provides
378 * information about the controller.
379 *
380 * This function disables the SPI master controller.
381 *
382 * Return: Always 0
383 */
384static int zynqmp_unprepare_transfer_hardware(struct spi_master *master)
385{
386 struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
387
388 zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
389 clk_disable(xqspi->refclk);
390 clk_disable(xqspi->pclk);
391 return 0;
392}
393
/**
 * zynqmp_qspi_chipselect: Select or deselect the chip select line
 * @qspi: Pointer to the spi_device structure
 * @is_high: Select(0) or deselect (1) the chip select line
 *
 * Queues a GENFIFO entry that asserts the CS (with setup time) or
 * de-asserts it (with hold time), manually starts the generic FIFO, and
 * busy-waits up to one second for the generic FIFO and TX FIFO to drain.
 * A timeout is only logged; this callback has no way to return an error.
 */
static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
{
	struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
	ulong timeout;
	u32 genfifoentry = 0x0, statusreg;

	genfifoentry |= GQSPI_GENFIFO_MODE_SPI;
	genfifoentry |= xqspi->genfifobus;

	if (!is_high) {
		/* Assert: include the CS bits and allow CS setup time */
		genfifoentry |= xqspi->genfifocs;
		genfifoentry |= GQSPI_GENFIFO_CS_SETUP;
	} else {
		/* Deselect: no CS bits, allow CS hold time before release */
		genfifoentry |= GQSPI_GENFIFO_CS_HOLD;
	}

	zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);

	/* Dummy generic FIFO entry */
	zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);

	/* Manually start the generic FIFO command */
	zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
			   zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
			   GQSPI_CFG_START_GEN_FIFO_MASK);

	timeout = jiffies + msecs_to_jiffies(1000);

	/* Wait until the generic FIFO command is empty */
	do {
		statusreg = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);

		if ((statusreg & GQSPI_ISR_GENFIFOEMPTY_MASK) &&
		    (statusreg & GQSPI_ISR_TXEMPTY_MASK))
			break;
		else
			cpu_relax();
	} while (!time_after_eq(jiffies, timeout));

	if (time_after_eq(jiffies, timeout))
		dev_err(xqspi->dev, "Chip select timed out\n");
}
441
/**
 * zynqmp_qspi_setup_transfer: Configure QSPI controller for specified
 *	transfer
 * @qspi: Pointer to the spi_device structure
 * @transfer: Pointer to the spi_transfer structure which provides
 *	information about next transfer setup parameters
 *
 * Sets the operational mode of QSPI controller for the next QSPI transfer and
 * sets the requested clock frequency.
 *
 * Return: Always 0
 *
 * Note:
 *	If the requested frequency is not an exact match with what can be
 *	obtained using the pre-scalar value, the driver sets the clock
 *	frequency which is lower than the requested frequency (maximum lower)
 *	for the transfer.
 *
 *	If the requested frequency is higher or lower than that is supported
 *	by the QSPI controller the driver will set the highest or lowest
 *	frequency supported by controller.
 */
static int zynqmp_qspi_setup_transfer(struct spi_device *qspi,
				      struct spi_transfer *transfer)
{
	struct zynqmp_qspi *xqspi = spi_master_get_devdata(qspi->master);
	ulong clk_rate;
	u32 config_reg, req_hz, baud_rate_val = 0;

	if (transfer)
		req_hz = transfer->speed_hz;
	else
		req_hz = qspi->max_speed_hz;

	/* Set the clock frequency */
	/* If req_hz == 0, default to lowest speed */
	clk_rate = clk_get_rate(xqspi->refclk);

	/*
	 * Find the smallest divisor value such that the resulting SPI clock
	 * does not exceed req_hz. The effective divisor is
	 * GQSPI_BAUD_DIV_SHIFT << baud_rate_val, i.e. 2^(baud_rate_val + 1),
	 * capped at baud_rate_val == GQSPI_BAUD_DIV_MAX.
	 */
	while ((baud_rate_val < GQSPI_BAUD_DIV_MAX) &&
	       (clk_rate /
		(GQSPI_BAUD_DIV_SHIFT << baud_rate_val)) > req_hz)
		baud_rate_val++;

	config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);

	/* Set the QSPI clock phase and clock polarity */
	config_reg &= (~GQSPI_CFG_CLK_PHA_MASK) & (~GQSPI_CFG_CLK_POL_MASK);

	if (qspi->mode & SPI_CPHA)
		config_reg |= GQSPI_CFG_CLK_PHA_MASK;
	if (qspi->mode & SPI_CPOL)
		config_reg |= GQSPI_CFG_CLK_POL_MASK;

	/* Program the new pre-scalar into the baud-rate divisor field */
	config_reg &= ~GQSPI_CFG_BAUD_RATE_DIV_MASK;
	config_reg |= (baud_rate_val << GQSPI_CFG_BAUD_RATE_DIV_SHIFT);
	zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
	return 0;
}
500
501/**
502 * zynqmp_qspi_setup: Configure the QSPI controller
503 * @qspi: Pointer to the spi_device structure
504 *
505 * Sets the operational mode of QSPI controller for the next QSPI transfer,
506 * baud rate and divisor value to setup the requested qspi clock.
507 *
508 * Return: 0 on success; error value otherwise.
509 */
510static int zynqmp_qspi_setup(struct spi_device *qspi)
511{
512 if (qspi->master->busy)
513 return -EBUSY;
514 return 0;
515}
516
517/**
518 * zynqmp_qspi_filltxfifo: Fills the TX FIFO as long as there is room in
519 * the FIFO or the bytes required to be
520 * transmitted.
521 * @xqspi: Pointer to the zynqmp_qspi structure
522 * @size: Number of bytes to be copied from TX buffer to TX FIFO
523 */
524static void zynqmp_qspi_filltxfifo(struct zynqmp_qspi *xqspi, int size)
525{
526 u32 count = 0, intermediate;
527
528 while ((xqspi->bytes_to_transfer > 0) && (count < size)) {
529 memcpy(&intermediate, xqspi->txbuf, 4);
530 zynqmp_gqspi_write(xqspi, GQSPI_TXD_OFST, intermediate);
531
532 if (xqspi->bytes_to_transfer >= 4) {
533 xqspi->txbuf += 4;
534 xqspi->bytes_to_transfer -= 4;
535 } else {
536 xqspi->txbuf += xqspi->bytes_to_transfer;
537 xqspi->bytes_to_transfer = 0;
538 }
539 count++;
540 }
541}
542
543/**
544 * zynqmp_qspi_readrxfifo: Fills the RX FIFO as long as there is room in
545 * the FIFO.
546 * @xqspi: Pointer to the zynqmp_qspi structure
547 * @size: Number of bytes to be copied from RX buffer to RX FIFO
548 */
549static void zynqmp_qspi_readrxfifo(struct zynqmp_qspi *xqspi, u32 size)
550{
551 ulong data;
552 int count = 0;
553
554 while ((count < size) && (xqspi->bytes_to_receive > 0)) {
555 if (xqspi->bytes_to_receive >= 4) {
556 (*(u32 *) xqspi->rxbuf) =
557 zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
558 xqspi->rxbuf += 4;
559 xqspi->bytes_to_receive -= 4;
560 count += 4;
561 } else {
562 data = zynqmp_gqspi_read(xqspi, GQSPI_RXD_OFST);
563 count += xqspi->bytes_to_receive;
564 zynqmp_qspi_copy_read_data(xqspi, data,
565 xqspi->bytes_to_receive);
566 xqspi->bytes_to_receive = 0;
567 }
568 }
569}
570
/**
 * zynqmp_process_dma_irq: Handler for DMA done interrupt of QSPI
 *                         controller
 * @xqspi: zynqmp_qspi instance pointer
 *
 * Unmaps the completed DMA receive, accounts the received bytes, and — if
 * the transfer had a non-word-multiple tail that DMA could not cover —
 * switches the controller to IO mode and kicks off a second GENFIFO
 * command to fetch the remaining bytes via the RX FIFO interrupts.
 */
static void zynqmp_process_dma_irq(struct zynqmp_qspi *xqspi)
{
	u32 config_reg, genfifoentry;

	dma_unmap_single(xqspi->dev, xqspi->dma_addr,
			 xqspi->dma_rx_bytes, DMA_FROM_DEVICE);
	/* Account for what DMA delivered; any remainder is the tail */
	xqspi->rxbuf += xqspi->dma_rx_bytes;
	xqspi->bytes_to_receive -= xqspi->dma_rx_bytes;
	xqspi->dma_rx_bytes = 0;

	/* Disabling the DMA interrupts */
	zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_DIS_OFST,
			   GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);

	if (xqspi->bytes_to_receive > 0) {
		/* Switch to IO mode,for remaining bytes to receive */
		config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
		config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);

		/* Initiate the transfer of remaining bytes */
		genfifoentry = xqspi->genfifoentry;
		genfifoentry |= xqspi->bytes_to_receive;
		zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);

		/* Dummy generic FIFO entry */
		zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);

		/* Manual start */
		zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
				   (zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
				   GQSPI_CFG_START_GEN_FIFO_MASK));

		/* Enable the RX interrupts for IO mode */
		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
				   GQSPI_IER_GENFIFOEMPTY_MASK |
				   GQSPI_IER_RXNEMPTY_MASK |
				   GQSPI_IER_RXEMPTY_MASK);
	}
}
618
/**
 * zynqmp_qspi_irq: Interrupt service routine of the QSPI controller
 * @irq: IRQ number
 * @dev_id: Pointer to the spi_master (devdata holds the xqspi structure)
 *
 * Services TX-FIFO refill, DMA-done, and RX-FIFO drain events; when all
 * bytes have moved in both directions it masks the interrupts and
 * finalizes the current transfer.
 *
 * Return: IRQ_HANDLED when interrupt is handled
 *         IRQ_NONE otherwise.
 */
static irqreturn_t zynqmp_qspi_irq(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
	int ret = IRQ_NONE;
	u32 status, mask, dma_status = 0;

	/* Read and ack the controller status (write-to-clear) */
	status = zynqmp_gqspi_read(xqspi, GQSPI_ISR_OFST);
	zynqmp_gqspi_write(xqspi, GQSPI_ISR_OFST, status);
	/* Only act on sources that are not masked in IMASK */
	mask = (status & ~(zynqmp_gqspi_read(xqspi, GQSPI_IMASK_OFST)));

	/* Read and clear DMA status */
	if (xqspi->mode == GQSPI_MODE_DMA) {
		dma_status =
			zynqmp_gqspi_read(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST);
		zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_I_STS_OFST,
				   dma_status);
	}

	if (mask & GQSPI_ISR_TXNOT_FULL_MASK) {
		/* Room in the TX FIFO: push more outgoing words */
		zynqmp_qspi_filltxfifo(xqspi, GQSPI_TX_FIFO_FILL);
		ret = IRQ_HANDLED;
	}

	if (dma_status & GQSPI_QSPIDMA_DST_I_STS_DONE_MASK) {
		zynqmp_process_dma_irq(xqspi);
		ret = IRQ_HANDLED;
	} else if (!(mask & GQSPI_IER_RXEMPTY_MASK) &&
			(mask & GQSPI_IER_GENFIFOEMPTY_MASK)) {
		/* IO-mode receive: command done and RX FIFO has data */
		zynqmp_qspi_readrxfifo(xqspi, GQSPI_RX_FIFO_FILL);
		ret = IRQ_HANDLED;
	}

	/* Transfer complete in both directions: mask IRQs and finalize */
	if ((xqspi->bytes_to_receive == 0) && (xqspi->bytes_to_transfer == 0)
			&& ((status & GQSPI_IRQ_MASK) == GQSPI_IRQ_MASK)) {
		zynqmp_gqspi_write(xqspi, GQSPI_IDR_OFST, GQSPI_ISR_IDR_MASK);
		spi_finalize_current_transfer(master);
		ret = IRQ_HANDLED;
	}
	return ret;
}
672
673/**
674 * zynqmp_qspi_selectspimode: Selects SPI mode - x1 or x2 or x4.
675 * @xqspi: xqspi is a pointer to the GQSPI instance
676 * @spimode: spimode - SPI or DUAL or QUAD.
677 * Return: Mask to set desired SPI mode in GENFIFO entry.
678 */
679static inline u32 zynqmp_qspi_selectspimode(struct zynqmp_qspi *xqspi,
680 u8 spimode)
681{
682 u32 mask = 0;
683
684 switch (spimode) {
685 case GQSPI_SELECT_MODE_DUALSPI:
686 mask = GQSPI_GENFIFO_MODE_DUALSPI;
687 break;
688 case GQSPI_SELECT_MODE_QUADSPI:
689 mask = GQSPI_GENFIFO_MODE_QUADSPI;
690 break;
691 case GQSPI_SELECT_MODE_SPI:
692 mask = GQSPI_GENFIFO_MODE_SPI;
693 break;
694 default:
695 dev_warn(xqspi->dev, "Invalid SPI mode\n");
696 }
697
698 return mask;
699}
700
701/**
702 * zynq_qspi_setuprxdma: This function sets up the RX DMA operation
703 * @xqspi: xqspi is a pointer to the GQSPI instance.
704 */
705static void zynq_qspi_setuprxdma(struct zynqmp_qspi *xqspi)
706{
707 u32 rx_bytes, rx_rem, config_reg;
708 dma_addr_t addr;
709 u64 dma_align = (u64)(uintptr_t)xqspi->rxbuf;
710
711 if ((xqspi->bytes_to_receive < 8) ||
712 ((dma_align & GQSPI_DMA_UNALIGN) != 0x0)) {
713 /* Setting to IO mode */
714 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
715 config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
716 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
717 xqspi->mode = GQSPI_MODE_IO;
718 xqspi->dma_rx_bytes = 0;
719 return;
720 }
721
722 rx_rem = xqspi->bytes_to_receive % 4;
723 rx_bytes = (xqspi->bytes_to_receive - rx_rem);
724
725 addr = dma_map_single(xqspi->dev, (void *)xqspi->rxbuf,
726 rx_bytes, DMA_FROM_DEVICE);
727 if (dma_mapping_error(xqspi->dev, addr))
728 dev_err(xqspi->dev, "ERR:rxdma:memory not mapped\n");
729
730 xqspi->dma_rx_bytes = rx_bytes;
731 xqspi->dma_addr = addr;
732 zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_OFST,
733 (u32)(addr & 0xffffffff));
734 addr = ((addr >> 16) >> 16);
735 zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_ADDR_MSB_OFST,
736 ((u32)addr) & 0xfff);
737
738 /* Enabling the DMA mode */
739 config_reg = zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST);
740 config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
741 config_reg |= GQSPI_CFG_MODE_EN_DMA_MASK;
742 zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST, config_reg);
743
744 /* Switch to DMA mode */
745 xqspi->mode = GQSPI_MODE_DMA;
746
747 /* Write the number of bytes to transfer */
748 zynqmp_gqspi_write(xqspi, GQSPI_QSPIDMA_DST_SIZE_OFST, rx_bytes);
749}
750
/**
 * zynqmp_qspi_txrxsetup: This function checks the TX/RX buffers in
 *                        the transfer and sets up the GENFIFO entries,
 *                        TX FIFO as required.
 * @xqspi: xqspi is a pointer to the GQSPI instance.
 * @transfer: It is a pointer to the structure containing transfer data.
 * @genfifoentry: genfifoentry is pointer to the variable in which
 *                GENFIFO mask is returned to calling function
 *
 * NOTE(review): only TX-only and RX-only transfers are handled here; a
 * transfer with both buffers set (or neither) leaves the GENFIFO entry
 * and byte counters untouched — presumably never produced by callers.
 */
static void zynqmp_qspi_txrxsetup(struct zynqmp_qspi *xqspi,
				  struct spi_transfer *transfer,
				  u32 *genfifoentry)
{
	u32 config_reg;

	/* Transmit */
	if ((xqspi->txbuf != NULL) && (xqspi->rxbuf == NULL)) {
		/* Setup data to be TXed */
		*genfifoentry &= ~GQSPI_GENFIFO_RX;
		*genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
		*genfifoentry |= GQSPI_GENFIFO_TX;
		*genfifoentry |=
			zynqmp_qspi_selectspimode(xqspi, transfer->tx_nbits);
		xqspi->bytes_to_transfer = transfer->len;
		/* TX always runs in IO mode; drop out of DMA if needed */
		if (xqspi->mode == GQSPI_MODE_DMA) {
			config_reg = zynqmp_gqspi_read(xqspi,
						       GQSPI_CONFIG_OFST);
			config_reg &= ~GQSPI_CFG_MODE_EN_MASK;
			zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
					   config_reg);
			xqspi->mode = GQSPI_MODE_IO;
		}
		/* Prime the TX FIFO before the command is started */
		zynqmp_qspi_filltxfifo(xqspi, GQSPI_TXD_DEPTH);
		/* Discard RX data */
		xqspi->bytes_to_receive = 0;
	} else if ((xqspi->txbuf == NULL) && (xqspi->rxbuf != NULL)) {
		/* Receive */

		/* TX auto fill */
		*genfifoentry &= ~GQSPI_GENFIFO_TX;
		/* Setup RX */
		*genfifoentry |= GQSPI_GENFIFO_DATA_XFER;
		*genfifoentry |= GQSPI_GENFIFO_RX;
		*genfifoentry |=
			zynqmp_qspi_selectspimode(xqspi, transfer->rx_nbits);
		xqspi->bytes_to_transfer = 0;
		xqspi->bytes_to_receive = transfer->len;
		/* Decide DMA vs IO mode based on length and alignment */
		zynq_qspi_setuprxdma(xqspi);
	}
}
801
/**
 * zynqmp_qspi_start_transfer: Initiates the QSPI transfer
 * @master: Pointer to the spi_master structure which provides
 *          information about the controller.
 * @qspi: Pointer to the spi_device structure
 * @transfer: Pointer to the spi_transfer structure which provide information
 *            about next transfer parameters
 *
 * Builds the GENFIFO command for this transfer (immediate length for short
 * transfers, exponent-encoded entries plus an immediate remainder for long
 * ones), manually starts the generic FIFO, and enables the interrupts that
 * drive completion (TX refill, DMA done, or RX drain).
 *
 * Return: Number of bytes transferred in the last transfer
 */
static int zynqmp_qspi_start_transfer(struct spi_master *master,
				      struct spi_device *qspi,
				      struct spi_transfer *transfer)
{
	struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
	u32 genfifoentry = 0x0, transfer_len;

	xqspi->txbuf = transfer->tx_buf;
	xqspi->rxbuf = transfer->rx_buf;

	zynqmp_qspi_setup_transfer(qspi, transfer);

	genfifoentry |= xqspi->genfifocs;
	genfifoentry |= xqspi->genfifobus;

	zynqmp_qspi_txrxsetup(xqspi, transfer, &genfifoentry);

	/* In DMA mode only the word-multiple part goes in this command */
	if (xqspi->mode == GQSPI_MODE_DMA)
		transfer_len = xqspi->dma_rx_bytes;
	else
		transfer_len = transfer->len;

	/* Remembered so the DMA-done handler can re-issue for the tail */
	xqspi->genfifoentry = genfifoentry;
	if ((transfer_len) < GQSPI_GENFIFO_IMM_DATA_MASK) {
		/* Short transfer: length fits in the immediate field */
		genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
		genfifoentry |= transfer_len;
		zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
	} else {
		int tempcount = transfer_len;
		u32 exponent = 8;	/* 2^8 = 256 */
		u8 imm_data = tempcount & 0xFF;

		/* Split length: power-of-two chunks + low-byte remainder */
		tempcount &= ~(tempcount & 0xFF);
		/* Immediate entry */
		if (tempcount != 0) {
			/* Exponent entries */
			genfifoentry |= GQSPI_GENFIFO_EXP;
			/* One entry per set bit above bit 7 (2^exponent) */
			while (tempcount != 0) {
				if (tempcount & GQSPI_GENFIFO_EXP_START) {
					genfifoentry &=
						~GQSPI_GENFIFO_IMM_DATA_MASK;
					genfifoentry |= exponent;
					zynqmp_gqspi_write(xqspi,
							   GQSPI_GEN_FIFO_OFST,
							   genfifoentry);
				}
				tempcount = tempcount >> 1;
				exponent++;
			}
		}
		if (imm_data != 0) {
			/* Remainder below 256 bytes as an immediate entry */
			genfifoentry &= ~GQSPI_GENFIFO_EXP;
			genfifoentry &= ~GQSPI_GENFIFO_IMM_DATA_MASK;
			genfifoentry |= (u8) (imm_data & 0xFF);
			zynqmp_gqspi_write(xqspi,
					   GQSPI_GEN_FIFO_OFST, genfifoentry);
		}
	}

	if ((xqspi->mode == GQSPI_MODE_IO) &&
	    (xqspi->rxbuf != NULL)) {
		/* Dummy generic FIFO entry */
		zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
	}

	/* Since we are using manual mode */
	zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
			   zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
			   GQSPI_CFG_START_GEN_FIFO_MASK);

	if (xqspi->txbuf != NULL)
		/* Enable interrupts for TX */
		zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
				   GQSPI_IER_TXEMPTY_MASK |
				   GQSPI_IER_GENFIFOEMPTY_MASK |
				   GQSPI_IER_TXNOT_FULL_MASK);

	if (xqspi->rxbuf != NULL) {
		/* Enable interrupts for RX */
		if (xqspi->mode == GQSPI_MODE_DMA) {
			/* Enable DMA interrupts */
			zynqmp_gqspi_write(xqspi,
					   GQSPI_QSPIDMA_DST_I_EN_OFST,
					   GQSPI_QSPIDMA_DST_I_EN_DONE_MASK);
		} else {
			zynqmp_gqspi_write(xqspi, GQSPI_IER_OFST,
					   GQSPI_IER_GENFIFOEMPTY_MASK |
					   GQSPI_IER_RXNEMPTY_MASK |
					   GQSPI_IER_RXEMPTY_MASK);
		}
	}

	return transfer->len;
}
909
910/**
911 * zynqmp_qspi_suspend: Suspend method for the QSPI driver
912 * @_dev: Address of the platform_device structure
913 *
914 * This function stops the QSPI driver queue and disables the QSPI controller
915 *
916 * Return: Always 0
917 */
918static int __maybe_unused zynqmp_qspi_suspend(struct device *dev)
919{
920 struct platform_device *pdev = container_of(dev,
921 struct platform_device,
922 dev);
923 struct spi_master *master = platform_get_drvdata(pdev);
924
925 spi_master_suspend(master);
926
927 zynqmp_unprepare_transfer_hardware(master);
928
929 return 0;
930}
931
932/**
933 * zynqmp_qspi_resume: Resume method for the QSPI driver
934 * @dev: Address of the platform_device structure
935 *
936 * The function starts the QSPI driver queue and initializes the QSPI
937 * controller
938 *
939 * Return: 0 on success; error value otherwise
940 */
941static int __maybe_unused zynqmp_qspi_resume(struct device *dev)
942{
943 struct platform_device *pdev = container_of(dev,
944 struct platform_device,
945 dev);
946 struct spi_master *master = platform_get_drvdata(pdev);
947 struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
948 int ret = 0;
949
950 ret = clk_enable(xqspi->pclk);
951 if (ret) {
952 dev_err(dev, "Cannot enable APB clock.\n");
953 return ret;
954 }
955
956 ret = clk_enable(xqspi->refclk);
957 if (ret) {
958 dev_err(dev, "Cannot enable device clock.\n");
959 clk_disable(xqspi->pclk);
960 return ret;
961 }
962
963 spi_master_resume(master);
964
965 return 0;
966}
967
/* System sleep PM ops only; the driver does not use runtime PM */
static SIMPLE_DEV_PM_OPS(zynqmp_qspi_dev_pm_ops, zynqmp_qspi_suspend,
			 zynqmp_qspi_resume);
970
/**
 * zynqmp_qspi_probe: Probe method for the QSPI driver
 * @pdev: Pointer to the platform_device structure
 *
 * Allocates the SPI master, maps registers, enables the APB and reference
 * clocks, initializes the controller hardware, hooks up the IRQ, and
 * registers the master. Error paths unwind in reverse order via gotos.
 *
 * Return: 0 on success; error value otherwise
 */
static int zynqmp_qspi_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct spi_master *master;
	struct zynqmp_qspi *xqspi;
	struct resource *res;
	struct device *dev = &pdev->dev;

	master = spi_alloc_master(&pdev->dev, sizeof(*xqspi));
	if (!master)
		return -ENOMEM;

	xqspi = spi_master_get_devdata(master);
	master->dev.of_node = pdev->dev.of_node;
	platform_set_drvdata(pdev, master);

	/* Map the controller register block */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(xqspi->regs)) {
		ret = PTR_ERR(xqspi->regs);
		goto remove_master;
	}

	xqspi->dev = dev;
	/* APB (register access) clock */
	xqspi->pclk = devm_clk_get(&pdev->dev, "pclk");
	if (IS_ERR(xqspi->pclk)) {
		dev_err(dev, "pclk clock not found.\n");
		ret = PTR_ERR(xqspi->pclk);
		goto remove_master;
	}

	ret = clk_prepare_enable(xqspi->pclk);
	if (ret) {
		dev_err(dev, "Unable to enable APB clock.\n");
		goto remove_master;
	}

	/* Reference (SPI serial) clock */
	xqspi->refclk = devm_clk_get(&pdev->dev, "ref_clk");
	if (IS_ERR(xqspi->refclk)) {
		dev_err(dev, "ref_clk clock not found.\n");
		ret = PTR_ERR(xqspi->refclk);
		goto clk_dis_pclk;
	}

	ret = clk_prepare_enable(xqspi->refclk);
	if (ret) {
		dev_err(dev, "Unable to enable device clock.\n");
		goto clk_dis_pclk;
	}

	/* QSPI controller initializations */
	zynqmp_qspi_init_hw(xqspi);

	xqspi->irq = platform_get_irq(pdev, 0);
	if (xqspi->irq <= 0) {
		ret = -ENXIO;
		dev_err(dev, "irq resource not found\n");
		goto clk_dis_all;
	}
	/* dev_id is the master; the ISR looks up devdata from it */
	ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,
			       0, pdev->name, master);
	if (ret != 0) {
		ret = -ENXIO;
		dev_err(dev, "request_irq failed\n");
		goto clk_dis_all;
	}

	master->num_chipselect = GQSPI_DEFAULT_NUM_CS;

	master->setup = zynqmp_qspi_setup;
	master->set_cs = zynqmp_qspi_chipselect;
	master->transfer_one = zynqmp_qspi_start_transfer;
	master->prepare_transfer_hardware = zynqmp_prepare_transfer_hardware;
	master->unprepare_transfer_hardware =
		zynqmp_unprepare_transfer_hardware;
	/* SCK cannot exceed half the reference clock (minimum divider) */
	master->max_speed_hz = clk_get_rate(xqspi->refclk) / 2;
	master->bits_per_word_mask = SPI_BPW_MASK(8);
	master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD |
			    SPI_TX_DUAL | SPI_TX_QUAD;

	if (master->dev.parent == NULL)
		master->dev.parent = &master->dev;

	ret = spi_register_master(master);
	if (ret)
		goto clk_dis_all;

	return 0;

clk_dis_all:
	clk_disable_unprepare(xqspi->refclk);
clk_dis_pclk:
	clk_disable_unprepare(xqspi->pclk);
remove_master:
	spi_master_put(master);

	return ret;
}
1077
1078/**
1079 * zynqmp_qspi_remove: Remove method for the QSPI driver
1080 * @pdev: Pointer to the platform_device structure
1081 *
1082 * This function is called if a device is physically removed from the system or
1083 * if the driver module is being unloaded. It frees all resources allocated to
1084 * the device.
1085 *
1086 * Return: 0 Always
1087 */
1088static int zynqmp_qspi_remove(struct platform_device *pdev)
1089{
1090 struct spi_master *master = platform_get_drvdata(pdev);
1091 struct zynqmp_qspi *xqspi = spi_master_get_devdata(master);
1092
1093 zynqmp_gqspi_write(xqspi, GQSPI_EN_OFST, 0x0);
1094 clk_disable_unprepare(xqspi->refclk);
1095 clk_disable_unprepare(xqspi->pclk);
1096
1097 spi_unregister_master(master);
1098
1099 return 0;
1100}
1101
/* Device-tree match table: binds to the ZynqMP GQSPI controller node */
static const struct of_device_id zynqmp_qspi_of_match[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0", },
	{ /* End of table */ }
};

MODULE_DEVICE_TABLE(of, zynqmp_qspi_of_match);

/* Platform driver glue: probe/remove plus system sleep PM callbacks */
static struct platform_driver zynqmp_qspi_driver = {
	.probe = zynqmp_qspi_probe,
	.remove = zynqmp_qspi_remove,
	.driver = {
		.name = "zynqmp-qspi",
		.of_match_table = zynqmp_qspi_of_match,
		.pm = &zynqmp_qspi_dev_pm_ops,
	},
};

module_platform_driver(zynqmp_qspi_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Zynqmp QSPI driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 50910d85df5a..cf8b91b23a76 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -571,7 +571,7 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
571 return 0; 571 return 0;
572} 572}
573 573
574static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg) 574static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
575{ 575{
576 struct spi_transfer *xfer; 576 struct spi_transfer *xfer;
577 struct device *tx_dev, *rx_dev; 577 struct device *tx_dev, *rx_dev;
@@ -583,15 +583,6 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
583 rx_dev = master->dma_rx->device->dev; 583 rx_dev = master->dma_rx->device->dev;
584 584
585 list_for_each_entry(xfer, &msg->transfers, transfer_list) { 585 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
586 /*
587 * Restore the original value of tx_buf or rx_buf if they are
588 * NULL.
589 */
590 if (xfer->tx_buf == master->dummy_tx)
591 xfer->tx_buf = NULL;
592 if (xfer->rx_buf == master->dummy_rx)
593 xfer->rx_buf = NULL;
594
595 if (!master->can_dma(master, msg->spi, xfer)) 586 if (!master->can_dma(master, msg->spi, xfer))
596 continue; 587 continue;
597 588
@@ -608,13 +599,32 @@ static inline int __spi_map_msg(struct spi_master *master,
608 return 0; 599 return 0;
609} 600}
610 601
611static inline int spi_unmap_msg(struct spi_master *master, 602static inline int __spi_unmap_msg(struct spi_master *master,
612 struct spi_message *msg) 603 struct spi_message *msg)
613{ 604{
614 return 0; 605 return 0;
615} 606}
616#endif /* !CONFIG_HAS_DMA */ 607#endif /* !CONFIG_HAS_DMA */
617 608
609static inline int spi_unmap_msg(struct spi_master *master,
610 struct spi_message *msg)
611{
612 struct spi_transfer *xfer;
613
614 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
615 /*
616 * Restore the original value of tx_buf or rx_buf if they are
617 * NULL.
618 */
619 if (xfer->tx_buf == master->dummy_tx)
620 xfer->tx_buf = NULL;
621 if (xfer->rx_buf == master->dummy_rx)
622 xfer->rx_buf = NULL;
623 }
624
625 return __spi_unmap_msg(master, msg);
626}
627
618static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 628static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
619{ 629{
620 struct spi_transfer *xfer; 630 struct spi_transfer *xfer;
@@ -988,9 +998,6 @@ void spi_finalize_current_message(struct spi_master *master)
988 998
989 spin_lock_irqsave(&master->queue_lock, flags); 999 spin_lock_irqsave(&master->queue_lock, flags);
990 mesg = master->cur_msg; 1000 mesg = master->cur_msg;
991 master->cur_msg = NULL;
992
993 queue_kthread_work(&master->kworker, &master->pump_messages);
994 spin_unlock_irqrestore(&master->queue_lock, flags); 1001 spin_unlock_irqrestore(&master->queue_lock, flags);
995 1002
996 spi_unmap_msg(master, mesg); 1003 spi_unmap_msg(master, mesg);
@@ -1003,9 +1010,13 @@ void spi_finalize_current_message(struct spi_master *master)
1003 } 1010 }
1004 } 1011 }
1005 1012
1006 trace_spi_message_done(mesg); 1013 spin_lock_irqsave(&master->queue_lock, flags);
1007 1014 master->cur_msg = NULL;
1008 master->cur_msg_prepared = false; 1015 master->cur_msg_prepared = false;
1016 queue_kthread_work(&master->kworker, &master->pump_messages);
1017 spin_unlock_irqrestore(&master->queue_lock, flags);
1018
1019 trace_spi_message_done(mesg);
1009 1020
1010 mesg->state = NULL; 1021 mesg->state = NULL;
1011 if (mesg->complete) 1022 if (mesg->complete)
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 92c909eed6b5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -95,37 +95,25 @@ MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
95 95
96/*-------------------------------------------------------------------------*/ 96/*-------------------------------------------------------------------------*/
97 97
98/*
99 * We can't use the standard synchronous wrappers for file I/O; we
100 * need to protect against async removal of the underlying spi_device.
101 */
102static void spidev_complete(void *arg)
103{
104 complete(arg);
105}
106
107static ssize_t 98static ssize_t
108spidev_sync(struct spidev_data *spidev, struct spi_message *message) 99spidev_sync(struct spidev_data *spidev, struct spi_message *message)
109{ 100{
110 DECLARE_COMPLETION_ONSTACK(done); 101 DECLARE_COMPLETION_ONSTACK(done);
111 int status; 102 int status;
112 103 struct spi_device *spi;
113 message->complete = spidev_complete;
114 message->context = &done;
115 104
116 spin_lock_irq(&spidev->spi_lock); 105 spin_lock_irq(&spidev->spi_lock);
117 if (spidev->spi == NULL) 106 spi = spidev->spi;
107 spin_unlock_irq(&spidev->spi_lock);
108
109 if (spi == NULL)
118 status = -ESHUTDOWN; 110 status = -ESHUTDOWN;
119 else 111 else
120 status = spi_async(spidev->spi, message); 112 status = spi_sync(spi, message);
121 spin_unlock_irq(&spidev->spi_lock); 113
114 if (status == 0)
115 status = message->actual_length;
122 116
123 if (status == 0) {
124 wait_for_completion(&done);
125 status = message->status;
126 if (status == 0)
127 status = message->actual_length;
128 }
129 return status; 117 return status;
130} 118}
131 119
@@ -647,7 +635,6 @@ err_find_dev:
647static int spidev_release(struct inode *inode, struct file *filp) 635static int spidev_release(struct inode *inode, struct file *filp)
648{ 636{
649 struct spidev_data *spidev; 637 struct spidev_data *spidev;
650 int status = 0;
651 638
652 mutex_lock(&device_list_lock); 639 mutex_lock(&device_list_lock);
653 spidev = filp->private_data; 640 spidev = filp->private_data;
@@ -676,7 +663,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
676 } 663 }
677 mutex_unlock(&device_list_lock); 664 mutex_unlock(&device_list_lock);
678 665
679 return status; 666 return 0;
680} 667}
681 668
682static const struct file_operations spidev_fops = { 669static const struct file_operations spidev_fops = {
@@ -706,6 +693,7 @@ static struct class *spidev_class;
706#ifdef CONFIG_OF 693#ifdef CONFIG_OF
707static const struct of_device_id spidev_dt_ids[] = { 694static const struct of_device_id spidev_dt_ids[] = {
708 { .compatible = "rohm,dh2228fv" }, 695 { .compatible = "rohm,dh2228fv" },
696 { .compatible = "lineartechnology,ltc2488" },
709 {}, 697 {},
710}; 698};
711MODULE_DEVICE_TABLE(of, spidev_dt_ids); 699MODULE_DEVICE_TABLE(of, spidev_dt_ids);