aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-03-16 00:07:33 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-16 00:07:33 -0400
commitff280e3639548fc8c366f6e4bd471e715ac590c7 (patch)
tree91c960af9702419a9e28f6bdc2f9cdb3921deb40
parent5ca5446ec5ba5e79a6f271cd026bb153d6850fcc (diff)
parentc508709bcffb644afbf5e5016fc7c90bf80c30ff (diff)
Merge tag 'spi-v4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown: "Not the biggest set of changes for SPI but a bit of a pickup in activity on the core: - Support for memory mapped read from flash devices via a SPI controller. - The beginnings of a message rewriting framework in the core which should in time allow us to support transforming messages to work around the limits of controllers or optimise the performance for controllers transparently to calling drivers. - Updates to the PXA2xx, the main functional change being to improve the ACPI support. - A new driver for the Analog Devices AXI SPI engine" * tag 'spi-v4.6' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (66 commits) spi: Add gfp parameter to kernel-doc to fix build warning spi: Fix htmldocs build error due struct spi_replaced_transfers spi: rockchip: covert rsd_nsecs to u32 type spi: rockchip: header file cleanup spi: xilinx: Add devicetree binding for spi-xilinx spi: respect the maximum segment size of DMA device spi: rockchip: check requesting dma channel with EPROBE_DEFER spi: rockchip: migrate to dmaengine_terminate_async spi: rockchip: check return value of dmaengine_prep_slave_sg spi: core: Fix deadlock when sending messages spi/rockchip: fix endian mode for 16-bit transfers spi/rockchip: Make sure spi clk is on in rockchip_spi_set_cs spi: pxa2xx: Use newer more explicit DMAengine terminate API spi: pxa2xx: Add support for Intel Broxton B-Step spi: lp-8841: return correct error code from probe spi: imx: drop bogus tests for rx/tx bufs in DMA transfer spi: imx: set MX51_ECSPI_CTRL_SMC bit in setup function spi: imx: make some register defines simpler spi: imx: remove unnecessary bit clearing in mx51_ecspi_config spi: imx: add support for all SPI word width for DMA ...
-rw-r--r--Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt31
-rw-r--r--Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt54
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rockchip.txt1
-rw-r--r--Documentation/devicetree/bindings/spi/spi-xilinx.txt22
-rw-r--r--drivers/spi/Kconfig96
-rw-r--r--drivers/spi/Makefile5
-rw-r--r--drivers/spi/spi-axi-spi-engine.c591
-rw-r--r--drivers/spi/spi-bcm2835.c5
-rw-r--r--drivers/spi/spi-bcm2835aux.c72
-rw-r--r--drivers/spi/spi-dw-mid.c4
-rw-r--r--drivers/spi/spi-dw-mmio.c5
-rw-r--r--drivers/spi/spi-imx.c341
-rw-r--r--drivers/spi/spi-lp8841-rtc.c256
-rw-r--r--drivers/spi/spi-pl022.c7
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c8
-rw-r--r--drivers/spi/spi-pxa2xx-pci.c13
-rw-r--r--drivers/spi/spi-pxa2xx.c151
-rw-r--r--drivers/spi/spi-pxa2xx.h37
-rw-r--r--drivers/spi/spi-rockchip.c56
-rw-r--r--drivers/spi/spi-ti-qspi.c139
-rw-r--r--drivers/spi/spi.c426
-rw-r--r--include/linux/pxa2xx_ssp.h1
-rw-r--r--include/linux/spi/spi.h145
23 files changed, 2080 insertions, 386 deletions
diff --git a/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
new file mode 100644
index 000000000000..8a18d71e6879
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/adi,axi-spi-engine.txt
@@ -0,0 +1,31 @@
1Analog Devices AXI SPI Engine controller Device Tree Bindings
2
3Required properties:
4- compatible : Must be "adi,axi-spi-engine-1.00.a"
5- reg : Physical base address and size of the register map.
6- interrupts : Property with a value describing the interrupt
7 number.
8- clock-names : List of input clock names - "s_axi_aclk", "spi_clk"
9- clocks : Clock phandles and specifiers (See clock bindings for
10 details on clock-names and clocks).
11- #address-cells : Must be <1>
12- #size-cells : Must be <0>
13
14Optional subnodes:
15 Subnodes are used to represent the SPI slave devices connected to the SPI
16 master. They follow the generic SPI bindings as outlined in spi-bus.txt.
17
18Example:
19
20 spi@44a00000 {
21 compatible = "adi,axi-spi-engine-1.00.a";
22 reg = <0x44a00000 0x1000>;
23 interrupts = <0 56 4>;
24 clocks = <&clkc 15 &clkc 15>;
25 clock-names = "s_axi_aclk", "spi_clk";
26
27 #address-cells = <1>;
28 #size-cells = <0>;
29
30 /* SPI devices */
31 };
diff --git a/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt b/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt
new file mode 100644
index 000000000000..852b651f3bc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/icpdas-lp8841-spi-rtc.txt
@@ -0,0 +1,54 @@
1* ICP DAS LP-8841 SPI Controller for RTC
2
3ICP DAS LP-8841 contains a DS-1302 RTC. RTC is connected to an IO
4memory register, which acts as an SPI master device.
5
6The device uses the standard MicroWire half-duplex transfer timing.
7Master output is set on low clock and sensed by the RTC on the rising
8edge. Master input is set by the RTC on the trailing edge and is sensed
9by the master on low clock.
10
11Required properties:
12
13- #address-cells: should be 1
14
15- #size-cells: should be 0
16
17- compatible: should be "icpdas,lp8841-spi-rtc"
18
19- reg: should provide IO memory address
20
21Requirements to SPI slave nodes:
22
23- There can be only one slave device.
24
25- The spi slave node should claim the following flags which are
26 required by the spi controller.
27
28 - spi-3wire: The master itself has only 3 wires. It cannot work in
29 full duplex mode.
30
31 - spi-cs-high: DS-1302 has active high chip select line. The master
32 doesn't support active low.
33
34 - spi-lsb-first: DS-1302 requires least significant bit first
35 transfers. The master only supports this type of bit ordering.
36
37
38Example:
39
40spi@901c {
41 #address-cells = <1>;
42 #size-cells = <0>;
43 compatible = "icpdas,lp8841-spi-rtc";
44 reg = <0x901c 0x1>;
45
46 rtc@0 {
47 compatible = "maxim,ds1302";
48 reg = <0>;
49 spi-max-frequency = <500000>;
50 spi-3wire;
51 spi-lsb-first;
52 spi-cs-high;
53 };
54};
diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.txt b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
index 0c491bda4c65..1b14d69d8903 100644
--- a/Documentation/devicetree/bindings/spi/spi-rockchip.txt
+++ b/Documentation/devicetree/bindings/spi/spi-rockchip.txt
@@ -9,6 +9,7 @@ Required Properties:
9 "rockchip,rk3066-spi" for rk3066. 9 "rockchip,rk3066-spi" for rk3066.
10 "rockchip,rk3188-spi", "rockchip,rk3066-spi" for rk3188. 10 "rockchip,rk3188-spi", "rockchip,rk3066-spi" for rk3188.
11 "rockchip,rk3288-spi", "rockchip,rk3066-spi" for rk3288. 11 "rockchip,rk3288-spi", "rockchip,rk3066-spi" for rk3288.
12 "rockchip,rk3399-spi", "rockchip,rk3066-spi" for rk3399.
12- reg: physical base address of the controller and length of memory mapped 13- reg: physical base address of the controller and length of memory mapped
13 region. 14 region.
14- interrupts: The interrupt number to the cpu. The interrupt specifier format 15- interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/spi/spi-xilinx.txt b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
new file mode 100644
index 000000000000..c7b7856bd528
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-xilinx.txt
@@ -0,0 +1,22 @@
1Xilinx SPI controller Device Tree Bindings
2-------------------------------------------------
3
4Required properties:
5- compatible : Should be "xlnx,xps-spi-2.00.a" or "xlnx,xps-spi-2.00.b"
6- reg : Physical base address and size of SPI registers map.
7- interrupts : Property with a value describing the interrupt
8 number.
9- interrupt-parent : Must be core interrupt controller
10
11Optional properties:
12- xlnx,num-ss-bits : Number of chip selects used.
13
14Example:
15 axi_quad_spi@41e00000 {
16 compatible = "xlnx,xps-spi-2.00.a";
17 interrupt-parent = <&intc>;
18 interrupts = <0 31 1>;
19 reg = <0x41e00000 0x10000>;
20 xlnx,num-ss-bits = <0x1>;
21 };
22
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 77064160dd76..9d8c84bb1544 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -75,11 +75,26 @@ config SPI_ATMEL
75 This selects a driver for the Atmel SPI Controller, present on 75 This selects a driver for the Atmel SPI Controller, present on
76 many AT32 (AVR32) and AT91 (ARM) chips. 76 many AT32 (AVR32) and AT91 (ARM) chips.
77 77
78config SPI_AU1550
79 tristate "Au1550/Au1200/Au1300 SPI Controller"
80 depends on MIPS_ALCHEMY
81 select SPI_BITBANG
82 help
83 If you say yes to this option, support will be included for the
84 PSC SPI controller found on Au1550, Au1200 and Au1300 series.
85
86config SPI_AXI_SPI_ENGINE
87 tristate "Analog Devices AXI SPI Engine controller"
88 depends on HAS_IOMEM
89 help
90 This enables support for the Analog Devices AXI SPI Engine SPI controller.
91 It is part of the SPI Engine framework that is used in some Analog Devices
92 reference designs for FPGAs.
93
78config SPI_BCM2835 94config SPI_BCM2835
79 tristate "BCM2835 SPI controller" 95 tristate "BCM2835 SPI controller"
80 depends on GPIOLIB 96 depends on GPIOLIB
81 depends on ARCH_BCM2835 || COMPILE_TEST 97 depends on ARCH_BCM2835 || COMPILE_TEST
82 depends on GPIOLIB
83 help 98 help
84 This selects a driver for the Broadcom BCM2835 SPI master. 99 This selects a driver for the Broadcom BCM2835 SPI master.
85 100
@@ -90,8 +105,7 @@ config SPI_BCM2835
90 105
91config SPI_BCM2835AUX 106config SPI_BCM2835AUX
92 tristate "BCM2835 SPI auxiliary controller" 107 tristate "BCM2835 SPI auxiliary controller"
93 depends on ARCH_BCM2835 || COMPILE_TEST 108 depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST
94 depends on GPIOLIB
95 help 109 help
96 This selects a driver for the Broadcom BCM2835 SPI aux master. 110 This selects a driver for the Broadcom BCM2835 SPI aux master.
97 111
@@ -118,14 +132,6 @@ config SPI_BFIN_SPORT
118 help 132 help
119 Enable support for a SPI bus via the Blackfin SPORT peripheral. 133 Enable support for a SPI bus via the Blackfin SPORT peripheral.
120 134
121config SPI_AU1550
122 tristate "Au1550/Au1200/Au1300 SPI Controller"
123 depends on MIPS_ALCHEMY
124 select SPI_BITBANG
125 help
126 If you say yes to this option, support will be included for the
127 PSC SPI controller found on Au1550, Au1200 and Au1300 series.
128
129config SPI_BCM53XX 135config SPI_BCM53XX
130 tristate "Broadcom BCM53xx SPI controller" 136 tristate "Broadcom BCM53xx SPI controller"
131 depends on ARCH_BCM_5301X 137 depends on ARCH_BCM_5301X
@@ -197,6 +203,23 @@ config SPI_DAVINCI
197 help 203 help
198 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 204 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
199 205
206config SPI_DESIGNWARE
207 tristate "DesignWare SPI controller core support"
208 help
209 general driver for SPI controller core from DesignWare
210
211config SPI_DW_PCI
212 tristate "PCI interface driver for DW SPI core"
213 depends on SPI_DESIGNWARE && PCI
214
215config SPI_DW_MID_DMA
216 bool "DMA support for DW SPI controller on Intel MID platform"
217 depends on SPI_DW_PCI && DW_DMAC_PCI
218
219config SPI_DW_MMIO
220 tristate "Memory-mapped io interface driver for DW SPI core"
221 depends on SPI_DESIGNWARE
222
200config SPI_DLN2 223config SPI_DLN2
201 tristate "Diolan DLN-2 USB SPI adapter" 224 tristate "Diolan DLN-2 USB SPI adapter"
202 depends on MFD_DLN2 225 depends on MFD_DLN2
@@ -271,6 +294,16 @@ config SPI_LM70_LLP
271 which interfaces to an LM70 temperature sensor using 294 which interfaces to an LM70 temperature sensor using
272 a parallel port. 295 a parallel port.
273 296
297config SPI_LP8841_RTC
298 tristate "ICP DAS LP-8841 SPI Controller for RTC"
299 depends on MACH_PXA27X_DT || COMPILE_TEST
300 help
301 This driver provides an SPI master device to drive Maxim
302 DS-1302 real time clock.
303
304 Say N here unless you plan to run the kernel on an ICP DAS
305 LP-8x4x industrial computer.
306
274config SPI_MPC52xx 307config SPI_MPC52xx
275 tristate "Freescale MPC52xx SPI (non-PSC) controller support" 308 tristate "Freescale MPC52xx SPI (non-PSC) controller support"
276 depends on PPC_MPC52xx 309 depends on PPC_MPC52xx
@@ -346,6 +379,13 @@ config SPI_MT65XX
346 say Y or M here.If you are not sure, say N. 379 say Y or M here.If you are not sure, say N.
347 SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs. 380 SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
348 381
382config SPI_NUC900
383 tristate "Nuvoton NUC900 series SPI"
384 depends on ARCH_W90X900
385 select SPI_BITBANG
386 help
387 SPI driver for Nuvoton NUC900 series ARM SoCs
388
349config SPI_OC_TINY 389config SPI_OC_TINY
350 tristate "OpenCores tiny SPI" 390 tristate "OpenCores tiny SPI"
351 depends on GPIOLIB || COMPILE_TEST 391 depends on GPIOLIB || COMPILE_TEST
@@ -415,10 +455,6 @@ config SPI_PPC4xx
415 help 455 help
416 This selects a driver for the PPC4xx SPI Controller. 456 This selects a driver for the PPC4xx SPI Controller.
417 457
418config SPI_PXA2XX_DMA
419 def_bool y
420 depends on SPI_PXA2XX
421
422config SPI_PXA2XX 458config SPI_PXA2XX
423 tristate "PXA2xx SSP SPI master" 459 tristate "PXA2xx SSP SPI master"
424 depends on (ARCH_PXA || PCI || ACPI) 460 depends on (ARCH_PXA || PCI || ACPI)
@@ -451,7 +487,7 @@ config SPI_RB4XX
451 487
452config SPI_RSPI 488config SPI_RSPI
453 tristate "Renesas RSPI/QSPI controller" 489 tristate "Renesas RSPI/QSPI controller"
454 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST 490 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
455 help 491 help
456 SPI driver for Renesas RSPI and QSPI blocks. 492 SPI driver for Renesas RSPI and QSPI blocks.
457 493
@@ -501,7 +537,7 @@ config SPI_SC18IS602
501config SPI_SH_MSIOF 537config SPI_SH_MSIOF
502 tristate "SuperH MSIOF SPI controller" 538 tristate "SuperH MSIOF SPI controller"
503 depends on HAVE_CLK && HAS_DMA 539 depends on HAVE_CLK && HAS_DMA
504 depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST 540 depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
505 help 541 help
506 SPI driver for SuperH and SH Mobile MSIOF blocks. 542 SPI driver for SuperH and SH Mobile MSIOF blocks.
507 543
@@ -520,7 +556,7 @@ config SPI_SH_SCI
520 556
521config SPI_SH_HSPI 557config SPI_SH_HSPI
522 tristate "SuperH HSPI controller" 558 tristate "SuperH HSPI controller"
523 depends on ARCH_SHMOBILE || COMPILE_TEST 559 depends on ARCH_RENESAS || COMPILE_TEST
524 help 560 help
525 SPI driver for SuperH HSPI blocks. 561 SPI driver for SuperH HSPI blocks.
526 562
@@ -647,34 +683,10 @@ config SPI_ZYNQMP_GQSPI
647 help 683 help
648 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 684 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
649 685
650config SPI_NUC900
651 tristate "Nuvoton NUC900 series SPI"
652 depends on ARCH_W90X900
653 select SPI_BITBANG
654 help
655 SPI driver for Nuvoton NUC900 series ARM SoCs
656
657# 686#
658# Add new SPI master controllers in alphabetical order above this line 687# Add new SPI master controllers in alphabetical order above this line
659# 688#
660 689
661config SPI_DESIGNWARE
662 tristate "DesignWare SPI controller core support"
663 help
664 general driver for SPI controller core from DesignWare
665
666config SPI_DW_PCI
667 tristate "PCI interface driver for DW SPI core"
668 depends on SPI_DESIGNWARE && PCI
669
670config SPI_DW_MID_DMA
671 bool "DMA support for DW SPI controller on Intel MID platform"
672 depends on SPI_DW_PCI && DW_DMAC_PCI
673
674config SPI_DW_MMIO
675 tristate "Memory-mapped io interface driver for DW SPI core"
676 depends on SPI_DESIGNWARE
677
678# 690#
679# There are lots of SPI device types, with sensors and memory 691# There are lots of SPI device types, with sensors and memory
680# being probably the most widely used ones. 692# being probably the most widely used ones.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 8991ffce6e12..fbb255c5a608 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
15obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o 15obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
16obj-$(CONFIG_SPI_ATH79) += spi-ath79.o 16obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
17obj-$(CONFIG_SPI_AU1550) += spi-au1550.o 17obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
18obj-$(CONFIG_SPI_AXI_SPI_ENGINE) += spi-axi-spi-engine.o
18obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o 19obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
19obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o 20obj-$(CONFIG_SPI_BCM2835AUX) += spi-bcm2835aux.o
20obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o 21obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
@@ -46,6 +47,7 @@ obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
46obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o 47obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
47obj-$(CONFIG_SPI_IMX) += spi-imx.o 48obj-$(CONFIG_SPI_IMX) += spi-imx.o
48obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o 49obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
50obj-$(CONFIG_SPI_LP8841_RTC) += spi-lp8841-rtc.o
49obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o 51obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
50obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o 52obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
51obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o 53obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
@@ -62,8 +64,7 @@ obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
62obj-$(CONFIG_SPI_ORION) += spi-orion.o 64obj-$(CONFIG_SPI_ORION) += spi-orion.o
63obj-$(CONFIG_SPI_PL022) += spi-pl022.o 65obj-$(CONFIG_SPI_PL022) += spi-pl022.o
64obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o 66obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
65spi-pxa2xx-platform-objs := spi-pxa2xx.o 67spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
66spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
67obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o 68obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
68obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o 69obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
69obj-$(CONFIG_SPI_QUP) += spi-qup.o 70obj-$(CONFIG_SPI_QUP) += spi-qup.o
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
new file mode 100644
index 000000000000..c968ab210a51
--- /dev/null
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -0,0 +1,591 @@
1/*
2 * SPI-Engine SPI controller driver
3 * Copyright 2015 Analog Devices Inc.
4 * Author: Lars-Peter Clausen <lars@metafoo.de>
5 *
6 * Licensed under the GPL-2.
7 */
8
9#include <linux/clk.h>
10#include <linux/interrupt.h>
11#include <linux/io.h>
12#include <linux/of.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/spi/spi.h>
16
17#define SPI_ENGINE_VERSION_MAJOR(x) ((x >> 16) & 0xff)
18#define SPI_ENGINE_VERSION_MINOR(x) ((x >> 8) & 0xff)
19#define SPI_ENGINE_VERSION_PATCH(x) (x & 0xff)
20
21#define SPI_ENGINE_REG_VERSION 0x00
22
23#define SPI_ENGINE_REG_RESET 0x40
24
25#define SPI_ENGINE_REG_INT_ENABLE 0x80
26#define SPI_ENGINE_REG_INT_PENDING 0x84
27#define SPI_ENGINE_REG_INT_SOURCE 0x88
28
29#define SPI_ENGINE_REG_SYNC_ID 0xc0
30
31#define SPI_ENGINE_REG_CMD_FIFO_ROOM 0xd0
32#define SPI_ENGINE_REG_SDO_FIFO_ROOM 0xd4
33#define SPI_ENGINE_REG_SDI_FIFO_LEVEL 0xd8
34
35#define SPI_ENGINE_REG_CMD_FIFO 0xe0
36#define SPI_ENGINE_REG_SDO_DATA_FIFO 0xe4
37#define SPI_ENGINE_REG_SDI_DATA_FIFO 0xe8
38#define SPI_ENGINE_REG_SDI_DATA_FIFO_PEEK 0xec
39
40#define SPI_ENGINE_INT_CMD_ALMOST_EMPTY BIT(0)
41#define SPI_ENGINE_INT_SDO_ALMOST_EMPTY BIT(1)
42#define SPI_ENGINE_INT_SDI_ALMOST_FULL BIT(2)
43#define SPI_ENGINE_INT_SYNC BIT(3)
44
45#define SPI_ENGINE_CONFIG_CPHA BIT(0)
46#define SPI_ENGINE_CONFIG_CPOL BIT(1)
47#define SPI_ENGINE_CONFIG_3WIRE BIT(2)
48
49#define SPI_ENGINE_INST_TRANSFER 0x0
50#define SPI_ENGINE_INST_ASSERT 0x1
51#define SPI_ENGINE_INST_WRITE 0x2
52#define SPI_ENGINE_INST_MISC 0x3
53
54#define SPI_ENGINE_CMD_REG_CLK_DIV 0x0
55#define SPI_ENGINE_CMD_REG_CONFIG 0x1
56
57#define SPI_ENGINE_MISC_SYNC 0x0
58#define SPI_ENGINE_MISC_SLEEP 0x1
59
60#define SPI_ENGINE_TRANSFER_WRITE 0x1
61#define SPI_ENGINE_TRANSFER_READ 0x2
62
63#define SPI_ENGINE_CMD(inst, arg1, arg2) \
64 (((inst) << 12) | ((arg1) << 8) | (arg2))
65
66#define SPI_ENGINE_CMD_TRANSFER(flags, n) \
67 SPI_ENGINE_CMD(SPI_ENGINE_INST_TRANSFER, (flags), (n))
68#define SPI_ENGINE_CMD_ASSERT(delay, cs) \
69 SPI_ENGINE_CMD(SPI_ENGINE_INST_ASSERT, (delay), (cs))
70#define SPI_ENGINE_CMD_WRITE(reg, val) \
71 SPI_ENGINE_CMD(SPI_ENGINE_INST_WRITE, (reg), (val))
72#define SPI_ENGINE_CMD_SLEEP(delay) \
73 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SLEEP, (delay))
74#define SPI_ENGINE_CMD_SYNC(id) \
75 SPI_ENGINE_CMD(SPI_ENGINE_INST_MISC, SPI_ENGINE_MISC_SYNC, (id))
76
77struct spi_engine_program {
78 unsigned int length;
79 uint16_t instructions[];
80};
81
82struct spi_engine {
83 struct clk *clk;
84 struct clk *ref_clk;
85
86 spinlock_t lock;
87
88 void __iomem *base;
89
90 struct spi_message *msg;
91 struct spi_engine_program *p;
92 unsigned cmd_length;
93 const uint16_t *cmd_buf;
94
95 struct spi_transfer *tx_xfer;
96 unsigned int tx_length;
97 const uint8_t *tx_buf;
98
99 struct spi_transfer *rx_xfer;
100 unsigned int rx_length;
101 uint8_t *rx_buf;
102
103 unsigned int sync_id;
104 unsigned int completed_id;
105
106 unsigned int int_enable;
107};
108
109static void spi_engine_program_add_cmd(struct spi_engine_program *p,
110 bool dry, uint16_t cmd)
111{
112 if (!dry)
113 p->instructions[p->length] = cmd;
114 p->length++;
115}
116
117static unsigned int spi_engine_get_config(struct spi_device *spi)
118{
119 unsigned int config = 0;
120
121 if (spi->mode & SPI_CPOL)
122 config |= SPI_ENGINE_CONFIG_CPOL;
123 if (spi->mode & SPI_CPHA)
124 config |= SPI_ENGINE_CONFIG_CPHA;
125 if (spi->mode & SPI_3WIRE)
126 config |= SPI_ENGINE_CONFIG_3WIRE;
127
128 return config;
129}
130
131static unsigned int spi_engine_get_clk_div(struct spi_engine *spi_engine,
132 struct spi_device *spi, struct spi_transfer *xfer)
133{
134 unsigned int clk_div;
135
136 clk_div = DIV_ROUND_UP(clk_get_rate(spi_engine->ref_clk),
137 xfer->speed_hz * 2);
138 if (clk_div > 255)
139 clk_div = 255;
140 else if (clk_div > 0)
141 clk_div -= 1;
142
143 return clk_div;
144}
145
146static void spi_engine_gen_xfer(struct spi_engine_program *p, bool dry,
147 struct spi_transfer *xfer)
148{
149 unsigned int len = xfer->len;
150
151 while (len) {
152 unsigned int n = min(len, 256U);
153 unsigned int flags = 0;
154
155 if (xfer->tx_buf)
156 flags |= SPI_ENGINE_TRANSFER_WRITE;
157 if (xfer->rx_buf)
158 flags |= SPI_ENGINE_TRANSFER_READ;
159
160 spi_engine_program_add_cmd(p, dry,
161 SPI_ENGINE_CMD_TRANSFER(flags, n - 1));
162 len -= n;
163 }
164}
165
166static void spi_engine_gen_sleep(struct spi_engine_program *p, bool dry,
167 struct spi_engine *spi_engine, unsigned int clk_div, unsigned int delay)
168{
169 unsigned int spi_clk = clk_get_rate(spi_engine->ref_clk);
170 unsigned int t;
171
172 if (delay == 0)
173 return;
174
175 t = DIV_ROUND_UP(delay * spi_clk, (clk_div + 1) * 2);
176 while (t) {
177 unsigned int n = min(t, 256U);
178
179 spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_SLEEP(n - 1));
180 t -= n;
181 }
182}
183
184static void spi_engine_gen_cs(struct spi_engine_program *p, bool dry,
185 struct spi_device *spi, bool assert)
186{
187 unsigned int mask = 0xff;
188
189 if (assert)
190 mask ^= BIT(spi->chip_select);
191
192 spi_engine_program_add_cmd(p, dry, SPI_ENGINE_CMD_ASSERT(1, mask));
193}
194
195static int spi_engine_compile_message(struct spi_engine *spi_engine,
196 struct spi_message *msg, bool dry, struct spi_engine_program *p)
197{
198 struct spi_device *spi = msg->spi;
199 struct spi_transfer *xfer;
200 int clk_div, new_clk_div;
201 bool cs_change = true;
202
203 clk_div = -1;
204
205 spi_engine_program_add_cmd(p, dry,
206 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CONFIG,
207 spi_engine_get_config(spi)));
208
209 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
210 new_clk_div = spi_engine_get_clk_div(spi_engine, spi, xfer);
211 if (new_clk_div != clk_div) {
212 clk_div = new_clk_div;
213 spi_engine_program_add_cmd(p, dry,
214 SPI_ENGINE_CMD_WRITE(SPI_ENGINE_CMD_REG_CLK_DIV,
215 clk_div));
216 }
217
218 if (cs_change)
219 spi_engine_gen_cs(p, dry, spi, true);
220
221 spi_engine_gen_xfer(p, dry, xfer);
222 spi_engine_gen_sleep(p, dry, spi_engine, clk_div,
223 xfer->delay_usecs);
224
225 cs_change = xfer->cs_change;
226 if (list_is_last(&xfer->transfer_list, &msg->transfers))
227 cs_change = !cs_change;
228
229 if (cs_change)
230 spi_engine_gen_cs(p, dry, spi, false);
231 }
232
233 return 0;
234}
235
236static void spi_engine_xfer_next(struct spi_engine *spi_engine,
237 struct spi_transfer **_xfer)
238{
239 struct spi_message *msg = spi_engine->msg;
240 struct spi_transfer *xfer = *_xfer;
241
242 if (!xfer) {
243 xfer = list_first_entry(&msg->transfers,
244 struct spi_transfer, transfer_list);
245 } else if (list_is_last(&xfer->transfer_list, &msg->transfers)) {
246 xfer = NULL;
247 } else {
248 xfer = list_next_entry(xfer, transfer_list);
249 }
250
251 *_xfer = xfer;
252}
253
254static void spi_engine_tx_next(struct spi_engine *spi_engine)
255{
256 struct spi_transfer *xfer = spi_engine->tx_xfer;
257
258 do {
259 spi_engine_xfer_next(spi_engine, &xfer);
260 } while (xfer && !xfer->tx_buf);
261
262 spi_engine->tx_xfer = xfer;
263 if (xfer) {
264 spi_engine->tx_length = xfer->len;
265 spi_engine->tx_buf = xfer->tx_buf;
266 } else {
267 spi_engine->tx_buf = NULL;
268 }
269}
270
271static void spi_engine_rx_next(struct spi_engine *spi_engine)
272{
273 struct spi_transfer *xfer = spi_engine->rx_xfer;
274
275 do {
276 spi_engine_xfer_next(spi_engine, &xfer);
277 } while (xfer && !xfer->rx_buf);
278
279 spi_engine->rx_xfer = xfer;
280 if (xfer) {
281 spi_engine->rx_length = xfer->len;
282 spi_engine->rx_buf = xfer->rx_buf;
283 } else {
284 spi_engine->rx_buf = NULL;
285 }
286}
287
288static bool spi_engine_write_cmd_fifo(struct spi_engine *spi_engine)
289{
290 void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_CMD_FIFO;
291 unsigned int n, m, i;
292 const uint16_t *buf;
293
294 n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_CMD_FIFO_ROOM);
295 while (n && spi_engine->cmd_length) {
296 m = min(n, spi_engine->cmd_length);
297 buf = spi_engine->cmd_buf;
298 for (i = 0; i < m; i++)
299 writel_relaxed(buf[i], addr);
300 spi_engine->cmd_buf += m;
301 spi_engine->cmd_length -= m;
302 n -= m;
303 }
304
305 return spi_engine->cmd_length != 0;
306}
307
308static bool spi_engine_write_tx_fifo(struct spi_engine *spi_engine)
309{
310 void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDO_DATA_FIFO;
311 unsigned int n, m, i;
312 const uint8_t *buf;
313
314 n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDO_FIFO_ROOM);
315 while (n && spi_engine->tx_length) {
316 m = min(n, spi_engine->tx_length);
317 buf = spi_engine->tx_buf;
318 for (i = 0; i < m; i++)
319 writel_relaxed(buf[i], addr);
320 spi_engine->tx_buf += m;
321 spi_engine->tx_length -= m;
322 n -= m;
323 if (spi_engine->tx_length == 0)
324 spi_engine_tx_next(spi_engine);
325 }
326
327 return spi_engine->tx_length != 0;
328}
329
330static bool spi_engine_read_rx_fifo(struct spi_engine *spi_engine)
331{
332 void __iomem *addr = spi_engine->base + SPI_ENGINE_REG_SDI_DATA_FIFO;
333 unsigned int n, m, i;
334 uint8_t *buf;
335
336 n = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_SDI_FIFO_LEVEL);
337 while (n && spi_engine->rx_length) {
338 m = min(n, spi_engine->rx_length);
339 buf = spi_engine->rx_buf;
340 for (i = 0; i < m; i++)
341 buf[i] = readl_relaxed(addr);
342 spi_engine->rx_buf += m;
343 spi_engine->rx_length -= m;
344 n -= m;
345 if (spi_engine->rx_length == 0)
346 spi_engine_rx_next(spi_engine);
347 }
348
349 return spi_engine->rx_length != 0;
350}
351
352static irqreturn_t spi_engine_irq(int irq, void *devid)
353{
354 struct spi_master *master = devid;
355 struct spi_engine *spi_engine = spi_master_get_devdata(master);
356 unsigned int disable_int = 0;
357 unsigned int pending;
358
359 pending = readl_relaxed(spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
360
361 if (pending & SPI_ENGINE_INT_SYNC) {
362 writel_relaxed(SPI_ENGINE_INT_SYNC,
363 spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
364 spi_engine->completed_id = readl_relaxed(
365 spi_engine->base + SPI_ENGINE_REG_SYNC_ID);
366 }
367
368 spin_lock(&spi_engine->lock);
369
370 if (pending & SPI_ENGINE_INT_CMD_ALMOST_EMPTY) {
371 if (!spi_engine_write_cmd_fifo(spi_engine))
372 disable_int |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
373 }
374
375 if (pending & SPI_ENGINE_INT_SDO_ALMOST_EMPTY) {
376 if (!spi_engine_write_tx_fifo(spi_engine))
377 disable_int |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
378 }
379
380 if (pending & (SPI_ENGINE_INT_SDI_ALMOST_FULL | SPI_ENGINE_INT_SYNC)) {
381 if (!spi_engine_read_rx_fifo(spi_engine))
382 disable_int |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
383 }
384
385 if (pending & SPI_ENGINE_INT_SYNC) {
386 if (spi_engine->msg &&
387 spi_engine->completed_id == spi_engine->sync_id) {
388 struct spi_message *msg = spi_engine->msg;
389
390 kfree(spi_engine->p);
391 msg->status = 0;
392 msg->actual_length = msg->frame_length;
393 spi_engine->msg = NULL;
394 spi_finalize_current_message(master);
395 disable_int |= SPI_ENGINE_INT_SYNC;
396 }
397 }
398
399 if (disable_int) {
400 spi_engine->int_enable &= ~disable_int;
401 writel_relaxed(spi_engine->int_enable,
402 spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
403 }
404
405 spin_unlock(&spi_engine->lock);
406
407 return IRQ_HANDLED;
408}
409
410static int spi_engine_transfer_one_message(struct spi_master *master,
411 struct spi_message *msg)
412{
413 struct spi_engine_program p_dry, *p;
414 struct spi_engine *spi_engine = spi_master_get_devdata(master);
415 unsigned int int_enable = 0;
416 unsigned long flags;
417 size_t size;
418
419 p_dry.length = 0;
420 spi_engine_compile_message(spi_engine, msg, true, &p_dry);
421
422 size = sizeof(*p->instructions) * (p_dry.length + 1);
423 p = kzalloc(sizeof(*p) + size, GFP_KERNEL);
424 if (!p)
425 return -ENOMEM;
426 spi_engine_compile_message(spi_engine, msg, false, p);
427
428 spin_lock_irqsave(&spi_engine->lock, flags);
429 spi_engine->sync_id = (spi_engine->sync_id + 1) & 0xff;
430 spi_engine_program_add_cmd(p, false,
431 SPI_ENGINE_CMD_SYNC(spi_engine->sync_id));
432
433 spi_engine->msg = msg;
434 spi_engine->p = p;
435
436 spi_engine->cmd_buf = p->instructions;
437 spi_engine->cmd_length = p->length;
438 if (spi_engine_write_cmd_fifo(spi_engine))
439 int_enable |= SPI_ENGINE_INT_CMD_ALMOST_EMPTY;
440
441 spi_engine_tx_next(spi_engine);
442 if (spi_engine_write_tx_fifo(spi_engine))
443 int_enable |= SPI_ENGINE_INT_SDO_ALMOST_EMPTY;
444
445 spi_engine_rx_next(spi_engine);
446 if (spi_engine->rx_length != 0)
447 int_enable |= SPI_ENGINE_INT_SDI_ALMOST_FULL;
448
449 int_enable |= SPI_ENGINE_INT_SYNC;
450
451 writel_relaxed(int_enable,
452 spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
453 spi_engine->int_enable = int_enable;
454 spin_unlock_irqrestore(&spi_engine->lock, flags);
455
456 return 0;
457}
458
459static int spi_engine_probe(struct platform_device *pdev)
460{
461 struct spi_engine *spi_engine;
462 struct spi_master *master;
463 unsigned int version;
464 struct resource *res;
465 int irq;
466 int ret;
467
468 irq = platform_get_irq(pdev, 0);
469 if (irq <= 0)
470 return -ENXIO;
471
472 spi_engine = devm_kzalloc(&pdev->dev, sizeof(*spi_engine), GFP_KERNEL);
473 if (!spi_engine)
474 return -ENOMEM;
475
476 master = spi_alloc_master(&pdev->dev, 0);
477 if (!master)
478 return -ENOMEM;
479
480 spi_master_set_devdata(master, spi_engine);
481
482 spin_lock_init(&spi_engine->lock);
483
484 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
485 spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
486 if (IS_ERR(spi_engine->base)) {
487 ret = PTR_ERR(spi_engine->base);
488 goto err_put_master;
489 }
490
491 version = readl(spi_engine->base + SPI_ENGINE_REG_VERSION);
492 if (SPI_ENGINE_VERSION_MAJOR(version) != 1) {
493 dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
494 SPI_ENGINE_VERSION_MAJOR(version),
495 SPI_ENGINE_VERSION_MINOR(version),
496 SPI_ENGINE_VERSION_PATCH(version));
497 return -ENODEV;
498 }
499
500 spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
501 if (IS_ERR(spi_engine->clk)) {
502 ret = PTR_ERR(spi_engine->clk);
503 goto err_put_master;
504 }
505
506 spi_engine->ref_clk = devm_clk_get(&pdev->dev, "spi_clk");
507 if (IS_ERR(spi_engine->ref_clk)) {
508 ret = PTR_ERR(spi_engine->ref_clk);
509 goto err_put_master;
510 }
511
512 ret = clk_prepare_enable(spi_engine->clk);
513 if (ret)
514 goto err_put_master;
515
516 ret = clk_prepare_enable(spi_engine->ref_clk);
517 if (ret)
518 goto err_clk_disable;
519
520 writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_RESET);
521 writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
522 writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
523
524 ret = request_irq(irq, spi_engine_irq, 0, pdev->name, master);
525 if (ret)
526 goto err_ref_clk_disable;
527
528 master->dev.parent = &pdev->dev;
529 master->dev.of_node = pdev->dev.of_node;
530 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE;
531 master->bits_per_word_mask = SPI_BPW_MASK(8);
532 master->max_speed_hz = clk_get_rate(spi_engine->ref_clk) / 2;
533 master->transfer_one_message = spi_engine_transfer_one_message;
534 master->num_chipselect = 8;
535
536 ret = spi_register_master(master);
537 if (ret)
538 goto err_free_irq;
539
540 platform_set_drvdata(pdev, master);
541
542 return 0;
543err_free_irq:
544 free_irq(irq, master);
545err_ref_clk_disable:
546 clk_disable_unprepare(spi_engine->ref_clk);
547err_clk_disable:
548 clk_disable_unprepare(spi_engine->clk);
549err_put_master:
550 spi_master_put(master);
551 return ret;
552}
553
554static int spi_engine_remove(struct platform_device *pdev)
555{
556 struct spi_master *master = platform_get_drvdata(pdev);
557 struct spi_engine *spi_engine = spi_master_get_devdata(master);
558 int irq = platform_get_irq(pdev, 0);
559
560 spi_unregister_master(master);
561
562 free_irq(irq, master);
563
564 writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING);
565 writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE);
566 writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET);
567
568 clk_disable_unprepare(spi_engine->ref_clk);
569 clk_disable_unprepare(spi_engine->clk);
570
571 return 0;
572}
573
574static const struct of_device_id spi_engine_match_table[] = {
575 { .compatible = "adi,axi-spi-engine-1.00.a" },
576 { },
577};
578
579static struct platform_driver spi_engine_driver = {
580 .probe = spi_engine_probe,
581 .remove = spi_engine_remove,
582 .driver = {
583 .name = "spi-engine",
584 .of_match_table = spi_engine_match_table,
585 },
586};
587module_platform_driver(spi_engine_driver);
588
589MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
590MODULE_DESCRIPTION("Analog Devices SPI engine peripheral driver");
591MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index cf04960cc3e6..f35cc10772f6 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -727,11 +727,6 @@ static int bcm2835_spi_setup(struct spi_device *spi)
727 spi->chip_select, spi->cs_gpio, err); 727 spi->chip_select, spi->cs_gpio, err);
728 return err; 728 return err;
729 } 729 }
730 /* the implementation of pinctrl-bcm2835 currently does not
731 * set the GPIO value when using gpio_direction_output
732 * so we are setting it here explicitly
733 */
734 gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
735 730
736 return 0; 731 return 0;
737} 732}
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index ecc73c0a97cf..7428091d3f5b 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -64,9 +64,9 @@
64#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000 64#define BCM2835_AUX_SPI_CNTL0_VAR_WIDTH 0x00004000
65#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000 65#define BCM2835_AUX_SPI_CNTL0_DOUTHOLD 0x00003000
66#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800 66#define BCM2835_AUX_SPI_CNTL0_ENABLE 0x00000800
67#define BCM2835_AUX_SPI_CNTL0_CPHA_IN 0x00000400 67#define BCM2835_AUX_SPI_CNTL0_IN_RISING 0x00000400
68#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200 68#define BCM2835_AUX_SPI_CNTL0_CLEARFIFO 0x00000200
69#define BCM2835_AUX_SPI_CNTL0_CPHA_OUT 0x00000100 69#define BCM2835_AUX_SPI_CNTL0_OUT_RISING 0x00000100
70#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080 70#define BCM2835_AUX_SPI_CNTL0_CPOL 0x00000080
71#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040 71#define BCM2835_AUX_SPI_CNTL0_MSBF_OUT 0x00000040
72#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F 72#define BCM2835_AUX_SPI_CNTL0_SHIFTLEN 0x0000003F
@@ -92,9 +92,6 @@
92#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30 92#define BCM2835_AUX_SPI_POLLING_LIMIT_US 30
93#define BCM2835_AUX_SPI_POLLING_JIFFIES 2 93#define BCM2835_AUX_SPI_POLLING_JIFFIES 2
94 94
95#define BCM2835_AUX_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
96 | SPI_NO_CS)
97
98struct bcm2835aux_spi { 95struct bcm2835aux_spi {
99 void __iomem *regs; 96 void __iomem *regs;
100 struct clk *clk; 97 struct clk *clk;
@@ -212,9 +209,15 @@ static irqreturn_t bcm2835aux_spi_interrupt(int irq, void *dev_id)
212 ret = IRQ_HANDLED; 209 ret = IRQ_HANDLED;
213 } 210 }
214 211
215 /* and if rx_len is 0 then wake up completion and disable spi */ 212 if (!bs->tx_len) {
213 /* disable tx fifo empty interrupt */
214 bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1] |
215 BCM2835_AUX_SPI_CNTL1_IDLE);
216 }
217
218 /* and if rx_len is 0 then disable interrupts and wake up completion */
216 if (!bs->rx_len) { 219 if (!bs->rx_len) {
217 bcm2835aux_spi_reset_hw(bs); 220 bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
218 complete(&master->xfer_completion); 221 complete(&master->xfer_completion);
219 } 222 }
220 223
@@ -307,9 +310,6 @@ static int bcm2835aux_spi_transfer_one_poll(struct spi_master *master,
307 } 310 }
308 } 311 }
309 312
310 /* Transfer complete - reset SPI HW */
311 bcm2835aux_spi_reset_hw(bs);
312
313 /* and return without waiting for completion */ 313 /* and return without waiting for completion */
314 return 0; 314 return 0;
315} 315}
@@ -330,10 +330,6 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
330 * resulting (potentially) in more interrupts when transferring 330 * resulting (potentially) in more interrupts when transferring
331 * more than 12 bytes 331 * more than 12 bytes
332 */ 332 */
333 bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
334 BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
335 BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
336 bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
337 333
338 /* set clock */ 334 /* set clock */
339 spi_hz = tfr->speed_hz; 335 spi_hz = tfr->speed_hz;
@@ -348,17 +344,13 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
348 } else { /* the slowest we can go */ 344 } else { /* the slowest we can go */
349 speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX; 345 speed = BCM2835_AUX_SPI_CNTL0_SPEED_MAX;
350 } 346 }
347 /* mask out old speed from previous spi_transfer */
348 bs->cntl[0] &= ~(BCM2835_AUX_SPI_CNTL0_SPEED);
349 /* set the new speed */
351 bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT; 350 bs->cntl[0] |= speed << BCM2835_AUX_SPI_CNTL0_SPEED_SHIFT;
352 351
353 spi_used_hz = clk_hz / (2 * (speed + 1)); 352 spi_used_hz = clk_hz / (2 * (speed + 1));
354 353
355 /* handle all the modes */
356 if (spi->mode & SPI_CPOL)
357 bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
358 if (spi->mode & SPI_CPHA)
359 bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPHA_OUT |
360 BCM2835_AUX_SPI_CNTL0_CPHA_IN;
361
362 /* set transmit buffers and length */ 354 /* set transmit buffers and length */
363 bs->tx_buf = tfr->tx_buf; 355 bs->tx_buf = tfr->tx_buf;
364 bs->rx_buf = tfr->rx_buf; 356 bs->rx_buf = tfr->rx_buf;
@@ -382,6 +374,40 @@ static int bcm2835aux_spi_transfer_one(struct spi_master *master,
382 return bcm2835aux_spi_transfer_one_irq(master, spi, tfr); 374 return bcm2835aux_spi_transfer_one_irq(master, spi, tfr);
383} 375}
384 376
377static int bcm2835aux_spi_prepare_message(struct spi_master *master,
378 struct spi_message *msg)
379{
380 struct spi_device *spi = msg->spi;
381 struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
382
383 bs->cntl[0] = BCM2835_AUX_SPI_CNTL0_ENABLE |
384 BCM2835_AUX_SPI_CNTL0_VAR_WIDTH |
385 BCM2835_AUX_SPI_CNTL0_MSBF_OUT;
386 bs->cntl[1] = BCM2835_AUX_SPI_CNTL1_MSBF_IN;
387
388 /* handle all the modes */
389 if (spi->mode & SPI_CPOL) {
390 bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_CPOL;
391 bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_OUT_RISING;
392 } else {
393 bs->cntl[0] |= BCM2835_AUX_SPI_CNTL0_IN_RISING;
394 }
395 bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL1, bs->cntl[1]);
396 bcm2835aux_wr(bs, BCM2835_AUX_SPI_CNTL0, bs->cntl[0]);
397
398 return 0;
399}
400
401static int bcm2835aux_spi_unprepare_message(struct spi_master *master,
402 struct spi_message *msg)
403{
404 struct bcm2835aux_spi *bs = spi_master_get_devdata(master);
405
406 bcm2835aux_spi_reset_hw(bs);
407
408 return 0;
409}
410
385static void bcm2835aux_spi_handle_err(struct spi_master *master, 411static void bcm2835aux_spi_handle_err(struct spi_master *master,
386 struct spi_message *msg) 412 struct spi_message *msg)
387{ 413{
@@ -405,11 +431,13 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
405 } 431 }
406 432
407 platform_set_drvdata(pdev, master); 433 platform_set_drvdata(pdev, master);
408 master->mode_bits = BCM2835_AUX_SPI_MODE_BITS; 434 master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
409 master->bits_per_word_mask = SPI_BPW_MASK(8); 435 master->bits_per_word_mask = SPI_BPW_MASK(8);
410 master->num_chipselect = -1; 436 master->num_chipselect = -1;
411 master->transfer_one = bcm2835aux_spi_transfer_one; 437 master->transfer_one = bcm2835aux_spi_transfer_one;
412 master->handle_err = bcm2835aux_spi_handle_err; 438 master->handle_err = bcm2835aux_spi_handle_err;
439 master->prepare_message = bcm2835aux_spi_prepare_message;
440 master->unprepare_message = bcm2835aux_spi_unprepare_message;
413 master->dev.of_node = pdev->dev.of_node; 441 master->dev.of_node = pdev->dev.of_node;
414 442
415 bs = spi_master_get_devdata(master); 443 bs = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 9185f6c08459..e31971f91475 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -89,10 +89,10 @@ static void mid_spi_dma_exit(struct dw_spi *dws)
89 if (!dws->dma_inited) 89 if (!dws->dma_inited)
90 return; 90 return;
91 91
92 dmaengine_terminate_all(dws->txchan); 92 dmaengine_terminate_sync(dws->txchan);
93 dma_release_channel(dws->txchan); 93 dma_release_channel(dws->txchan);
94 94
95 dmaengine_terminate_all(dws->rxchan); 95 dmaengine_terminate_sync(dws->rxchan);
96 dma_release_channel(dws->rxchan); 96 dma_release_channel(dws->rxchan);
97} 97}
98 98
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index a6d7029a85ac..447497e9124c 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -47,11 +47,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
47 47
48 /* Get basic io resource and map it */ 48 /* Get basic io resource and map it */
49 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 49 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
50 if (!mem) {
51 dev_err(&pdev->dev, "no mem resource?\n");
52 return -EINVAL;
53 }
54
55 dws->regs = devm_ioremap_resource(&pdev->dev, mem); 50 dws->regs = devm_ioremap_resource(&pdev->dev, mem);
56 if (IS_ERR(dws->regs)) { 51 if (IS_ERR(dws->regs)) {
57 dev_err(&pdev->dev, "SPI region map failed\n"); 52 dev_err(&pdev->dev, "SPI region map failed\n");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index c688efa95e29..e7a19be87c38 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -56,7 +56,6 @@
56 56
57/* The maximum bytes that a sdma BD can transfer.*/ 57/* The maximum bytes that a sdma BD can transfer.*/
58#define MAX_SDMA_BD_BYTES (1 << 15) 58#define MAX_SDMA_BD_BYTES (1 << 15)
59#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
60struct spi_imx_config { 59struct spi_imx_config {
61 unsigned int speed_hz; 60 unsigned int speed_hz;
62 unsigned int bpw; 61 unsigned int bpw;
@@ -86,12 +85,18 @@ struct spi_imx_devtype_data {
86 85
87struct spi_imx_data { 86struct spi_imx_data {
88 struct spi_bitbang bitbang; 87 struct spi_bitbang bitbang;
88 struct device *dev;
89 89
90 struct completion xfer_done; 90 struct completion xfer_done;
91 void __iomem *base; 91 void __iomem *base;
92 unsigned long base_phys;
93
92 struct clk *clk_per; 94 struct clk *clk_per;
93 struct clk *clk_ipg; 95 struct clk *clk_ipg;
94 unsigned long spi_clk; 96 unsigned long spi_clk;
97 unsigned int spi_bus_clk;
98
99 unsigned int bytes_per_word;
95 100
96 unsigned int count; 101 unsigned int count;
97 void (*tx)(struct spi_imx_data *); 102 void (*tx)(struct spi_imx_data *);
@@ -101,8 +106,6 @@ struct spi_imx_data {
101 unsigned int txfifo; /* number of words pushed in tx FIFO */ 106 unsigned int txfifo; /* number of words pushed in tx FIFO */
102 107
103 /* DMA */ 108 /* DMA */
104 unsigned int dma_is_inited;
105 unsigned int dma_finished;
106 bool usedma; 109 bool usedma;
107 u32 wml; 110 u32 wml;
108 struct completion dma_rx_completion; 111 struct completion dma_rx_completion;
@@ -199,15 +202,35 @@ static unsigned int spi_imx_clkdiv_2(unsigned int fin,
199 return 7; 202 return 7;
200} 203}
201 204
205static int spi_imx_bytes_per_word(const int bpw)
206{
207 return DIV_ROUND_UP(bpw, BITS_PER_BYTE);
208}
209
202static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, 210static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
203 struct spi_transfer *transfer) 211 struct spi_transfer *transfer)
204{ 212{
205 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 213 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
214 unsigned int bpw = transfer->bits_per_word;
215
216 if (!master->dma_rx)
217 return false;
206 218
207 if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml && 219 if (!bpw)
208 (transfer->len % spi_imx->wml) == 0) 220 bpw = spi->bits_per_word;
209 return true; 221
210 return false; 222 bpw = spi_imx_bytes_per_word(bpw);
223
224 if (bpw != 1 && bpw != 2 && bpw != 4)
225 return false;
226
227 if (transfer->len < spi_imx->wml * bpw)
228 return false;
229
230 if (transfer->len % (spi_imx->wml * bpw))
231 return false;
232
233 return true;
211} 234}
212 235
213#define MX51_ECSPI_CTRL 0x08 236#define MX51_ECSPI_CTRL 0x08
@@ -232,16 +255,13 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
232#define MX51_ECSPI_INT_RREN (1 << 3) 255#define MX51_ECSPI_INT_RREN (1 << 3)
233 256
234#define MX51_ECSPI_DMA 0x14 257#define MX51_ECSPI_DMA 0x14
235#define MX51_ECSPI_DMA_TX_WML_OFFSET 0 258#define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f)
236#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F 259#define MX51_ECSPI_DMA_RX_WML(wml) (((wml) & 0x3f) << 16)
237#define MX51_ECSPI_DMA_RX_WML_OFFSET 16 260#define MX51_ECSPI_DMA_RXT_WML(wml) (((wml) & 0x3f) << 24)
238#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
239#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
240#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)
241 261
242#define MX51_ECSPI_DMA_TEDEN_OFFSET 7 262#define MX51_ECSPI_DMA_TEDEN (1 << 7)
243#define MX51_ECSPI_DMA_RXDEN_OFFSET 23 263#define MX51_ECSPI_DMA_RXDEN (1 << 23)
244#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31 264#define MX51_ECSPI_DMA_RXTDEN (1 << 31)
245 265
246#define MX51_ECSPI_STAT 0x18 266#define MX51_ECSPI_STAT 0x18
247#define MX51_ECSPI_STAT_RR (1 << 3) 267#define MX51_ECSPI_STAT_RR (1 << 3)
@@ -250,14 +270,15 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
250#define MX51_ECSPI_TESTREG_LBC BIT(31) 270#define MX51_ECSPI_TESTREG_LBC BIT(31)
251 271
252/* MX51 eCSPI */ 272/* MX51 eCSPI */
253static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi, 273static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx,
254 unsigned int *fres) 274 unsigned int fspi, unsigned int *fres)
255{ 275{
256 /* 276 /*
257 * there are two 4-bit dividers, the pre-divider divides by 277 * there are two 4-bit dividers, the pre-divider divides by
258 * $pre, the post-divider by 2^$post 278 * $pre, the post-divider by 2^$post
259 */ 279 */
260 unsigned int pre, post; 280 unsigned int pre, post;
281 unsigned int fin = spi_imx->spi_clk;
261 282
262 if (unlikely(fspi > fin)) 283 if (unlikely(fspi > fin))
263 return 0; 284 return 0;
@@ -270,14 +291,14 @@ static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
270 291
271 post = max(4U, post) - 4; 292 post = max(4U, post) - 4;
272 if (unlikely(post > 0xf)) { 293 if (unlikely(post > 0xf)) {
273 pr_err("%s: cannot set clock freq: %u (base freq: %u)\n", 294 dev_err(spi_imx->dev, "cannot set clock freq: %u (base freq: %u)\n",
274 __func__, fspi, fin); 295 fspi, fin);
275 return 0xff; 296 return 0xff;
276 } 297 }
277 298
278 pre = DIV_ROUND_UP(fin, fspi << post) - 1; 299 pre = DIV_ROUND_UP(fin, fspi << post) - 1;
279 300
280 pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n", 301 dev_dbg(spi_imx->dev, "%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
281 __func__, fin, fspi, post, pre); 302 __func__, fin, fspi, post, pre);
282 303
283 /* Resulting frequency for the SCLK line. */ 304 /* Resulting frequency for the SCLK line. */
@@ -302,22 +323,17 @@ static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int
302 323
303static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx) 324static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
304{ 325{
305 u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL); 326 u32 reg;
306 327
307 if (!spi_imx->usedma) 328 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
308 reg |= MX51_ECSPI_CTRL_XCH; 329 reg |= MX51_ECSPI_CTRL_XCH;
309 else if (!spi_imx->dma_finished)
310 reg |= MX51_ECSPI_CTRL_SMC;
311 else
312 reg &= ~MX51_ECSPI_CTRL_SMC;
313 writel(reg, spi_imx->base + MX51_ECSPI_CTRL); 330 writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
314} 331}
315 332
316static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx, 333static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
317 struct spi_imx_config *config) 334 struct spi_imx_config *config)
318{ 335{
319 u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0; 336 u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0;
320 u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
321 u32 clk = config->speed_hz, delay, reg; 337 u32 clk = config->speed_hz, delay, reg;
322 338
323 /* 339 /*
@@ -330,7 +346,8 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
330 ctrl |= MX51_ECSPI_CTRL_MODE_MASK; 346 ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
331 347
332 /* set clock speed */ 348 /* set clock speed */
333 ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk); 349 ctrl |= mx51_ecspi_clkdiv(spi_imx, config->speed_hz, &clk);
350 spi_imx->spi_bus_clk = clk;
334 351
335 /* set chip select to use */ 352 /* set chip select to use */
336 ctrl |= MX51_ECSPI_CTRL_CS(config->cs); 353 ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
@@ -341,20 +358,16 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
341 358
342 if (config->mode & SPI_CPHA) 359 if (config->mode & SPI_CPHA)
343 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs); 360 cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
344 else
345 cfg &= ~MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
346 361
347 if (config->mode & SPI_CPOL) { 362 if (config->mode & SPI_CPOL) {
348 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs); 363 cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
349 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs); 364 cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
350 } else {
351 cfg &= ~MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
352 cfg &= ~MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
353 } 365 }
354 if (config->mode & SPI_CS_HIGH) 366 if (config->mode & SPI_CS_HIGH)
355 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs); 367 cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
356 else 368
357 cfg &= ~MX51_ECSPI_CONFIG_SSBPOL(config->cs); 369 if (spi_imx->usedma)
370 ctrl |= MX51_ECSPI_CTRL_SMC;
358 371
359 /* CTRL register always go first to bring out controller from reset */ 372 /* CTRL register always go first to bring out controller from reset */
360 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); 373 writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
@@ -389,22 +402,12 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
389 * Configure the DMA register: setup the watermark 402 * Configure the DMA register: setup the watermark
390 * and enable DMA request. 403 * and enable DMA request.
391 */ 404 */
392 if (spi_imx->dma_is_inited) { 405
393 dma = readl(spi_imx->base + MX51_ECSPI_DMA); 406 writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
394 407 MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
395 rx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RX_WML_OFFSET; 408 MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
396 tx_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_TX_WML_OFFSET; 409 MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
397 rxt_wml_cfg = spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET; 410 MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
398 dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
399 & ~MX51_ECSPI_DMA_RX_WML_MASK
400 & ~MX51_ECSPI_DMA_RXT_WML_MASK)
401 | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
402 |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
403 |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
404 |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
405
406 writel(dma, spi_imx->base + MX51_ECSPI_DMA);
407 }
408 411
409 return 0; 412 return 0;
410} 413}
@@ -784,11 +787,63 @@ static irqreturn_t spi_imx_isr(int irq, void *dev_id)
784 return IRQ_HANDLED; 787 return IRQ_HANDLED;
785} 788}
786 789
790static int spi_imx_dma_configure(struct spi_master *master,
791 int bytes_per_word)
792{
793 int ret;
794 enum dma_slave_buswidth buswidth;
795 struct dma_slave_config rx = {}, tx = {};
796 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
797
798 if (bytes_per_word == spi_imx->bytes_per_word)
799 /* Same as last time */
800 return 0;
801
802 switch (bytes_per_word) {
803 case 4:
804 buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
805 break;
806 case 2:
807 buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
808 break;
809 case 1:
810 buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
811 break;
812 default:
813 return -EINVAL;
814 }
815
816 tx.direction = DMA_MEM_TO_DEV;
817 tx.dst_addr = spi_imx->base_phys + MXC_CSPITXDATA;
818 tx.dst_addr_width = buswidth;
819 tx.dst_maxburst = spi_imx->wml;
820 ret = dmaengine_slave_config(master->dma_tx, &tx);
821 if (ret) {
822 dev_err(spi_imx->dev, "TX dma configuration failed with %d\n", ret);
823 return ret;
824 }
825
826 rx.direction = DMA_DEV_TO_MEM;
827 rx.src_addr = spi_imx->base_phys + MXC_CSPIRXDATA;
828 rx.src_addr_width = buswidth;
829 rx.src_maxburst = spi_imx->wml;
830 ret = dmaengine_slave_config(master->dma_rx, &rx);
831 if (ret) {
832 dev_err(spi_imx->dev, "RX dma configuration failed with %d\n", ret);
833 return ret;
834 }
835
836 spi_imx->bytes_per_word = bytes_per_word;
837
838 return 0;
839}
840
787static int spi_imx_setupxfer(struct spi_device *spi, 841static int spi_imx_setupxfer(struct spi_device *spi,
788 struct spi_transfer *t) 842 struct spi_transfer *t)
789{ 843{
790 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 844 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
791 struct spi_imx_config config; 845 struct spi_imx_config config;
846 int ret;
792 847
793 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 848 config.bpw = t ? t->bits_per_word : spi->bits_per_word;
794 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 849 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
@@ -812,6 +867,18 @@ static int spi_imx_setupxfer(struct spi_device *spi,
812 spi_imx->tx = spi_imx_buf_tx_u32; 867 spi_imx->tx = spi_imx_buf_tx_u32;
813 } 868 }
814 869
870 if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t))
871 spi_imx->usedma = 1;
872 else
873 spi_imx->usedma = 0;
874
875 if (spi_imx->usedma) {
876 ret = spi_imx_dma_configure(spi->master,
877 spi_imx_bytes_per_word(config.bpw));
878 if (ret)
879 return ret;
880 }
881
815 spi_imx->devtype_data->config(spi_imx, &config); 882 spi_imx->devtype_data->config(spi_imx, &config);
816 883
817 return 0; 884 return 0;
@@ -830,15 +897,11 @@ static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
830 dma_release_channel(master->dma_tx); 897 dma_release_channel(master->dma_tx);
831 master->dma_tx = NULL; 898 master->dma_tx = NULL;
832 } 899 }
833
834 spi_imx->dma_is_inited = 0;
835} 900}
836 901
837static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx, 902static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
838 struct spi_master *master, 903 struct spi_master *master)
839 const struct resource *res)
840{ 904{
841 struct dma_slave_config slave_config = {};
842 int ret; 905 int ret;
843 906
844 /* use pio mode for i.mx6dl chip TKT238285 */ 907 /* use pio mode for i.mx6dl chip TKT238285 */
@@ -856,16 +919,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
856 goto err; 919 goto err;
857 } 920 }
858 921
859 slave_config.direction = DMA_MEM_TO_DEV;
860 slave_config.dst_addr = res->start + MXC_CSPITXDATA;
861 slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
862 slave_config.dst_maxburst = spi_imx->wml;
863 ret = dmaengine_slave_config(master->dma_tx, &slave_config);
864 if (ret) {
865 dev_err(dev, "error in TX dma configuration.\n");
866 goto err;
867 }
868
869 /* Prepare for RX : */ 922 /* Prepare for RX : */
870 master->dma_rx = dma_request_slave_channel_reason(dev, "rx"); 923 master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
871 if (IS_ERR(master->dma_rx)) { 924 if (IS_ERR(master->dma_rx)) {
@@ -875,15 +928,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
875 goto err; 928 goto err;
876 } 929 }
877 930
878 slave_config.direction = DMA_DEV_TO_MEM; 931 spi_imx_dma_configure(master, 1);
879 slave_config.src_addr = res->start + MXC_CSPIRXDATA;
880 slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
881 slave_config.src_maxburst = spi_imx->wml;
882 ret = dmaengine_slave_config(master->dma_rx, &slave_config);
883 if (ret) {
884 dev_err(dev, "error in RX dma configuration.\n");
885 goto err;
886 }
887 932
888 init_completion(&spi_imx->dma_rx_completion); 933 init_completion(&spi_imx->dma_rx_completion);
889 init_completion(&spi_imx->dma_tx_completion); 934 init_completion(&spi_imx->dma_tx_completion);
@@ -891,7 +936,6 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
891 master->max_dma_len = MAX_SDMA_BD_BYTES; 936 master->max_dma_len = MAX_SDMA_BD_BYTES;
892 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX | 937 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
893 SPI_MASTER_MUST_TX; 938 SPI_MASTER_MUST_TX;
894 spi_imx->dma_is_inited = 1;
895 939
896 return 0; 940 return 0;
897err: 941err:
@@ -913,95 +957,81 @@ static void spi_imx_dma_tx_callback(void *cookie)
913 complete(&spi_imx->dma_tx_completion); 957 complete(&spi_imx->dma_tx_completion);
914} 958}
915 959
960static int spi_imx_calculate_timeout(struct spi_imx_data *spi_imx, int size)
961{
962 unsigned long timeout = 0;
963
964 /* Time with actual data transfer and CS change delay related to HW */
965 timeout = (8 + 4) * size / spi_imx->spi_bus_clk;
966
967 /* Add extra second for scheduler related activities */
968 timeout += 1;
969
970 /* Double calculated timeout */
971 return msecs_to_jiffies(2 * timeout * MSEC_PER_SEC);
972}
973
916static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx, 974static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
917 struct spi_transfer *transfer) 975 struct spi_transfer *transfer)
918{ 976{
919 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 977 struct dma_async_tx_descriptor *desc_tx, *desc_rx;
920 int ret; 978 unsigned long transfer_timeout;
921 unsigned long timeout; 979 unsigned long timeout;
922 struct spi_master *master = spi_imx->bitbang.master; 980 struct spi_master *master = spi_imx->bitbang.master;
923 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; 981 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
924 982
925 if (tx) { 983 /*
926 desc_tx = dmaengine_prep_slave_sg(master->dma_tx, 984 * The TX DMA setup starts the transfer, so make sure RX is configured
927 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 985 * before TX.
928 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 986 */
929 if (!desc_tx) 987 desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
930 goto tx_nodma; 988 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
931 989 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
932 desc_tx->callback = spi_imx_dma_tx_callback; 990 if (!desc_rx)
933 desc_tx->callback_param = (void *)spi_imx; 991 return -EINVAL;
934 dmaengine_submit(desc_tx);
935 }
936 992
937 if (rx) { 993 desc_rx->callback = spi_imx_dma_rx_callback;
938 desc_rx = dmaengine_prep_slave_sg(master->dma_rx, 994 desc_rx->callback_param = (void *)spi_imx;
939 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 995 dmaengine_submit(desc_rx);
940 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 996 reinit_completion(&spi_imx->dma_rx_completion);
941 if (!desc_rx) 997 dma_async_issue_pending(master->dma_rx);
942 goto rx_nodma;
943 998
944 desc_rx->callback = spi_imx_dma_rx_callback; 999 desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
945 desc_rx->callback_param = (void *)spi_imx; 1000 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
946 dmaengine_submit(desc_rx); 1001 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1002 if (!desc_tx) {
1003 dmaengine_terminate_all(master->dma_tx);
1004 return -EINVAL;
947 } 1005 }
948 1006
949 reinit_completion(&spi_imx->dma_rx_completion); 1007 desc_tx->callback = spi_imx_dma_tx_callback;
1008 desc_tx->callback_param = (void *)spi_imx;
1009 dmaengine_submit(desc_tx);
950 reinit_completion(&spi_imx->dma_tx_completion); 1010 reinit_completion(&spi_imx->dma_tx_completion);
951
952 /* Trigger the cspi module. */
953 spi_imx->dma_finished = 0;
954
955 /*
956 * Set these order to avoid potential RX overflow. The overflow may
957 * happen if we enable SPI HW before starting RX DMA due to rescheduling
958 * for another task and/or interrupt.
959 * So RX DMA enabled first to make sure data would be read out from FIFO
960 * ASAP. TX DMA enabled next to start filling TX FIFO with new data.
961 * And finaly SPI HW enabled to start actual data transfer.
962 */
963 dma_async_issue_pending(master->dma_rx);
964 dma_async_issue_pending(master->dma_tx); 1011 dma_async_issue_pending(master->dma_tx);
965 spi_imx->devtype_data->trigger(spi_imx); 1012
1013 transfer_timeout = spi_imx_calculate_timeout(spi_imx, transfer->len);
966 1014
967 /* Wait SDMA to finish the data transfer.*/ 1015 /* Wait SDMA to finish the data transfer.*/
968 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion, 1016 timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
969 IMX_DMA_TIMEOUT); 1017 transfer_timeout);
970 if (!timeout) { 1018 if (!timeout) {
971 pr_warn("%s %s: I/O Error in DMA TX\n", 1019 dev_err(spi_imx->dev, "I/O Error in DMA TX\n");
972 dev_driver_string(&master->dev),
973 dev_name(&master->dev));
974 dmaengine_terminate_all(master->dma_tx); 1020 dmaengine_terminate_all(master->dma_tx);
975 dmaengine_terminate_all(master->dma_rx); 1021 dmaengine_terminate_all(master->dma_rx);
976 } else { 1022 return -ETIMEDOUT;
977 timeout = wait_for_completion_timeout(
978 &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
979 if (!timeout) {
980 pr_warn("%s %s: I/O Error in DMA RX\n",
981 dev_driver_string(&master->dev),
982 dev_name(&master->dev));
983 spi_imx->devtype_data->reset(spi_imx);
984 dmaengine_terminate_all(master->dma_rx);
985 }
986 } 1023 }
987 1024
988 spi_imx->dma_finished = 1; 1025 timeout = wait_for_completion_timeout(&spi_imx->dma_rx_completion,
989 spi_imx->devtype_data->trigger(spi_imx); 1026 transfer_timeout);
990 1027 if (!timeout) {
991 if (!timeout) 1028 dev_err(&master->dev, "I/O Error in DMA RX\n");
992 ret = -ETIMEDOUT; 1029 spi_imx->devtype_data->reset(spi_imx);
993 else 1030 dmaengine_terminate_all(master->dma_rx);
994 ret = transfer->len; 1031 return -ETIMEDOUT;
995 1032 }
996 return ret;
997 1033
998rx_nodma: 1034 return transfer->len;
999 dmaengine_terminate_all(master->dma_tx);
1000tx_nodma:
1001 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1002 dev_driver_string(&master->dev),
1003 dev_name(&master->dev));
1004 return -EAGAIN;
1005} 1035}
1006 1036
1007static int spi_imx_pio_transfer(struct spi_device *spi, 1037static int spi_imx_pio_transfer(struct spi_device *spi,
@@ -1028,19 +1058,12 @@ static int spi_imx_pio_transfer(struct spi_device *spi,
1028static int spi_imx_transfer(struct spi_device *spi, 1058static int spi_imx_transfer(struct spi_device *spi,
1029 struct spi_transfer *transfer) 1059 struct spi_transfer *transfer)
1030{ 1060{
1031 int ret;
1032 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); 1061 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
1033 1062
1034 if (spi_imx->bitbang.master->can_dma && 1063 if (spi_imx->usedma)
1035 spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) { 1064 return spi_imx_dma_transfer(spi_imx, transfer);
1036 spi_imx->usedma = true; 1065 else
1037 ret = spi_imx_dma_transfer(spi_imx, transfer); 1066 return spi_imx_pio_transfer(spi, transfer);
1038 if (ret != -EAGAIN)
1039 return ret;
1040 }
1041 spi_imx->usedma = false;
1042
1043 return spi_imx_pio_transfer(spi, transfer);
1044} 1067}
1045 1068
1046static int spi_imx_setup(struct spi_device *spi) 1069static int spi_imx_setup(struct spi_device *spi)
@@ -1130,6 +1153,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1130 1153
1131 spi_imx = spi_master_get_devdata(master); 1154 spi_imx = spi_master_get_devdata(master);
1132 spi_imx->bitbang.master = master; 1155 spi_imx->bitbang.master = master;
1156 spi_imx->dev = &pdev->dev;
1133 1157
1134 spi_imx->devtype_data = of_id ? of_id->data : 1158 spi_imx->devtype_data = of_id ? of_id->data :
1135 (struct spi_imx_devtype_data *)pdev->id_entry->driver_data; 1159 (struct spi_imx_devtype_data *)pdev->id_entry->driver_data;
@@ -1170,6 +1194,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1170 ret = PTR_ERR(spi_imx->base); 1194 ret = PTR_ERR(spi_imx->base);
1171 goto out_master_put; 1195 goto out_master_put;
1172 } 1196 }
1197 spi_imx->base_phys = res->start;
1173 1198
1174 irq = platform_get_irq(pdev, 0); 1199 irq = platform_get_irq(pdev, 0);
1175 if (irq < 0) { 1200 if (irq < 0) {
@@ -1210,7 +1235,7 @@ static int spi_imx_probe(struct platform_device *pdev)
1210 * other chips. 1235 * other chips.
1211 */ 1236 */
1212 if (is_imx51_ecspi(spi_imx)) { 1237 if (is_imx51_ecspi(spi_imx)) {
1213 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master, res); 1238 ret = spi_imx_sdma_init(&pdev->dev, spi_imx, master);
1214 if (ret == -EPROBE_DEFER) 1239 if (ret == -EPROBE_DEFER)
1215 goto out_clk_put; 1240 goto out_clk_put;
1216 1241
diff --git a/drivers/spi/spi-lp8841-rtc.c b/drivers/spi/spi-lp8841-rtc.c
new file mode 100644
index 000000000000..faa577d282c0
--- /dev/null
+++ b/drivers/spi/spi-lp8841-rtc.c
@@ -0,0 +1,256 @@
1/*
2 * SPI master driver for ICP DAS LP-8841 RTC
3 *
4 * Copyright (C) 2016 Sergei Ianovich
5 *
6 * based on
7 *
8 * Dallas DS1302 RTC Support
9 * Copyright (C) 2002 David McCullough
10 * Copyright (C) 2003 - 2007 Paul Mundt
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22#include <linux/delay.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
28#include <linux/spi/spi.h>
29
30#define DRIVER_NAME "spi_lp8841_rtc"
31
/* Bits of the single 8-bit I/O port driving the RTC. */
#define SPI_LP8841_RTC_CE	0x01	/* chip enable, active high (output) */
#define SPI_LP8841_RTC_CLK	0x02	/* serial clock (output) */
#define SPI_LP8841_RTC_nWE	0x04	/* write enable, active low (output) */
#define SPI_LP8841_RTC_MOSI	0x08	/* data to the RTC (output) */
#define SPI_LP8841_RTC_MISO	0x01	/* data from the RTC (input) */
/*
 * NOTE(review): CE and MISO share bit 0 — presumably the output latch
 * (written with writeb) and the input port (read with ioread8) occupy
 * the same address but are distinct registers; confirm against the
 * LP-8841 hardware documentation.
 */

/*
 * REVISIT If there is support for SPI_3WIRE and SPI_LSB_FIRST in SPI
 * GPIO driver, this SPI driver can be replaced by a simple GPIO driver
 * providing 3 GPIO pins.
 */

/* Per-master state: mapped port address plus a shadow of the output latch. */
struct spi_lp8841_rtc {
	void		*iomem;		/* mapped I/O port */
	unsigned long	state;		/* last value written to the port */
};
48
49static inline void
50setsck(struct spi_lp8841_rtc *data, int is_on)
51{
52 if (is_on)
53 data->state |= SPI_LP8841_RTC_CLK;
54 else
55 data->state &= ~SPI_LP8841_RTC_CLK;
56 writeb(data->state, data->iomem);
57}
58
59static inline void
60setmosi(struct spi_lp8841_rtc *data, int is_on)
61{
62 if (is_on)
63 data->state |= SPI_LP8841_RTC_MOSI;
64 else
65 data->state &= ~SPI_LP8841_RTC_MOSI;
66 writeb(data->state, data->iomem);
67}
68
69static inline int
70getmiso(struct spi_lp8841_rtc *data)
71{
72 return ioread8(data->iomem) & SPI_LP8841_RTC_MISO;
73}
74
/*
 * Bit-bang one word of up to 32 bits, LSB first, CPHA=0.
 *
 * @data:  master state (port address and output shadow)
 * @usecs: half-period delay per clock phase
 * @cpol:  idle clock polarity
 * @flags: SPI_MASTER_NO_TX skips driving MOSI,
 *         SPI_MASTER_NO_RX skips sampling MISO
 * @word:  word to shift out (LSB first)
 * @bits:  number of bits to transfer
 *
 * Returns the received word, right-aligned; meaningful only when
 * SPI_MASTER_NO_RX is not set.  Received bits are collected at bit 31
 * and the final shift right-aligns them.
 */
static inline u32
bitbang_txrx_be_cpha0_lsb(struct spi_lp8841_rtc *data,
		unsigned usecs, unsigned cpol, unsigned flags,
		u32 word, u8 bits)
{
	/* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */

	/* after the loop this right-aligns the @bits received MSB-side bits */
	u32 shift = 32 - bits;
	/* clock starts at inactive polarity */
	for (; likely(bits); bits--) {

		/* setup LSB (to slave) on leading edge */
		if ((flags & SPI_MASTER_NO_TX) == 0)
			setmosi(data, (word & 1));

		usleep_range(usecs, usecs + 1);	/* T(setup) */

		/*
		 * sample LSB (from slave) on trailing edge
		 * NOTE(review): MISO is read before the active edge is
		 * driven below — presumably the slave presents data while
		 * the clock is idle; confirm against the RTC timing spec.
		 */
		word >>= 1;
		if ((flags & SPI_MASTER_NO_RX) == 0)
			word |= (getmiso(data) << 31);

		/* generate one full clock pulse: active, wait, back to idle */
		setsck(data, !cpol);
		usleep_range(usecs, usecs + 1);

		setsck(data, cpol);
	}

	word >>= shift;
	return word;
}
106
107static int
108spi_lp8841_rtc_transfer_one(struct spi_master *master,
109 struct spi_device *spi,
110 struct spi_transfer *t)
111{
112 struct spi_lp8841_rtc *data = spi_master_get_devdata(master);
113 unsigned count = t->len;
114 const u8 *tx = t->tx_buf;
115 u8 *rx = t->rx_buf;
116 u8 word = 0;
117 int ret = 0;
118
119 if (tx) {
120 data->state &= ~SPI_LP8841_RTC_nWE;
121 writeb(data->state, data->iomem);
122 while (likely(count > 0)) {
123 word = *tx++;
124 bitbang_txrx_be_cpha0_lsb(data, 1, 0,
125 SPI_MASTER_NO_RX, word, 8);
126 count--;
127 }
128 } else if (rx) {
129 data->state |= SPI_LP8841_RTC_nWE;
130 writeb(data->state, data->iomem);
131 while (likely(count > 0)) {
132 word = bitbang_txrx_be_cpha0_lsb(data, 1, 0,
133 SPI_MASTER_NO_TX, word, 8);
134 *rx++ = word;
135 count--;
136 }
137 } else {
138 ret = -EINVAL;
139 }
140
141 spi_finalize_current_transfer(master);
142
143 return ret;
144}
145
146static void
147spi_lp8841_rtc_set_cs(struct spi_device *spi, bool enable)
148{
149 struct spi_lp8841_rtc *data = spi_master_get_devdata(spi->master);
150
151 data->state = 0;
152 writeb(data->state, data->iomem);
153 if (enable) {
154 usleep_range(4, 5);
155 data->state |= SPI_LP8841_RTC_CE;
156 writeb(data->state, data->iomem);
157 usleep_range(4, 5);
158 }
159}
160
161static int
162spi_lp8841_rtc_setup(struct spi_device *spi)
163{
164 if ((spi->mode & SPI_CS_HIGH) == 0) {
165 dev_err(&spi->dev, "unsupported active low chip select\n");
166 return -EINVAL;
167 }
168
169 if ((spi->mode & SPI_LSB_FIRST) == 0) {
170 dev_err(&spi->dev, "unsupported MSB first mode\n");
171 return -EINVAL;
172 }
173
174 if ((spi->mode & SPI_3WIRE) == 0) {
175 dev_err(&spi->dev, "unsupported wiring. 3 wires required\n");
176 return -EINVAL;
177 }
178
179 return 0;
180}
181
#ifdef CONFIG_OF
/* Device tree match table for the LP-8841 RTC port node. */
static const struct of_device_id spi_lp8841_rtc_dt_ids[] = {
	{ .compatible = "icpdas,lp8841-spi-rtc" },
	{ }
};

MODULE_DEVICE_TABLE(of, spi_lp8841_rtc_dt_ids);
#endif
190
191static int
192spi_lp8841_rtc_probe(struct platform_device *pdev)
193{
194 int ret;
195 struct spi_master *master;
196 struct spi_lp8841_rtc *data;
197 void *iomem;
198
199 master = spi_alloc_master(&pdev->dev, sizeof(*data));
200 if (!master)
201 return -ENOMEM;
202 platform_set_drvdata(pdev, master);
203
204 master->flags = SPI_MASTER_HALF_DUPLEX;
205 master->mode_bits = SPI_CS_HIGH | SPI_3WIRE | SPI_LSB_FIRST;
206
207 master->bus_num = pdev->id;
208 master->num_chipselect = 1;
209 master->setup = spi_lp8841_rtc_setup;
210 master->set_cs = spi_lp8841_rtc_set_cs;
211 master->transfer_one = spi_lp8841_rtc_transfer_one;
212 master->bits_per_word_mask = SPI_BPW_MASK(8);
213#ifdef CONFIG_OF
214 master->dev.of_node = pdev->dev.of_node;
215#endif
216
217 data = spi_master_get_devdata(master);
218
219 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
220 data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
221 ret = PTR_ERR_OR_ZERO(data->iomem);
222 if (ret) {
223 dev_err(&pdev->dev, "failed to get IO address\n");
224 goto err_put_master;
225 }
226
227 /* register with the SPI framework */
228 ret = devm_spi_register_master(&pdev->dev, master);
229 if (ret) {
230 dev_err(&pdev->dev, "cannot register spi master\n");
231 goto err_put_master;
232 }
233
234 return ret;
235
236
237err_put_master:
238 spi_master_put(master);
239
240 return ret;
241}
242
MODULE_ALIAS("platform:" DRIVER_NAME);

/*
 * Platform driver glue.  No .remove callback: the I/O mapping and the
 * master registration are both devm-managed in probe.
 */
static struct platform_driver spi_lp8841_rtc_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = of_match_ptr(spi_lp8841_rtc_dt_ids),
	},
	.probe = spi_lp8841_rtc_probe,
};
module_platform_driver(spi_lp8841_rtc_driver);

MODULE_DESCRIPTION("SPI master driver for ICP DAS LP-8841 RTC");
MODULE_AUTHOR("Sergei Ianovich");
MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5e5fd77e2711..f7f7ba17b40e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -346,13 +346,6 @@ struct vendor_data {
346 * @clk: outgoing clock "SPICLK" for the SPI bus 346 * @clk: outgoing clock "SPICLK" for the SPI bus
347 * @master: SPI framework hookup 347 * @master: SPI framework hookup
348 * @master_info: controller-specific data from machine setup 348 * @master_info: controller-specific data from machine setup
349 * @kworker: thread struct for message pump
350 * @kworker_task: pointer to task for message pump kworker thread
351 * @pump_messages: work struct for scheduling work to the message pump
352 * @queue_lock: spinlock to syncronise access to message queue
353 * @queue: message queue
354 * @busy: message pump is busy
355 * @running: message pump is running
356 * @pump_transfers: Tasklet used in Interrupt Transfer mode 349 * @pump_transfers: Tasklet used in Interrupt Transfer mode
357 * @cur_msg: Pointer to current spi_message being processed 350 * @cur_msg: Pointer to current spi_message being processed
358 * @cur_transfer: Pointer to current spi_transfer 351 * @cur_transfer: Pointer to current spi_transfer
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index bd8b369a343c..365fc22c3572 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -254,8 +254,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
254 if (status & SSSR_ROR) { 254 if (status & SSSR_ROR) {
255 dev_err(&drv_data->pdev->dev, "FIFO overrun\n"); 255 dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
256 256
257 dmaengine_terminate_all(drv_data->rx_chan); 257 dmaengine_terminate_async(drv_data->rx_chan);
258 dmaengine_terminate_all(drv_data->tx_chan); 258 dmaengine_terminate_async(drv_data->tx_chan);
259 259
260 pxa2xx_spi_dma_transfer_complete(drv_data, true); 260 pxa2xx_spi_dma_transfer_complete(drv_data, true);
261 return IRQ_HANDLED; 261 return IRQ_HANDLED;
@@ -331,13 +331,13 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
331void pxa2xx_spi_dma_release(struct driver_data *drv_data) 331void pxa2xx_spi_dma_release(struct driver_data *drv_data)
332{ 332{
333 if (drv_data->rx_chan) { 333 if (drv_data->rx_chan) {
334 dmaengine_terminate_all(drv_data->rx_chan); 334 dmaengine_terminate_sync(drv_data->rx_chan);
335 dma_release_channel(drv_data->rx_chan); 335 dma_release_channel(drv_data->rx_chan);
336 sg_free_table(&drv_data->rx_sgt); 336 sg_free_table(&drv_data->rx_sgt);
337 drv_data->rx_chan = NULL; 337 drv_data->rx_chan = NULL;
338 } 338 }
339 if (drv_data->tx_chan) { 339 if (drv_data->tx_chan) {
340 dmaengine_terminate_all(drv_data->tx_chan); 340 dmaengine_terminate_sync(drv_data->tx_chan);
341 dma_release_channel(drv_data->tx_chan); 341 dma_release_channel(drv_data->tx_chan);
342 sg_free_table(&drv_data->tx_sgt); 342 sg_free_table(&drv_data->tx_sgt);
343 drv_data->tx_chan = NULL; 343 drv_data->tx_chan = NULL;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index d19d7f28aecb..520ed1dd5780 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -19,6 +19,7 @@ enum {
19 PORT_BSW1, 19 PORT_BSW1,
20 PORT_BSW2, 20 PORT_BSW2,
21 PORT_QUARK_X1000, 21 PORT_QUARK_X1000,
22 PORT_LPT,
22}; 23};
23 24
24struct pxa_spi_info { 25struct pxa_spi_info {
@@ -42,6 +43,9 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
42static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 }; 43static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
43static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 }; 44static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
44 45
46static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
47static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
48
45static bool lpss_dma_filter(struct dma_chan *chan, void *param) 49static bool lpss_dma_filter(struct dma_chan *chan, void *param)
46{ 50{
47 struct dw_dma_slave *dws = param; 51 struct dw_dma_slave *dws = param;
@@ -98,6 +102,14 @@ static struct pxa_spi_info spi_info_configs[] = {
98 .num_chipselect = 1, 102 .num_chipselect = 1,
99 .max_clk_rate = 50000000, 103 .max_clk_rate = 50000000,
100 }, 104 },
105 [PORT_LPT] = {
106 .type = LPSS_LPT_SSP,
107 .port_id = 0,
108 .num_chipselect = 1,
109 .max_clk_rate = 50000000,
110 .tx_param = &lpt_tx_param,
111 .rx_param = &lpt_rx_param,
112 },
101}; 113};
102 114
103static int pxa2xx_spi_pci_probe(struct pci_dev *dev, 115static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
@@ -202,6 +214,7 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
202 { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 }, 214 { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
203 { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 }, 215 { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
204 { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 }, 216 { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
217 { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
205 { }, 218 { },
206}; 219};
207MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices); 220MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index ab9914ad8365..85e59a406a4c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -65,8 +65,6 @@ MODULE_ALIAS("platform:pxa2xx-spi");
65#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24) 65#define LPSS_GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
66#define LPSS_CS_CONTROL_SW_MODE BIT(0) 66#define LPSS_CS_CONTROL_SW_MODE BIT(0)
67#define LPSS_CS_CONTROL_CS_HIGH BIT(1) 67#define LPSS_CS_CONTROL_CS_HIGH BIT(1)
68#define LPSS_CS_CONTROL_CS_SEL_SHIFT 8
69#define LPSS_CS_CONTROL_CS_SEL_MASK (3 << LPSS_CS_CONTROL_CS_SEL_SHIFT)
70#define LPSS_CAPS_CS_EN_SHIFT 9 68#define LPSS_CAPS_CS_EN_SHIFT 9
71#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT) 69#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
72 70
@@ -82,6 +80,10 @@ struct lpss_config {
82 u32 rx_threshold; 80 u32 rx_threshold;
83 u32 tx_threshold_lo; 81 u32 tx_threshold_lo;
84 u32 tx_threshold_hi; 82 u32 tx_threshold_hi;
83 /* Chip select control */
84 unsigned cs_sel_shift;
85 unsigned cs_sel_mask;
86 unsigned cs_num;
85}; 87};
86 88
87/* Keep these sorted with enum pxa_ssp_type */ 89/* Keep these sorted with enum pxa_ssp_type */
@@ -106,6 +108,19 @@ static const struct lpss_config lpss_platforms[] = {
106 .tx_threshold_lo = 160, 108 .tx_threshold_lo = 160,
107 .tx_threshold_hi = 224, 109 .tx_threshold_hi = 224,
108 }, 110 },
111 { /* LPSS_BSW_SSP */
112 .offset = 0x400,
113 .reg_general = 0x08,
114 .reg_ssp = 0x0c,
115 .reg_cs_ctrl = 0x18,
116 .reg_capabilities = -1,
117 .rx_threshold = 64,
118 .tx_threshold_lo = 160,
119 .tx_threshold_hi = 224,
120 .cs_sel_shift = 2,
121 .cs_sel_mask = 1 << 2,
122 .cs_num = 2,
123 },
109 { /* LPSS_SPT_SSP */ 124 { /* LPSS_SPT_SSP */
110 .offset = 0x200, 125 .offset = 0x200,
111 .reg_general = -1, 126 .reg_general = -1,
@@ -125,6 +140,8 @@ static const struct lpss_config lpss_platforms[] = {
125 .rx_threshold = 1, 140 .rx_threshold = 1,
126 .tx_threshold_lo = 16, 141 .tx_threshold_lo = 16,
127 .tx_threshold_hi = 48, 142 .tx_threshold_hi = 48,
143 .cs_sel_shift = 8,
144 .cs_sel_mask = 3 << 8,
128 }, 145 },
129}; 146};
130 147
@@ -139,6 +156,7 @@ static bool is_lpss_ssp(const struct driver_data *drv_data)
139 switch (drv_data->ssp_type) { 156 switch (drv_data->ssp_type) {
140 case LPSS_LPT_SSP: 157 case LPSS_LPT_SSP:
141 case LPSS_BYT_SSP: 158 case LPSS_BYT_SSP:
159 case LPSS_BSW_SSP:
142 case LPSS_SPT_SSP: 160 case LPSS_SPT_SSP:
143 case LPSS_BXT_SSP: 161 case LPSS_BXT_SSP:
144 return true; 162 return true;
@@ -288,37 +306,50 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
288 } 306 }
289} 307}
290 308
/*
 * Route the LPSS chip-select output mux to the chip select of the
 * current message.  A no-op on platforms whose lpss_config has no
 * cs_sel_mask (single fixed CS).
 */
static void lpss_ssp_select_cs(struct driver_data *drv_data,
			       const struct lpss_config *config)
{
	u32 value, cs;

	if (!config->cs_sel_mask)
		return;

	value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);

	cs = drv_data->cur_msg->spi->chip_select;
	cs <<= config->cs_sel_shift;
	if (cs != (value & config->cs_sel_mask)) {
		/*
		 * When switching another chip select output active the
		 * output must be selected first and wait 2 ssp_clk cycles
		 * before changing state to active. Otherwise a short
		 * glitch will occur on the previous chip select since
		 * output select is latched but state control is not.
		 */
		value &= ~config->cs_sel_mask;
		value |= cs;
		__lpss_ssp_write_priv(drv_data,
				      config->reg_cs_ctrl, value);
		/* wait two ssp_clk cycles (approximated from max_speed_hz) */
		ndelay(1000000000 /
		       (drv_data->master->max_speed_hz / 2));
	}
}
337
291static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) 338static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
292{ 339{
293 const struct lpss_config *config; 340 const struct lpss_config *config;
294 u32 value, cs; 341 u32 value;
295 342
296 config = lpss_get_config(drv_data); 343 config = lpss_get_config(drv_data);
297 344
345 if (enable)
346 lpss_ssp_select_cs(drv_data, config);
347
298 value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl); 348 value = __lpss_ssp_read_priv(drv_data, config->reg_cs_ctrl);
299 if (enable) { 349 if (enable)
300 cs = drv_data->cur_msg->spi->chip_select;
301 cs <<= LPSS_CS_CONTROL_CS_SEL_SHIFT;
302 if (cs != (value & LPSS_CS_CONTROL_CS_SEL_MASK)) {
303 /*
304 * When switching another chip select output active
305 * the output must be selected first and wait 2 ssp_clk
306 * cycles before changing state to active. Otherwise
307 * a short glitch will occur on the previous chip
308 * select since output select is latched but state
309 * control is not.
310 */
311 value &= ~LPSS_CS_CONTROL_CS_SEL_MASK;
312 value |= cs;
313 __lpss_ssp_write_priv(drv_data,
314 config->reg_cs_ctrl, value);
315 ndelay(1000000000 /
316 (drv_data->master->max_speed_hz / 2));
317 }
318 value &= ~LPSS_CS_CONTROL_CS_HIGH; 350 value &= ~LPSS_CS_CONTROL_CS_HIGH;
319 } else { 351 else
320 value |= LPSS_CS_CONTROL_CS_HIGH; 352 value |= LPSS_CS_CONTROL_CS_HIGH;
321 }
322 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value); 353 __lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
323} 354}
324 355
@@ -496,6 +527,7 @@ static void giveback(struct driver_data *drv_data)
496{ 527{
497 struct spi_transfer* last_transfer; 528 struct spi_transfer* last_transfer;
498 struct spi_message *msg; 529 struct spi_message *msg;
530 unsigned long timeout;
499 531
500 msg = drv_data->cur_msg; 532 msg = drv_data->cur_msg;
501 drv_data->cur_msg = NULL; 533 drv_data->cur_msg = NULL;
@@ -508,6 +540,12 @@ static void giveback(struct driver_data *drv_data)
508 if (last_transfer->delay_usecs) 540 if (last_transfer->delay_usecs)
509 udelay(last_transfer->delay_usecs); 541 udelay(last_transfer->delay_usecs);
510 542
543 /* Wait until SSP becomes idle before deasserting the CS */
544 timeout = jiffies + msecs_to_jiffies(10);
545 while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY &&
546 !time_after(jiffies, timeout))
547 cpu_relax();
548
511 /* Drop chip select UNLESS cs_change is true or we are returning 549 /* Drop chip select UNLESS cs_change is true or we are returning
512 * a message with an error, or next message is for another chip 550 * a message with an error, or next message is for another chip
513 */ 551 */
@@ -572,7 +610,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
572 610
573static void int_transfer_complete(struct driver_data *drv_data) 611static void int_transfer_complete(struct driver_data *drv_data)
574{ 612{
575 /* Stop SSP */ 613 /* Clear and disable interrupts */
576 write_SSSR_CS(drv_data, drv_data->clear_sr); 614 write_SSSR_CS(drv_data, drv_data->clear_sr);
577 reset_sccr1(drv_data); 615 reset_sccr1(drv_data);
578 if (!pxa25x_ssp_comp(drv_data)) 616 if (!pxa25x_ssp_comp(drv_data))
@@ -957,8 +995,6 @@ static void pump_transfers(unsigned long data)
957 drv_data->tx_end = drv_data->tx + transfer->len; 995 drv_data->tx_end = drv_data->tx + transfer->len;
958 drv_data->rx = transfer->rx_buf; 996 drv_data->rx = transfer->rx_buf;
959 drv_data->rx_end = drv_data->rx + transfer->len; 997 drv_data->rx_end = drv_data->rx + transfer->len;
960 drv_data->rx_dma = transfer->rx_dma;
961 drv_data->tx_dma = transfer->tx_dma;
962 drv_data->len = transfer->len; 998 drv_data->len = transfer->len;
963 drv_data->write = drv_data->tx ? chip->write : null_writer; 999 drv_data->write = drv_data->tx ? chip->write : null_writer;
964 drv_data->read = drv_data->rx ? chip->read : null_reader; 1000 drv_data->read = drv_data->rx ? chip->read : null_reader;
@@ -1001,19 +1037,6 @@ static void pump_transfers(unsigned long data)
1001 "pump_transfers: DMA burst size reduced to match bits_per_word\n"); 1037 "pump_transfers: DMA burst size reduced to match bits_per_word\n");
1002 } 1038 }
1003 1039
1004 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1005 cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
1006 if (!pxa25x_ssp_comp(drv_data))
1007 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1008 drv_data->master->max_speed_hz
1009 / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
1010 chip->enable_dma ? "DMA" : "PIO");
1011 else
1012 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1013 drv_data->master->max_speed_hz / 2
1014 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1015 chip->enable_dma ? "DMA" : "PIO");
1016
1017 message->state = RUNNING_STATE; 1040 message->state = RUNNING_STATE;
1018 1041
1019 drv_data->dma_mapped = 0; 1042 drv_data->dma_mapped = 0;
@@ -1040,6 +1063,19 @@ static void pump_transfers(unsigned long data)
1040 write_SSSR_CS(drv_data, drv_data->clear_sr); 1063 write_SSSR_CS(drv_data, drv_data->clear_sr);
1041 } 1064 }
1042 1065
1066 /* NOTE: PXA25x_SSP _could_ use external clocking ... */
1067 cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
1068 if (!pxa25x_ssp_comp(drv_data))
1069 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1070 drv_data->master->max_speed_hz
1071 / (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
1072 drv_data->dma_mapped ? "DMA" : "PIO");
1073 else
1074 dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
1075 drv_data->master->max_speed_hz / 2
1076 / (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
1077 drv_data->dma_mapped ? "DMA" : "PIO");
1078
1043 if (is_lpss_ssp(drv_data)) { 1079 if (is_lpss_ssp(drv_data)) {
1044 if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff) 1080 if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
1045 != chip->lpss_rx_threshold) 1081 != chip->lpss_rx_threshold)
@@ -1166,6 +1202,7 @@ static int setup(struct spi_device *spi)
1166 break; 1202 break;
1167 case LPSS_LPT_SSP: 1203 case LPSS_LPT_SSP:
1168 case LPSS_BYT_SSP: 1204 case LPSS_BYT_SSP:
1205 case LPSS_BSW_SSP:
1169 case LPSS_SPT_SSP: 1206 case LPSS_SPT_SSP:
1170 case LPSS_BXT_SSP: 1207 case LPSS_BXT_SSP:
1171 config = lpss_get_config(drv_data); 1208 config = lpss_get_config(drv_data);
@@ -1313,7 +1350,7 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
1313 { "INT3430", LPSS_LPT_SSP }, 1350 { "INT3430", LPSS_LPT_SSP },
1314 { "INT3431", LPSS_LPT_SSP }, 1351 { "INT3431", LPSS_LPT_SSP },
1315 { "80860F0E", LPSS_BYT_SSP }, 1352 { "80860F0E", LPSS_BYT_SSP },
1316 { "8086228E", LPSS_BYT_SSP }, 1353 { "8086228E", LPSS_BSW_SSP },
1317 { }, 1354 { },
1318}; 1355};
1319MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match); 1356MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
@@ -1347,10 +1384,14 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
1347 /* SPT-H */ 1384 /* SPT-H */
1348 { PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP }, 1385 { PCI_VDEVICE(INTEL, 0xa129), LPSS_SPT_SSP },
1349 { PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP }, 1386 { PCI_VDEVICE(INTEL, 0xa12a), LPSS_SPT_SSP },
1350 /* BXT */ 1387 /* BXT A-Step */
1351 { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP }, 1388 { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP },
1352 { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP }, 1389 { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP },
1353 { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP }, 1390 { PCI_VDEVICE(INTEL, 0x0ac6), LPSS_BXT_SSP },
1391 /* BXT B-Step */
1392 { PCI_VDEVICE(INTEL, 0x1ac2), LPSS_BXT_SSP },
1393 { PCI_VDEVICE(INTEL, 0x1ac4), LPSS_BXT_SSP },
1394 { PCI_VDEVICE(INTEL, 0x1ac6), LPSS_BXT_SSP },
1354 /* APL */ 1395 /* APL */
1355 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, 1396 { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
1356 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, 1397 { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
@@ -1438,6 +1479,29 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
1438} 1479}
1439#endif 1480#endif
1440 1481
/*
 * Translate a firmware-provided chip select number into the zero-based
 * index the SPI core uses.  Only applies to ACPI-enumerated Atom
 * (Baytrail/Braswell) SSPs; everything else passes through unchanged.
 */
static int pxa2xx_spi_fw_translate_cs(struct spi_master *master, unsigned cs)
{
	struct driver_data *drv_data = spi_master_get_devdata(master);

	if (has_acpi_companion(&drv_data->pdev->dev)) {
		switch (drv_data->ssp_type) {
		/*
		 * For Atoms the ACPI DeviceSelection used by the Windows
		 * driver starts from 1 instead of 0 so translate it here
		 * to match what Linux expects.
		 */
		case LPSS_BYT_SSP:
		case LPSS_BSW_SSP:
			return cs - 1;

		default:
			break;
		}
	}

	return cs;
}
1504
1441static int pxa2xx_spi_probe(struct platform_device *pdev) 1505static int pxa2xx_spi_probe(struct platform_device *pdev)
1442{ 1506{
1443 struct device *dev = &pdev->dev; 1507 struct device *dev = &pdev->dev;
@@ -1490,6 +1554,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1490 master->setup = setup; 1554 master->setup = setup;
1491 master->transfer_one_message = pxa2xx_spi_transfer_one_message; 1555 master->transfer_one_message = pxa2xx_spi_transfer_one_message;
1492 master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer; 1556 master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
1557 master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
1493 master->auto_runtime_pm = true; 1558 master->auto_runtime_pm = true;
1494 1559
1495 drv_data->ssp_type = ssp->type; 1560 drv_data->ssp_type = ssp->type;
@@ -1576,6 +1641,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1576 tmp &= LPSS_CAPS_CS_EN_MASK; 1641 tmp &= LPSS_CAPS_CS_EN_MASK;
1577 tmp >>= LPSS_CAPS_CS_EN_SHIFT; 1642 tmp >>= LPSS_CAPS_CS_EN_SHIFT;
1578 platform_info->num_chipselect = ffz(tmp); 1643 platform_info->num_chipselect = ffz(tmp);
1644 } else if (config->cs_num) {
1645 platform_info->num_chipselect = config->cs_num;
1579 } 1646 }
1580 } 1647 }
1581 master->num_chipselect = platform_info->num_chipselect; 1648 master->num_chipselect = platform_info->num_chipselect;
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98313aa..a1ef88948144 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -69,8 +69,6 @@ struct driver_data {
69 void *rx; 69 void *rx;
70 void *rx_end; 70 void *rx_end;
71 int dma_mapped; 71 int dma_mapped;
72 dma_addr_t rx_dma;
73 dma_addr_t tx_dma;
74 size_t rx_map_len; 72 size_t rx_map_len;
75 size_t tx_map_len; 73 size_t tx_map_len;
76 u8 n_bytes; 74 u8 n_bytes;
@@ -147,20 +145,9 @@ static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
147extern int pxa2xx_spi_flush(struct driver_data *drv_data); 145extern int pxa2xx_spi_flush(struct driver_data *drv_data);
148extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data); 146extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
149 147
150/*
151 * Select the right DMA implementation.
152 */
153#if defined(CONFIG_SPI_PXA2XX_DMA)
154#define SPI_PXA2XX_USE_DMA 1
155#define MAX_DMA_LEN SZ_64K 148#define MAX_DMA_LEN SZ_64K
156#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL) 149#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
157#else
158#undef SPI_PXA2XX_USE_DMA
159#define MAX_DMA_LEN 0
160#define DEFAULT_DMA_CR1 0
161#endif
162 150
163#ifdef SPI_PXA2XX_USE_DMA
164extern bool pxa2xx_spi_dma_is_possible(size_t len); 151extern bool pxa2xx_spi_dma_is_possible(size_t len);
165extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data); 152extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
166extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data); 153extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
@@ -173,29 +160,5 @@ extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
173 u8 bits_per_word, 160 u8 bits_per_word,
174 u32 *burst_code, 161 u32 *burst_code,
175 u32 *threshold); 162 u32 *threshold);
176#else
177static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
178static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
179{
180 return 0;
181}
182#define pxa2xx_spi_dma_transfer NULL
183static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
184 u32 dma_burst) {}
185static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
186static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
187{
188 return 0;
189}
190static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
191static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
192 struct spi_device *spi,
193 u8 bits_per_word,
194 u32 *burst_code,
195 u32 *threshold)
196{
197 return -ENODEV;
198}
199#endif
200 163
201#endif /* SPI_PXA2XX_H */ 164#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 7cb1b2d710c1..26e2688c104e 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -13,20 +13,14 @@
13 * 13 *
14 */ 14 */
15 15
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/clk.h> 16#include <linux/clk.h>
19#include <linux/err.h> 17#include <linux/dmaengine.h>
20#include <linux/delay.h> 18#include <linux/module.h>
21#include <linux/interrupt.h> 19#include <linux/of.h>
22#include <linux/platform_device.h> 20#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
25#include <linux/scatterlist.h>
26#include <linux/of.h>
27#include <linux/pm_runtime.h> 22#include <linux/pm_runtime.h>
28#include <linux/io.h> 23#include <linux/scatterlist.h>
29#include <linux/dmaengine.h>
30 24
31#define DRIVER_NAME "rockchip-spi" 25#define DRIVER_NAME "rockchip-spi"
32 26
@@ -179,7 +173,7 @@ struct rockchip_spi {
179 u8 tmode; 173 u8 tmode;
180 u8 bpw; 174 u8 bpw;
181 u8 n_bytes; 175 u8 n_bytes;
182 u8 rsd_nsecs; 176 u32 rsd_nsecs;
183 unsigned len; 177 unsigned len;
184 u32 speed; 178 u32 speed;
185 179
@@ -192,8 +186,6 @@ struct rockchip_spi {
192 /* protect state */ 186 /* protect state */
193 spinlock_t lock; 187 spinlock_t lock;
194 188
195 struct completion xfer_completion;
196
197 u32 use_dma; 189 u32 use_dma;
198 struct sg_table tx_sg; 190 struct sg_table tx_sg;
199 struct sg_table rx_sg; 191 struct sg_table rx_sg;
@@ -265,7 +257,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
265static void rockchip_spi_set_cs(struct spi_device *spi, bool enable) 257static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
266{ 258{
267 u32 ser; 259 u32 ser;
268 struct rockchip_spi *rs = spi_master_get_devdata(spi->master); 260 struct spi_master *master = spi->master;
261 struct rockchip_spi *rs = spi_master_get_devdata(master);
262
263 pm_runtime_get_sync(rs->dev);
269 264
270 ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK; 265 ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
271 266
@@ -290,6 +285,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
290 ser &= ~(1 << spi->chip_select); 285 ser &= ~(1 << spi->chip_select);
291 286
292 writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER); 287 writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
288
289 pm_runtime_put_sync(rs->dev);
293} 290}
294 291
295static int rockchip_spi_prepare_message(struct spi_master *master, 292static int rockchip_spi_prepare_message(struct spi_master *master,
@@ -319,12 +316,12 @@ static void rockchip_spi_handle_err(struct spi_master *master,
319 */ 316 */
320 if (rs->use_dma) { 317 if (rs->use_dma) {
321 if (rs->state & RXBUSY) { 318 if (rs->state & RXBUSY) {
322 dmaengine_terminate_all(rs->dma_rx.ch); 319 dmaengine_terminate_async(rs->dma_rx.ch);
323 flush_fifo(rs); 320 flush_fifo(rs);
324 } 321 }
325 322
326 if (rs->state & TXBUSY) 323 if (rs->state & TXBUSY)
327 dmaengine_terminate_all(rs->dma_tx.ch); 324 dmaengine_terminate_async(rs->dma_tx.ch);
328 } 325 }
329 326
330 spin_unlock_irqrestore(&rs->lock, flags); 327 spin_unlock_irqrestore(&rs->lock, flags);
@@ -433,7 +430,7 @@ static void rockchip_spi_dma_txcb(void *data)
433 spin_unlock_irqrestore(&rs->lock, flags); 430 spin_unlock_irqrestore(&rs->lock, flags);
434} 431}
435 432
436static void rockchip_spi_prepare_dma(struct rockchip_spi *rs) 433static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
437{ 434{
438 unsigned long flags; 435 unsigned long flags;
439 struct dma_slave_config rxconf, txconf; 436 struct dma_slave_config rxconf, txconf;
@@ -456,6 +453,8 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
456 rs->dma_rx.ch, 453 rs->dma_rx.ch,
457 rs->rx_sg.sgl, rs->rx_sg.nents, 454 rs->rx_sg.sgl, rs->rx_sg.nents,
458 rs->dma_rx.direction, DMA_PREP_INTERRUPT); 455 rs->dma_rx.direction, DMA_PREP_INTERRUPT);
456 if (!rxdesc)
457 return -EINVAL;
459 458
460 rxdesc->callback = rockchip_spi_dma_rxcb; 459 rxdesc->callback = rockchip_spi_dma_rxcb;
461 rxdesc->callback_param = rs; 460 rxdesc->callback_param = rs;
@@ -473,6 +472,11 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
473 rs->dma_tx.ch, 472 rs->dma_tx.ch,
474 rs->tx_sg.sgl, rs->tx_sg.nents, 473 rs->tx_sg.sgl, rs->tx_sg.nents,
475 rs->dma_tx.direction, DMA_PREP_INTERRUPT); 474 rs->dma_tx.direction, DMA_PREP_INTERRUPT);
475 if (!txdesc) {
476 if (rxdesc)
477 dmaengine_terminate_sync(rs->dma_rx.ch);
478 return -EINVAL;
479 }
476 480
477 txdesc->callback = rockchip_spi_dma_txcb; 481 txdesc->callback = rockchip_spi_dma_txcb;
478 txdesc->callback_param = rs; 482 txdesc->callback_param = rs;
@@ -494,6 +498,8 @@ static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
494 dmaengine_submit(txdesc); 498 dmaengine_submit(txdesc);
495 dma_async_issue_pending(rs->dma_tx.ch); 499 dma_async_issue_pending(rs->dma_tx.ch);
496 } 500 }
501
502 return 0;
497} 503}
498 504
499static void rockchip_spi_config(struct rockchip_spi *rs) 505static void rockchip_spi_config(struct rockchip_spi *rs)
@@ -503,7 +509,8 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
503 int rsd = 0; 509 int rsd = 0;
504 510
505 u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET) 511 u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
506 | (CR0_SSD_ONE << CR0_SSD_OFFSET); 512 | (CR0_SSD_ONE << CR0_SSD_OFFSET)
513 | (CR0_EM_BIG << CR0_EM_OFFSET);
507 514
508 cr0 |= (rs->n_bytes << CR0_DFS_OFFSET); 515 cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
509 cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET); 516 cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
@@ -606,12 +613,12 @@ static int rockchip_spi_transfer_one(
606 if (rs->use_dma) { 613 if (rs->use_dma) {
607 if (rs->tmode == CR0_XFM_RO) { 614 if (rs->tmode == CR0_XFM_RO) {
608 /* rx: dma must be prepared first */ 615 /* rx: dma must be prepared first */
609 rockchip_spi_prepare_dma(rs); 616 ret = rockchip_spi_prepare_dma(rs);
610 spi_enable_chip(rs, 1); 617 spi_enable_chip(rs, 1);
611 } else { 618 } else {
612 /* tx or tr: spi must be enabled first */ 619 /* tx or tr: spi must be enabled first */
613 spi_enable_chip(rs, 1); 620 spi_enable_chip(rs, 1);
614 rockchip_spi_prepare_dma(rs); 621 ret = rockchip_spi_prepare_dma(rs);
615 } 622 }
616 } else { 623 } else {
617 spi_enable_chip(rs, 1); 624 spi_enable_chip(rs, 1);
@@ -717,8 +724,14 @@ static int rockchip_spi_probe(struct platform_device *pdev)
717 master->handle_err = rockchip_spi_handle_err; 724 master->handle_err = rockchip_spi_handle_err;
718 725
719 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); 726 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
720 if (!rs->dma_tx.ch) 727 if (IS_ERR_OR_NULL(rs->dma_tx.ch)) {
728 /* Check tx to see if we need defer probing driver */
729 if (PTR_ERR(rs->dma_tx.ch) == -EPROBE_DEFER) {
730 ret = -EPROBE_DEFER;
731 goto err_get_fifo_len;
732 }
721 dev_warn(rs->dev, "Failed to request TX DMA channel\n"); 733 dev_warn(rs->dev, "Failed to request TX DMA channel\n");
734 }
722 735
723 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx"); 736 rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
724 if (!rs->dma_rx.ch) { 737 if (!rs->dma_rx.ch) {
@@ -871,6 +884,7 @@ static const struct of_device_id rockchip_spi_dt_match[] = {
871 { .compatible = "rockchip,rk3066-spi", }, 884 { .compatible = "rockchip,rk3066-spi", },
872 { .compatible = "rockchip,rk3188-spi", }, 885 { .compatible = "rockchip,rk3188-spi", },
873 { .compatible = "rockchip,rk3288-spi", }, 886 { .compatible = "rockchip,rk3288-spi", },
887 { .compatible = "rockchip,rk3399-spi", },
874 { }, 888 { },
875}; 889};
876MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match); 890MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 64318fcfacf2..eac3c960b2de 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -31,6 +31,8 @@
31#include <linux/of.h> 31#include <linux/of.h>
32#include <linux/of_device.h> 32#include <linux/of_device.h>
33#include <linux/pinctrl/consumer.h> 33#include <linux/pinctrl/consumer.h>
34#include <linux/mfd/syscon.h>
35#include <linux/regmap.h>
34 36
35#include <linux/spi/spi.h> 37#include <linux/spi/spi.h>
36 38
@@ -44,8 +46,9 @@ struct ti_qspi {
44 46
45 struct spi_master *master; 47 struct spi_master *master;
46 void __iomem *base; 48 void __iomem *base;
47 void __iomem *ctrl_base;
48 void __iomem *mmap_base; 49 void __iomem *mmap_base;
50 struct regmap *ctrl_base;
51 unsigned int ctrl_reg;
49 struct clk *fclk; 52 struct clk *fclk;
50 struct device *dev; 53 struct device *dev;
51 54
@@ -55,7 +58,7 @@ struct ti_qspi {
55 u32 cmd; 58 u32 cmd;
56 u32 dc; 59 u32 dc;
57 60
58 bool ctrl_mod; 61 bool mmap_enabled;
59}; 62};
60 63
61#define QSPI_PID (0x0) 64#define QSPI_PID (0x0)
@@ -65,11 +68,8 @@ struct ti_qspi {
65#define QSPI_SPI_CMD_REG (0x48) 68#define QSPI_SPI_CMD_REG (0x48)
66#define QSPI_SPI_STATUS_REG (0x4c) 69#define QSPI_SPI_STATUS_REG (0x4c)
67#define QSPI_SPI_DATA_REG (0x50) 70#define QSPI_SPI_DATA_REG (0x50)
68#define QSPI_SPI_SETUP0_REG (0x54) 71#define QSPI_SPI_SETUP_REG(n) ((0x54 + 4 * n))
69#define QSPI_SPI_SWITCH_REG (0x64) 72#define QSPI_SPI_SWITCH_REG (0x64)
70#define QSPI_SPI_SETUP1_REG (0x58)
71#define QSPI_SPI_SETUP2_REG (0x5c)
72#define QSPI_SPI_SETUP3_REG (0x60)
73#define QSPI_SPI_DATA_REG_1 (0x68) 73#define QSPI_SPI_DATA_REG_1 (0x68)
74#define QSPI_SPI_DATA_REG_2 (0x6c) 74#define QSPI_SPI_DATA_REG_2 (0x6c)
75#define QSPI_SPI_DATA_REG_3 (0x70) 75#define QSPI_SPI_DATA_REG_3 (0x70)
@@ -109,6 +109,17 @@ struct ti_qspi {
109 109
110#define QSPI_AUTOSUSPEND_TIMEOUT 2000 110#define QSPI_AUTOSUSPEND_TIMEOUT 2000
111 111
112#define MEM_CS_EN(n) ((n + 1) << 8)
113#define MEM_CS_MASK (7 << 8)
114
115#define MM_SWITCH 0x1
116
117#define QSPI_SETUP_RD_NORMAL (0x0 << 12)
118#define QSPI_SETUP_RD_DUAL (0x1 << 12)
119#define QSPI_SETUP_RD_QUAD (0x3 << 12)
120#define QSPI_SETUP_ADDR_SHIFT 8
121#define QSPI_SETUP_DUMMY_SHIFT 10
122
112static inline unsigned long ti_qspi_read(struct ti_qspi *qspi, 123static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
113 unsigned long reg) 124 unsigned long reg)
114{ 125{
@@ -366,6 +377,72 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
366 return 0; 377 return 0;
367} 378}
368 379
380static void ti_qspi_enable_memory_map(struct spi_device *spi)
381{
382 struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
383
384 ti_qspi_write(qspi, MM_SWITCH, QSPI_SPI_SWITCH_REG);
385 if (qspi->ctrl_base) {
386 regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
387 MEM_CS_EN(spi->chip_select),
388 MEM_CS_MASK);
389 }
390 qspi->mmap_enabled = true;
391}
392
393static void ti_qspi_disable_memory_map(struct spi_device *spi)
394{
395 struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
396
397 ti_qspi_write(qspi, 0, QSPI_SPI_SWITCH_REG);
398 if (qspi->ctrl_base)
399 regmap_update_bits(qspi->ctrl_base, qspi->ctrl_reg,
400 0, MEM_CS_MASK);
401 qspi->mmap_enabled = false;
402}
403
404static void ti_qspi_setup_mmap_read(struct spi_device *spi,
405 struct spi_flash_read_message *msg)
406{
407 struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
408 u32 memval = msg->read_opcode;
409
410 switch (msg->data_nbits) {
411 case SPI_NBITS_QUAD:
412 memval |= QSPI_SETUP_RD_QUAD;
413 break;
414 case SPI_NBITS_DUAL:
415 memval |= QSPI_SETUP_RD_DUAL;
416 break;
417 default:
418 memval |= QSPI_SETUP_RD_NORMAL;
419 break;
420 }
421 memval |= ((msg->addr_width - 1) << QSPI_SETUP_ADDR_SHIFT |
422 msg->dummy_bytes << QSPI_SETUP_DUMMY_SHIFT);
423 ti_qspi_write(qspi, memval,
424 QSPI_SPI_SETUP_REG(spi->chip_select));
425}
426
427static int ti_qspi_spi_flash_read(struct spi_device *spi,
428 struct spi_flash_read_message *msg)
429{
430 struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
431 int ret = 0;
432
433 mutex_lock(&qspi->list_lock);
434
435 if (!qspi->mmap_enabled)
436 ti_qspi_enable_memory_map(spi);
437 ti_qspi_setup_mmap_read(spi, msg);
438 memcpy_fromio(msg->buf, qspi->mmap_base + msg->from, msg->len);
439 msg->retlen = msg->len;
440
441 mutex_unlock(&qspi->list_lock);
442
443 return ret;
444}
445
369static int ti_qspi_start_transfer_one(struct spi_master *master, 446static int ti_qspi_start_transfer_one(struct spi_master *master,
370 struct spi_message *m) 447 struct spi_message *m)
371{ 448{
@@ -398,6 +475,9 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
398 475
399 mutex_lock(&qspi->list_lock); 476 mutex_lock(&qspi->list_lock);
400 477
478 if (qspi->mmap_enabled)
479 ti_qspi_disable_memory_map(spi);
480
401 list_for_each_entry(t, &m->transfers, transfer_list) { 481 list_for_each_entry(t, &m->transfers, transfer_list) {
402 qspi->cmd |= QSPI_WLEN(t->bits_per_word); 482 qspi->cmd |= QSPI_WLEN(t->bits_per_word);
403 483
@@ -441,7 +521,7 @@ static int ti_qspi_probe(struct platform_device *pdev)
441{ 521{
442 struct ti_qspi *qspi; 522 struct ti_qspi *qspi;
443 struct spi_master *master; 523 struct spi_master *master;
444 struct resource *r, *res_ctrl, *res_mmap; 524 struct resource *r, *res_mmap;
445 struct device_node *np = pdev->dev.of_node; 525 struct device_node *np = pdev->dev.of_node;
446 u32 max_freq; 526 u32 max_freq;
447 int ret = 0, num_cs, irq; 527 int ret = 0, num_cs, irq;
@@ -487,16 +567,6 @@ static int ti_qspi_probe(struct platform_device *pdev)
487 } 567 }
488 } 568 }
489 569
490 res_ctrl = platform_get_resource_byname(pdev,
491 IORESOURCE_MEM, "qspi_ctrlmod");
492 if (res_ctrl == NULL) {
493 res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
494 if (res_ctrl == NULL) {
495 dev_dbg(&pdev->dev,
496 "control module resources not required\n");
497 }
498 }
499
500 irq = platform_get_irq(pdev, 0); 570 irq = platform_get_irq(pdev, 0);
501 if (irq < 0) { 571 if (irq < 0) {
502 dev_err(&pdev->dev, "no irq resource?\n"); 572 dev_err(&pdev->dev, "no irq resource?\n");
@@ -511,20 +581,31 @@ static int ti_qspi_probe(struct platform_device *pdev)
511 goto free_master; 581 goto free_master;
512 } 582 }
513 583
514 if (res_ctrl) {
515 qspi->ctrl_mod = true;
516 qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
517 if (IS_ERR(qspi->ctrl_base)) {
518 ret = PTR_ERR(qspi->ctrl_base);
519 goto free_master;
520 }
521 }
522
523 if (res_mmap) { 584 if (res_mmap) {
524 qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap); 585 qspi->mmap_base = devm_ioremap_resource(&pdev->dev,
586 res_mmap);
587 master->spi_flash_read = ti_qspi_spi_flash_read;
525 if (IS_ERR(qspi->mmap_base)) { 588 if (IS_ERR(qspi->mmap_base)) {
526 ret = PTR_ERR(qspi->mmap_base); 589 dev_err(&pdev->dev,
527 goto free_master; 590 "falling back to PIO mode\n");
591 master->spi_flash_read = NULL;
592 }
593 }
594 qspi->mmap_enabled = false;
595
596 if (of_property_read_bool(np, "syscon-chipselects")) {
597 qspi->ctrl_base =
598 syscon_regmap_lookup_by_phandle(np,
599 "syscon-chipselects");
600 if (IS_ERR(qspi->ctrl_base))
601 return PTR_ERR(qspi->ctrl_base);
602 ret = of_property_read_u32_index(np,
603 "syscon-chipselects",
604 1, &qspi->ctrl_reg);
605 if (ret) {
606 dev_err(&pdev->dev,
607 "couldn't get ctrl_mod reg index\n");
608 return ret;
528 } 609 }
529 } 610 }
530 611
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 47eff8012a77..de2f2f90d799 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -144,6 +144,8 @@ SPI_STATISTICS_TRANSFER_BYTES_HISTO(14, "16384-32767");
144SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535"); 144SPI_STATISTICS_TRANSFER_BYTES_HISTO(15, "32768-65535");
145SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+"); 145SPI_STATISTICS_TRANSFER_BYTES_HISTO(16, "65536+");
146 146
147SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
148
147static struct attribute *spi_dev_attrs[] = { 149static struct attribute *spi_dev_attrs[] = {
148 &dev_attr_modalias.attr, 150 &dev_attr_modalias.attr,
149 NULL, 151 NULL,
@@ -181,6 +183,7 @@ static struct attribute *spi_device_statistics_attrs[] = {
181 &dev_attr_spi_device_transfer_bytes_histo14.attr, 183 &dev_attr_spi_device_transfer_bytes_histo14.attr,
182 &dev_attr_spi_device_transfer_bytes_histo15.attr, 184 &dev_attr_spi_device_transfer_bytes_histo15.attr,
183 &dev_attr_spi_device_transfer_bytes_histo16.attr, 185 &dev_attr_spi_device_transfer_bytes_histo16.attr,
186 &dev_attr_spi_device_transfers_split_maxsize.attr,
184 NULL, 187 NULL,
185}; 188};
186 189
@@ -223,6 +226,7 @@ static struct attribute *spi_master_statistics_attrs[] = {
223 &dev_attr_spi_master_transfer_bytes_histo14.attr, 226 &dev_attr_spi_master_transfer_bytes_histo14.attr,
224 &dev_attr_spi_master_transfer_bytes_histo15.attr, 227 &dev_attr_spi_master_transfer_bytes_histo15.attr,
225 &dev_attr_spi_master_transfer_bytes_histo16.attr, 228 &dev_attr_spi_master_transfer_bytes_histo16.attr,
229 &dev_attr_spi_master_transfers_split_maxsize.attr,
226 NULL, 230 NULL,
227}; 231};
228 232
@@ -702,6 +706,7 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
702 enum dma_data_direction dir) 706 enum dma_data_direction dir)
703{ 707{
704 const bool vmalloced_buf = is_vmalloc_addr(buf); 708 const bool vmalloced_buf = is_vmalloc_addr(buf);
709 unsigned int max_seg_size = dma_get_max_seg_size(dev);
705 int desc_len; 710 int desc_len;
706 int sgs; 711 int sgs;
707 struct page *vm_page; 712 struct page *vm_page;
@@ -710,10 +715,10 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
710 int i, ret; 715 int i, ret;
711 716
712 if (vmalloced_buf) { 717 if (vmalloced_buf) {
713 desc_len = PAGE_SIZE; 718 desc_len = min_t(int, max_seg_size, PAGE_SIZE);
714 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len); 719 sgs = DIV_ROUND_UP(len + offset_in_page(buf), desc_len);
715 } else { 720 } else {
716 desc_len = master->max_dma_len; 721 desc_len = min_t(int, max_seg_size, master->max_dma_len);
717 sgs = DIV_ROUND_UP(len, desc_len); 722 sgs = DIV_ROUND_UP(len, desc_len);
718 } 723 }
719 724
@@ -739,7 +744,6 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
739 sg_set_buf(&sgt->sgl[i], sg_buf, min); 744 sg_set_buf(&sgt->sgl[i], sg_buf, min);
740 } 745 }
741 746
742
743 buf += min; 747 buf += min;
744 len -= min; 748 len -= min;
745 } 749 }
@@ -1024,6 +1028,8 @@ out:
1024 if (msg->status && master->handle_err) 1028 if (msg->status && master->handle_err)
1025 master->handle_err(master, msg); 1029 master->handle_err(master, msg);
1026 1030
1031 spi_res_release(master, msg);
1032
1027 spi_finalize_current_message(master); 1033 spi_finalize_current_message(master);
1028 1034
1029 return ret; 1035 return ret;
@@ -1047,6 +1053,7 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1047 * __spi_pump_messages - function which processes spi message queue 1053 * __spi_pump_messages - function which processes spi message queue
1048 * @master: master to process queue for 1054 * @master: master to process queue for
1049 * @in_kthread: true if we are in the context of the message pump thread 1055 * @in_kthread: true if we are in the context of the message pump thread
1056 * @bus_locked: true if the bus mutex is held when calling this function
1050 * 1057 *
1051 * This function checks if there is any spi message in the queue that 1058 * This function checks if there is any spi message in the queue that
1052 * needs processing and if so call out to the driver to initialize hardware 1059 * needs processing and if so call out to the driver to initialize hardware
@@ -1056,7 +1063,8 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
1056 * inside spi_sync(); the queue extraction handling at the top of the 1063 * inside spi_sync(); the queue extraction handling at the top of the
1057 * function should deal with this safely. 1064 * function should deal with this safely.
1058 */ 1065 */
1059static void __spi_pump_messages(struct spi_master *master, bool in_kthread) 1066static void __spi_pump_messages(struct spi_master *master, bool in_kthread,
1067 bool bus_locked)
1060{ 1068{
1061 unsigned long flags; 1069 unsigned long flags;
1062 bool was_busy = false; 1070 bool was_busy = false;
@@ -1152,6 +1160,9 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1152 } 1160 }
1153 } 1161 }
1154 1162
1163 if (!bus_locked)
1164 mutex_lock(&master->bus_lock_mutex);
1165
1155 trace_spi_message_start(master->cur_msg); 1166 trace_spi_message_start(master->cur_msg);
1156 1167
1157 if (master->prepare_message) { 1168 if (master->prepare_message) {
@@ -1161,7 +1172,7 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1161 "failed to prepare message: %d\n", ret); 1172 "failed to prepare message: %d\n", ret);
1162 master->cur_msg->status = ret; 1173 master->cur_msg->status = ret;
1163 spi_finalize_current_message(master); 1174 spi_finalize_current_message(master);
1164 return; 1175 goto out;
1165 } 1176 }
1166 master->cur_msg_prepared = true; 1177 master->cur_msg_prepared = true;
1167 } 1178 }
@@ -1170,15 +1181,23 @@ static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
1170 if (ret) { 1181 if (ret) {
1171 master->cur_msg->status = ret; 1182 master->cur_msg->status = ret;
1172 spi_finalize_current_message(master); 1183 spi_finalize_current_message(master);
1173 return; 1184 goto out;
1174 } 1185 }
1175 1186
1176 ret = master->transfer_one_message(master, master->cur_msg); 1187 ret = master->transfer_one_message(master, master->cur_msg);
1177 if (ret) { 1188 if (ret) {
1178 dev_err(&master->dev, 1189 dev_err(&master->dev,
1179 "failed to transfer one message from queue\n"); 1190 "failed to transfer one message from queue\n");
1180 return; 1191 goto out;
1181 } 1192 }
1193
1194out:
1195 if (!bus_locked)
1196 mutex_unlock(&master->bus_lock_mutex);
1197
1198 /* Prod the scheduler in case transfer_one() was busy waiting */
1199 if (!ret)
1200 cond_resched();
1182} 1201}
1183 1202
1184/** 1203/**
@@ -1190,7 +1209,7 @@ static void spi_pump_messages(struct kthread_work *work)
1190 struct spi_master *master = 1209 struct spi_master *master =
1191 container_of(work, struct spi_master, pump_messages); 1210 container_of(work, struct spi_master, pump_messages);
1192 1211
1193 __spi_pump_messages(master, true); 1212 __spi_pump_messages(master, true, false);
1194} 1213}
1195 1214
1196static int spi_init_queue(struct spi_master *master) 1215static int spi_init_queue(struct spi_master *master)
@@ -1581,13 +1600,30 @@ static void of_register_spi_devices(struct spi_master *master) { }
1581static int acpi_spi_add_resource(struct acpi_resource *ares, void *data) 1600static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
1582{ 1601{
1583 struct spi_device *spi = data; 1602 struct spi_device *spi = data;
1603 struct spi_master *master = spi->master;
1584 1604
1585 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { 1605 if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
1586 struct acpi_resource_spi_serialbus *sb; 1606 struct acpi_resource_spi_serialbus *sb;
1587 1607
1588 sb = &ares->data.spi_serial_bus; 1608 sb = &ares->data.spi_serial_bus;
1589 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) { 1609 if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
1590 spi->chip_select = sb->device_selection; 1610 /*
1611 * ACPI DeviceSelection numbering is handled by the
1612 * host controller driver in Windows and can vary
1613 * from driver to driver. In Linux we always expect
1614 * 0 .. max - 1 so we need to ask the driver to
1615 * translate between the two schemes.
1616 */
1617 if (master->fw_translate_cs) {
1618 int cs = master->fw_translate_cs(master,
1619 sb->device_selection);
1620 if (cs < 0)
1621 return cs;
1622 spi->chip_select = cs;
1623 } else {
1624 spi->chip_select = sb->device_selection;
1625 }
1626
1591 spi->max_speed_hz = sb->connection_speed; 1627 spi->max_speed_hz = sb->connection_speed;
1592 1628
1593 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE) 1629 if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
@@ -2013,6 +2049,336 @@ struct spi_master *spi_busnum_to_master(u16 bus_num)
2013} 2049}
2014EXPORT_SYMBOL_GPL(spi_busnum_to_master); 2050EXPORT_SYMBOL_GPL(spi_busnum_to_master);
2015 2051
2052/*-------------------------------------------------------------------------*/
2053
2054/* Core methods for SPI resource management */
2055
2056/**
2057 * spi_res_alloc - allocate a spi resource that is life-cycle managed
2058 * during the processing of a spi_message while using
2059 * spi_transfer_one
2060 * @spi: the spi device for which we allocate memory
2061 * @release: the release code to execute for this resource
2062 * @size: size to alloc and return
2063 * @gfp: GFP allocation flags
2064 *
2065 * Return: the pointer to the allocated data
2066 *
2067 * This may get enhanced in the future to allocate from a memory pool
2068 * of the @spi_device or @spi_master to avoid repeated allocations.
2069 */
2070void *spi_res_alloc(struct spi_device *spi,
2071 spi_res_release_t release,
2072 size_t size, gfp_t gfp)
2073{
2074 struct spi_res *sres;
2075
2076 sres = kzalloc(sizeof(*sres) + size, gfp);
2077 if (!sres)
2078 return NULL;
2079
2080 INIT_LIST_HEAD(&sres->entry);
2081 sres->release = release;
2082
2083 return sres->data;
2084}
2085EXPORT_SYMBOL_GPL(spi_res_alloc);
2086
2087/**
2088 * spi_res_free - free an spi resource
2089 * @res: pointer to the custom data of a resource
2090 *
2091 */
2092void spi_res_free(void *res)
2093{
2094 struct spi_res *sres = container_of(res, struct spi_res, data);
2095
2096 if (!res)
2097 return;
2098
2099 WARN_ON(!list_empty(&sres->entry));
2100 kfree(sres);
2101}
2102EXPORT_SYMBOL_GPL(spi_res_free);
2103
2104/**
2105 * spi_res_add - add a spi_res to the spi_message
2106 * @message: the spi message
2107 * @res: the spi_resource
2108 */
2109void spi_res_add(struct spi_message *message, void *res)
2110{
2111 struct spi_res *sres = container_of(res, struct spi_res, data);
2112
2113 WARN_ON(!list_empty(&sres->entry));
2114 list_add_tail(&sres->entry, &message->resources);
2115}
2116EXPORT_SYMBOL_GPL(spi_res_add);
2117
2118/**
2119 * spi_res_release - release all spi resources for this message
2120 * @master: the @spi_master
2121 * @message: the @spi_message
2122 */
2123void spi_res_release(struct spi_master *master,
2124 struct spi_message *message)
2125{
2126 struct spi_res *res;
2127
2128 while (!list_empty(&message->resources)) {
2129 res = list_last_entry(&message->resources,
2130 struct spi_res, entry);
2131
2132 if (res->release)
2133 res->release(master, message, res->data);
2134
2135 list_del(&res->entry);
2136
2137 kfree(res);
2138 }
2139}
2140EXPORT_SYMBOL_GPL(spi_res_release);
2141
2142/*-------------------------------------------------------------------------*/
2143
2144/* Core methods for spi_message alterations */
2145
2146static void __spi_replace_transfers_release(struct spi_master *master,
2147 struct spi_message *msg,
2148 void *res)
2149{
2150 struct spi_replaced_transfers *rxfer = res;
2151 size_t i;
2152
2153 /* call extra callback if requested */
2154 if (rxfer->release)
2155 rxfer->release(master, msg, res);
2156
2157 /* insert replaced transfers back into the message */
2158 list_splice(&rxfer->replaced_transfers, rxfer->replaced_after);
2159
2160 /* remove the formerly inserted entries */
2161 for (i = 0; i < rxfer->inserted; i++)
2162 list_del(&rxfer->inserted_transfers[i].transfer_list);
2163}
2164
2165/**
2166 * spi_replace_transfers - replace transfers with several transfers
2167 * and register change with spi_message.resources
2168 * @msg: the spi_message we work upon
2169 * @xfer_first: the first spi_transfer we want to replace
2170 * @remove: number of transfers to remove
2171 * @insert: the number of transfers we want to insert instead
2172 * @release: extra release code necessary in some circumstances
2173 * @extradatasize: extra data to allocate (with alignment guarantees
2174 * of struct @spi_transfer)
2175 * @gfp: gfp flags
2176 *
2177 * Returns: pointer to @spi_replaced_transfers,
2178 * PTR_ERR(...) in case of errors.
2179 */
2180struct spi_replaced_transfers *spi_replace_transfers(
2181 struct spi_message *msg,
2182 struct spi_transfer *xfer_first,
2183 size_t remove,
2184 size_t insert,
2185 spi_replaced_release_t release,
2186 size_t extradatasize,
2187 gfp_t gfp)
2188{
2189 struct spi_replaced_transfers *rxfer;
2190 struct spi_transfer *xfer;
2191 size_t i;
2192
2193 /* allocate the structure using spi_res */
2194 rxfer = spi_res_alloc(msg->spi, __spi_replace_transfers_release,
2195 insert * sizeof(struct spi_transfer)
2196 + sizeof(struct spi_replaced_transfers)
2197 + extradatasize,
2198 gfp);
2199 if (!rxfer)
2200 return ERR_PTR(-ENOMEM);
2201
2202 /* the release code to invoke before running the generic release */
2203 rxfer->release = release;
2204
2205 /* assign extradata */
2206 if (extradatasize)
2207 rxfer->extradata =
2208 &rxfer->inserted_transfers[insert];
2209
2210 /* init the replaced_transfers list */
2211 INIT_LIST_HEAD(&rxfer->replaced_transfers);
2212
2213 /* assign the list_entry after which we should reinsert
2214 * the @replaced_transfers - it may be spi_message.messages!
2215 */
2216 rxfer->replaced_after = xfer_first->transfer_list.prev;
2217
2218 /* remove the requested number of transfers */
2219 for (i = 0; i < remove; i++) {
2220 /* if the entry after replaced_after it is msg->transfers
2221 * then we have been requested to remove more transfers
2222 * than are in the list
2223 */
2224 if (rxfer->replaced_after->next == &msg->transfers) {
2225 dev_err(&msg->spi->dev,
2226 "requested to remove more spi_transfers than are available\n");
2227 /* insert replaced transfers back into the message */
2228 list_splice(&rxfer->replaced_transfers,
2229 rxfer->replaced_after);
2230
2231 /* free the spi_replace_transfer structure */
2232 spi_res_free(rxfer);
2233
2234 /* and return with an error */
2235 return ERR_PTR(-EINVAL);
2236 }
2237
2238 /* remove the entry after replaced_after from list of
2239 * transfers and add it to list of replaced_transfers
2240 */
2241 list_move_tail(rxfer->replaced_after->next,
2242 &rxfer->replaced_transfers);
2243 }
2244
2245 /* create copy of the given xfer with identical settings
2246 * based on the first transfer to get removed
2247 */
2248 for (i = 0; i < insert; i++) {
2249 /* we need to run in reverse order */
2250 xfer = &rxfer->inserted_transfers[insert - 1 - i];
2251
2252 /* copy all spi_transfer data */
2253 memcpy(xfer, xfer_first, sizeof(*xfer));
2254
2255 /* add to list */
2256 list_add(&xfer->transfer_list, rxfer->replaced_after);
2257
2258 /* clear cs_change and delay_usecs for all but the last */
2259 if (i) {
2260 xfer->cs_change = false;
2261 xfer->delay_usecs = 0;
2262 }
2263 }
2264
2265 /* set up inserted */
2266 rxfer->inserted = insert;
2267
2268 /* and register it with spi_res/spi_message */
2269 spi_res_add(msg, rxfer);
2270
2271 return rxfer;
2272}
2273EXPORT_SYMBOL_GPL(spi_replace_transfers);
2274
2275static int __spi_split_transfer_maxsize(struct spi_master *master,
2276 struct spi_message *msg,
2277 struct spi_transfer **xferp,
2278 size_t maxsize,
2279 gfp_t gfp)
2280{
2281 struct spi_transfer *xfer = *xferp, *xfers;
2282 struct spi_replaced_transfers *srt;
2283 size_t offset;
2284 size_t count, i;
2285
2286 /* warn once about this fact that we are splitting a transfer */
2287 dev_warn_once(&msg->spi->dev,
2288 "spi_transfer of length %i exceed max length of %zu - needed to split transfers\n",
2289 xfer->len, maxsize);
2290
2291 /* calculate how many we have to replace */
2292 count = DIV_ROUND_UP(xfer->len, maxsize);
2293
2294 /* create replacement */
2295 srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
2296 if (IS_ERR(srt))
2297 return PTR_ERR(srt);
2298 xfers = srt->inserted_transfers;
2299
2300 /* now handle each of those newly inserted spi_transfers
2301 * note that the replacements spi_transfers all are preset
2302 * to the same values as *xferp, so tx_buf, rx_buf and len
2303 * are all identical (as well as most others)
2304 * so we just have to fix up len and the pointers.
2305 *
2306 * this also includes support for the depreciated
2307 * spi_message.is_dma_mapped interface
2308 */
2309
2310 /* the first transfer just needs the length modified, so we
2311 * run it outside the loop
2312 */
2313 xfers[0].len = min_t(size_t, maxsize, xfer[0].len);
2314
2315 /* all the others need rx_buf/tx_buf also set */
2316 for (i = 1, offset = maxsize; i < count; offset += maxsize, i++) {
2317 /* update rx_buf, tx_buf and dma */
2318 if (xfers[i].rx_buf)
2319 xfers[i].rx_buf += offset;
2320 if (xfers[i].rx_dma)
2321 xfers[i].rx_dma += offset;
2322 if (xfers[i].tx_buf)
2323 xfers[i].tx_buf += offset;
2324 if (xfers[i].tx_dma)
2325 xfers[i].tx_dma += offset;
2326
2327 /* update length */
2328 xfers[i].len = min(maxsize, xfers[i].len - offset);
2329 }
2330
2331 /* we set up xferp to the last entry we have inserted,
2332 * so that we skip those already split transfers
2333 */
2334 *xferp = &xfers[count - 1];
2335
2336 /* increment statistics counters */
2337 SPI_STATISTICS_INCREMENT_FIELD(&master->statistics,
2338 transfers_split_maxsize);
2339 SPI_STATISTICS_INCREMENT_FIELD(&msg->spi->statistics,
2340 transfers_split_maxsize);
2341
2342 return 0;
2343}
2344
2345/**
2346 * spi_split_tranfers_maxsize - split spi transfers into multiple transfers
2347 * when an individual transfer exceeds a
2348 * certain size
2349 * @master: the @spi_master for this transfer
2350 * @msg: the @spi_message to transform
2351 * @maxsize: the maximum when to apply this
2352 * @gfp: GFP allocation flags
2353 *
2354 * Return: status of transformation
2355 */
2356int spi_split_transfers_maxsize(struct spi_master *master,
2357 struct spi_message *msg,
2358 size_t maxsize,
2359 gfp_t gfp)
2360{
2361 struct spi_transfer *xfer;
2362 int ret;
2363
2364 /* iterate over the transfer_list,
2365 * but note that xfer is advanced to the last transfer inserted
2366 * to avoid checking sizes again unnecessarily (also xfer does
2367 * potentiall belong to a different list by the time the
2368 * replacement has happened
2369 */
2370 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
2371 if (xfer->len > maxsize) {
2372 ret = __spi_split_transfer_maxsize(
2373 master, msg, &xfer, maxsize, gfp);
2374 if (ret)
2375 return ret;
2376 }
2377 }
2378
2379 return 0;
2380}
2381EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
2016 2382
2017/*-------------------------------------------------------------------------*/ 2383/*-------------------------------------------------------------------------*/
2018 2384
@@ -2351,6 +2717,46 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
2351EXPORT_SYMBOL_GPL(spi_async_locked); 2717EXPORT_SYMBOL_GPL(spi_async_locked);
2352 2718
2353 2719
2720int spi_flash_read(struct spi_device *spi,
2721 struct spi_flash_read_message *msg)
2722
2723{
2724 struct spi_master *master = spi->master;
2725 int ret;
2726
2727 if ((msg->opcode_nbits == SPI_NBITS_DUAL ||
2728 msg->addr_nbits == SPI_NBITS_DUAL) &&
2729 !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
2730 return -EINVAL;
2731 if ((msg->opcode_nbits == SPI_NBITS_QUAD ||
2732 msg->addr_nbits == SPI_NBITS_QUAD) &&
2733 !(spi->mode & SPI_TX_QUAD))
2734 return -EINVAL;
2735 if (msg->data_nbits == SPI_NBITS_DUAL &&
2736 !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
2737 return -EINVAL;
2738 if (msg->data_nbits == SPI_NBITS_QUAD &&
2739 !(spi->mode & SPI_RX_QUAD))
2740 return -EINVAL;
2741
2742 if (master->auto_runtime_pm) {
2743 ret = pm_runtime_get_sync(master->dev.parent);
2744 if (ret < 0) {
2745 dev_err(&master->dev, "Failed to power device: %d\n",
2746 ret);
2747 return ret;
2748 }
2749 }
2750 mutex_lock(&master->bus_lock_mutex);
2751 ret = master->spi_flash_read(spi, msg);
2752 mutex_unlock(&master->bus_lock_mutex);
2753 if (master->auto_runtime_pm)
2754 pm_runtime_put(master->dev.parent);
2755
2756 return ret;
2757}
2758EXPORT_SYMBOL_GPL(spi_flash_read);
2759
2354/*-------------------------------------------------------------------------*/ 2760/*-------------------------------------------------------------------------*/
2355 2761
2356/* Utility methods for SPI master protocol drivers, layered on 2762/* Utility methods for SPI master protocol drivers, layered on
@@ -2414,7 +2820,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message,
2414 spi_sync_immediate); 2820 spi_sync_immediate);
2415 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics, 2821 SPI_STATISTICS_INCREMENT_FIELD(&spi->statistics,
2416 spi_sync_immediate); 2822 spi_sync_immediate);
2417 __spi_pump_messages(master, false); 2823 __spi_pump_messages(master, false, bus_locked);
2418 } 2824 }
2419 2825
2420 wait_for_completion(&done); 2826 wait_for_completion(&done);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index c2f2574ff61c..2a097d176ba9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -197,6 +197,7 @@ enum pxa_ssp_type {
197 QUARK_X1000_SSP, 197 QUARK_X1000_SSP,
198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */ 198 LPSS_LPT_SSP, /* Keep LPSS types sorted with lpss_platforms[] */
199 LPSS_BYT_SSP, 199 LPSS_BYT_SSP,
200 LPSS_BSW_SSP,
200 LPSS_SPT_SSP, 201 LPSS_SPT_SSP,
201 LPSS_BXT_SSP, 202 LPSS_BXT_SSP,
202}; 203};
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 53be3a4c60cb..857a9a1d82b5 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -25,6 +25,7 @@
25struct dma_chan; 25struct dma_chan;
26struct spi_master; 26struct spi_master;
27struct spi_transfer; 27struct spi_transfer;
28struct spi_flash_read_message;
28 29
29/* 30/*
30 * INTERFACES between SPI master-side drivers and SPI infrastructure. 31 * INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -53,6 +54,10 @@ extern struct bus_type spi_bus_type;
53 * 54 *
54 * @transfer_bytes_histo: 55 * @transfer_bytes_histo:
55 * transfer bytes histogramm 56 * transfer bytes histogramm
57 *
58 * @transfers_split_maxsize:
59 * number of transfers that have been split because of
60 * maxsize limit
56 */ 61 */
57struct spi_statistics { 62struct spi_statistics {
58 spinlock_t lock; /* lock for the whole structure */ 63 spinlock_t lock; /* lock for the whole structure */
@@ -72,6 +77,8 @@ struct spi_statistics {
72 77
73#define SPI_STATISTICS_HISTO_SIZE 17 78#define SPI_STATISTICS_HISTO_SIZE 17
74 unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE]; 79 unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];
80
81 unsigned long transfers_split_maxsize;
75}; 82};
76 83
77void spi_statistics_add_transfer_stats(struct spi_statistics *stats, 84void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
@@ -303,6 +310,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
303 * @min_speed_hz: Lowest supported transfer speed 310 * @min_speed_hz: Lowest supported transfer speed
304 * @max_speed_hz: Highest supported transfer speed 311 * @max_speed_hz: Highest supported transfer speed
305 * @flags: other constraints relevant to this driver 312 * @flags: other constraints relevant to this driver
313 * @max_transfer_size: function that returns the max transfer size for
314 * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
306 * @bus_lock_spinlock: spinlock for SPI bus locking 315 * @bus_lock_spinlock: spinlock for SPI bus locking
307 * @bus_lock_mutex: mutex for SPI bus locking 316 * @bus_lock_mutex: mutex for SPI bus locking
308 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use 317 * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
@@ -361,6 +370,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
361 * @handle_err: the subsystem calls the driver to handle an error that occurs 370 * @handle_err: the subsystem calls the driver to handle an error that occurs
362 * in the generic implementation of transfer_one_message(). 371 * in the generic implementation of transfer_one_message().
363 * @unprepare_message: undo any work done by prepare_message(). 372 * @unprepare_message: undo any work done by prepare_message().
 373 * @spi_flash_read: to support spi-controller hardware that provides
 374 * an accelerated interface to read from flash devices.
364 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS 375 * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
365 * number. Any individual value may be -ENOENT for CS lines that 376 * number. Any individual value may be -ENOENT for CS lines that
366 * are not GPIOs (driven by the SPI controller itself). 377 * are not GPIOs (driven by the SPI controller itself).
@@ -369,6 +380,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
369 * @dma_rx: DMA receive channel 380 * @dma_rx: DMA receive channel
370 * @dummy_rx: dummy receive buffer for full-duplex devices 381 * @dummy_rx: dummy receive buffer for full-duplex devices
371 * @dummy_tx: dummy transmit buffer for full-duplex devices 382 * @dummy_tx: dummy transmit buffer for full-duplex devices
 383 * @fw_translate_cs: If the boot firmware uses a different numbering scheme
 384 * than what Linux expects, this optional hook can be used to translate
385 * between the two.
372 * 386 *
373 * Each SPI master controller can communicate with one or more @spi_device 387 * Each SPI master controller can communicate with one or more @spi_device
374 * children. These make a small bus, sharing MOSI, MISO and SCK signals 388 * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -513,6 +527,8 @@ struct spi_master {
513 struct spi_message *message); 527 struct spi_message *message);
514 int (*unprepare_message)(struct spi_master *master, 528 int (*unprepare_message)(struct spi_master *master,
515 struct spi_message *message); 529 struct spi_message *message);
530 int (*spi_flash_read)(struct spi_device *spi,
531 struct spi_flash_read_message *msg);
516 532
517 /* 533 /*
518 * These hooks are for drivers that use a generic implementation 534 * These hooks are for drivers that use a generic implementation
@@ -537,6 +553,8 @@ struct spi_master {
537 /* dummy data for full duplex devices */ 553 /* dummy data for full duplex devices */
538 void *dummy_rx; 554 void *dummy_rx;
539 void *dummy_tx; 555 void *dummy_tx;
556
557 int (*fw_translate_cs)(struct spi_master *master, unsigned cs);
540}; 558};
541 559
542static inline void *spi_master_get_devdata(struct spi_master *master) 560static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -582,6 +600,38 @@ extern void spi_unregister_master(struct spi_master *master);
582 600
583extern struct spi_master *spi_busnum_to_master(u16 busnum); 601extern struct spi_master *spi_busnum_to_master(u16 busnum);
584 602
603/*
604 * SPI resource management while processing a SPI message
605 */
606
607typedef void (*spi_res_release_t)(struct spi_master *master,
608 struct spi_message *msg,
609 void *res);
610
611/**
612 * struct spi_res - spi resource management structure
613 * @entry: list entry
614 * @release: release code called prior to freeing this resource
615 * @data: extra data allocated for the specific use-case
616 *
617 * this is based on ideas from devres, but focused on life-cycle
618 * management during spi_message processing
619 */
620struct spi_res {
621 struct list_head entry;
622 spi_res_release_t release;
623 unsigned long long data[]; /* guarantee ull alignment */
624};
625
626extern void *spi_res_alloc(struct spi_device *spi,
627 spi_res_release_t release,
628 size_t size, gfp_t gfp);
629extern void spi_res_add(struct spi_message *message, void *res);
630extern void spi_res_free(void *res);
631
632extern void spi_res_release(struct spi_master *master,
633 struct spi_message *message);
634
585/*---------------------------------------------------------------------------*/ 635/*---------------------------------------------------------------------------*/
586 636
587/* 637/*
@@ -720,6 +770,7 @@ struct spi_transfer {
720 * @status: zero for success, else negative errno 770 * @status: zero for success, else negative errno
721 * @queue: for use by whichever driver currently owns the message 771 * @queue: for use by whichever driver currently owns the message
722 * @state: for use by whichever driver currently owns the message 772 * @state: for use by whichever driver currently owns the message
773 * @resources: for resource management when the spi message is processed
723 * 774 *
724 * A @spi_message is used to execute an atomic sequence of data transfers, 775 * A @spi_message is used to execute an atomic sequence of data transfers,
725 * each represented by a struct spi_transfer. The sequence is "atomic" 776 * each represented by a struct spi_transfer. The sequence is "atomic"
@@ -766,11 +817,15 @@ struct spi_message {
766 */ 817 */
767 struct list_head queue; 818 struct list_head queue;
768 void *state; 819 void *state;
820
 821 /* list of spi_res resources when the spi message is processed */
822 struct list_head resources;
769}; 823};
770 824
771static inline void spi_message_init_no_memset(struct spi_message *m) 825static inline void spi_message_init_no_memset(struct spi_message *m)
772{ 826{
773 INIT_LIST_HEAD(&m->transfers); 827 INIT_LIST_HEAD(&m->transfers);
828 INIT_LIST_HEAD(&m->resources);
774} 829}
775 830
776static inline void spi_message_init(struct spi_message *m) 831static inline void spi_message_init(struct spi_message *m)
@@ -854,6 +909,60 @@ spi_max_transfer_size(struct spi_device *spi)
854 909
855/*---------------------------------------------------------------------------*/ 910/*---------------------------------------------------------------------------*/
856 911
912/* SPI transfer replacement methods which make use of spi_res */
913
914struct spi_replaced_transfers;
915typedef void (*spi_replaced_release_t)(struct spi_master *master,
916 struct spi_message *msg,
917 struct spi_replaced_transfers *res);
918/**
919 * struct spi_replaced_transfers - structure describing the spi_transfer
920 * replacements that have occurred
921 * so that they can get reverted
922 * @release: some extra release code to get executed prior to
923 * relasing this structure
924 * @extradata: pointer to some extra data if requested or NULL
925 * @replaced_transfers: transfers that have been replaced and which need
926 * to get restored
927 * @replaced_after: the transfer after which the @replaced_transfers
928 * are to get re-inserted
929 * @inserted: number of transfers inserted
930 * @inserted_transfers: array of spi_transfers of array-size @inserted,
931 * that have been replacing replaced_transfers
932 *
 933 * note: @extradata will point to @inserted_transfers[@inserted]
934 * if some extra allocation is requested, so alignment will be the same
935 * as for spi_transfers
936 */
937struct spi_replaced_transfers {
938 spi_replaced_release_t release;
939 void *extradata;
940 struct list_head replaced_transfers;
941 struct list_head *replaced_after;
942 size_t inserted;
943 struct spi_transfer inserted_transfers[];
944};
945
946extern struct spi_replaced_transfers *spi_replace_transfers(
947 struct spi_message *msg,
948 struct spi_transfer *xfer_first,
949 size_t remove,
950 size_t insert,
951 spi_replaced_release_t release,
952 size_t extradatasize,
953 gfp_t gfp);
954
955/*---------------------------------------------------------------------------*/
956
957/* SPI transfer transformation methods */
958
959extern int spi_split_transfers_maxsize(struct spi_master *master,
960 struct spi_message *msg,
961 size_t maxsize,
962 gfp_t gfp);
963
964/*---------------------------------------------------------------------------*/
965
857/* All these synchronous SPI transfer routines are utilities layered 966/* All these synchronous SPI transfer routines are utilities layered
858 * over the core async transfer primitive. Here, "synchronous" means 967 * over the core async transfer primitive. Here, "synchronous" means
859 * they will sleep uninterruptibly until the async transfer completes. 968 * they will sleep uninterruptibly until the async transfer completes.
@@ -1019,6 +1128,42 @@ static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
1019 return be16_to_cpu(result); 1128 return be16_to_cpu(result);
1020} 1129}
1021 1130
1131/**
1132 * struct spi_flash_read_message - flash specific information for
1133 * spi-masters that provide accelerated flash read interfaces
1134 * @buf: buffer to read data
1135 * @from: offset within the flash from where data is to be read
1136 * @len: length of data to be read
1137 * @retlen: actual length of data read
1138 * @read_opcode: read_opcode to be used to communicate with flash
1139 * @addr_width: number of address bytes
1140 * @dummy_bytes: number of dummy bytes
1141 * @opcode_nbits: number of lines to send opcode
1142 * @addr_nbits: number of lines to send address
1143 * @data_nbits: number of lines for data
1144 */
1145struct spi_flash_read_message {
1146 void *buf;
1147 loff_t from;
1148 size_t len;
1149 size_t retlen;
1150 u8 read_opcode;
1151 u8 addr_width;
1152 u8 dummy_bytes;
1153 u8 opcode_nbits;
1154 u8 addr_nbits;
1155 u8 data_nbits;
1156};
1157
1158/* SPI core interface for flash read support */
1159static inline bool spi_flash_read_supported(struct spi_device *spi)
1160{
1161 return spi->master->spi_flash_read ? true : false;
1162}
1163
1164int spi_flash_read(struct spi_device *spi,
1165 struct spi_flash_read_message *msg);
1166
1022/*---------------------------------------------------------------------------*/ 1167/*---------------------------------------------------------------------------*/
1023 1168
1024/* 1169/*