author    Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 14:34:43 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-06-03 14:34:43 -0400
commit    e13cccfd86481bd4c0499577f44c570d334da79b (patch)
tree      f88757c397e60fe29c63c38819531eec413621ac /drivers
parent    de6b25de4584febbe1808bc782734ae163b22c26 (diff)
parent    69e25c755722056b57892bebeb1892e3a6fe8774 (diff)
Merge tag 'spi-v3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi into next
Pull spi updates from Mark Brown:
 "For this release SPI has been exceptionally quiet, all the work has
  been on improving drivers (including taking advantage of some of the
  recent framework updates):

   - DMA support for the rspi driver providing a nice performance boost
   - performance improvement for the SIRF controller in PIO mode
   - new support for the Cadence SPI IP and for pxa2xx on BayTrail"

* tag 'spi-v3.16' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (59 commits)
  spi: rspi: Extract rspi_common_transfer()
  spi: rspi: Add DMA support for RSPI on RZ/A1H
  spi: rspi: Add DMA support for QSPI on R-Car Gen2
  spi: rspi: Absorb rspi_rz_transfer_out_in() into rspi_rz_transfer_one()
  spi: rspi: Merge rspi_*_dma() into rspi_dma_transfer()
  spi: rspi: Pass sg_tables instead of spi_tranfer to rspi_*_dma()
  spi: rspi: Move RSPI-specific setup out of DMA routines
  spi: rspi: Use SPI core DMA mapping framework
  spi: rspi: SPI DMA core needs both RX and TX DMA to function
  spi: rspi: Remove unneeded resource test in DMA setup
  spi: rspi: Extract rspi_request_dma_chan()
  spi: rspi: Don't consider DMA configuration failures fatal
  spi: rspi: Extract rspi_pio_transfer()
  spi: rspi: Use core SPI_MASTER_MUST_[RT]X handling
  spi: rspi: Remove unused 16-bit DMA support
  spi: rspi: Do not call rspi_receive_init() for TX-only
  spi: rspi: Extract rspi_wait_for_{tx_empty,rx_full}()
  spi/pxa2xx: fix runtime PM enabling order
  spi/fsl-espi: fix rx_buf in fsl_espi_cmd_trans()/fsl_espi_rw_trans()
  spi: core: Ignore unsupported spi-[tr]x-bus-width property values
  ...
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/spi/Kconfig | 13
-rw-r--r-- drivers/spi/Makefile | 3
-rw-r--r-- drivers/spi/spi-adi-v3.c (renamed from drivers/spi/spi-bfin-v3.c) | 433
-rw-r--r-- drivers/spi/spi-ath79.c | 1
-rw-r--r-- drivers/spi/spi-atmel.c | 9
-rw-r--r-- drivers/spi/spi-bcm63xx-hsspi.c | 1
-rw-r--r-- drivers/spi/spi-bcm63xx.c | 1
-rw-r--r-- drivers/spi/spi-cadence.c | 673
-rw-r--r-- drivers/spi/spi-dw-mmio.c | 22
-rw-r--r-- drivers/spi/spi-dw.c | 197
-rw-r--r-- drivers/spi/spi-dw.h | 24
-rw-r--r-- drivers/spi/spi-falcon.c | 1
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 2
-rw-r--r-- drivers/spi/spi-fsl-espi.c | 40
-rw-r--r-- drivers/spi/spi-fsl-lib.c | 6
-rw-r--r-- drivers/spi/spi-fsl-lib.h | 1
-rw-r--r-- drivers/spi/spi-fsl-spi.c | 2
-rw-r--r-- drivers/spi/spi-gpio.c | 2
-rw-r--r-- drivers/spi/spi-nuc900.c | 1
-rw-r--r-- drivers/spi/spi-omap-uwire.c | 1
-rw-r--r-- drivers/spi/spi-pl022.c | 13
-rw-r--r-- drivers/spi/spi-pxa2xx-pci.c | 76
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 21
-rw-r--r-- drivers/spi/spi-qup.c | 6
-rw-r--r-- drivers/spi/spi-rspi.c | 601
-rw-r--r-- drivers/spi/spi-s3c24xx.c | 15
-rw-r--r-- drivers/spi/spi-s3c64xx.c | 6
-rw-r--r-- drivers/spi/spi-sh-msiof.c | 4
-rw-r--r-- drivers/spi/spi-sh-sci.c | 1
-rw-r--r-- drivers/spi/spi-sirf.c | 305
-rw-r--r-- drivers/spi/spi-sun4i.c | 1
-rw-r--r-- drivers/spi/spi-sun6i.c | 1
-rw-r--r-- drivers/spi/spi-tegra114.c | 2
-rw-r--r-- drivers/spi/spi-tegra20-sflash.c | 2
-rw-r--r-- drivers/spi/spi-tegra20-slink.c | 2
-rw-r--r-- drivers/spi/spi-tle62x0.c | 4
-rw-r--r-- drivers/spi/spi-topcliff-pch.c | 5
-rw-r--r-- drivers/spi/spi.c | 22
38 files changed, 1493 insertions, 1027 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 60f2b41c7310..213b5cbb9dcc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -91,8 +91,8 @@ config SPI_BFIN5XX
91 help 91 help
92 This is the SPI controller master driver for Blackfin 5xx processor. 92 This is the SPI controller master driver for Blackfin 5xx processor.
93 93
94config SPI_BFIN_V3 94config SPI_ADI_V3
95 tristate "SPI controller v3 for Blackfin" 95 tristate "SPI controller v3 for ADI"
96 depends on BF60x 96 depends on BF60x
97 help 97 help
98 This is the SPI controller v3 master driver 98 This is the SPI controller v3 master driver
@@ -148,6 +148,13 @@ config SPI_BUTTERFLY
148 inexpensive battery powered microcontroller evaluation board. 148 inexpensive battery powered microcontroller evaluation board.
149 This same cable can be used to flash new firmware. 149 This same cable can be used to flash new firmware.
150 150
151config SPI_CADENCE
152 tristate "Cadence SPI controller"
153 depends on ARM
154 help
155 This selects the Cadence SPI controller master driver
156 used by Xilinx Zynq.
157
151config SPI_CLPS711X 158config SPI_CLPS711X
152 tristate "CLPS711X host SPI controller" 159 tristate "CLPS711X host SPI controller"
153 depends on ARCH_CLPS711X || COMPILE_TEST 160 depends on ARCH_CLPS711X || COMPILE_TEST
@@ -505,7 +512,7 @@ config SPI_TEGRA20_SLINK
505 512
506config SPI_TOPCLIFF_PCH 513config SPI_TOPCLIFF_PCH
507 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI" 514 tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
508 depends on PCI 515 depends on PCI && (X86_32 || COMPILE_TEST)
509 help 516 help
510 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus 517 SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
511 used in some x86 embedded processors. 518 used in some x86 embedded processors.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bd792669e563..929c9f5eac01 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -18,10 +18,11 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
18obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o 18obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
19obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o 19obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
20obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o 20obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
21obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o 21obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o
22obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o 22obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
23obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o 23obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
24obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o 24obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
25obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
25obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o 26obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
26obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o 27obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
27obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o 28obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-adi-v3.c
index 4089d0e0d84e..dcb2287c7f8a 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-adi-v3.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices SPI3 controller driver 2 * Analog Devices SPI3 controller driver
3 * 3 *
4 * Copyright (c) 2013 Analog Devices Inc. 4 * Copyright (c) 2014 Analog Devices Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/device.h> 18#include <linux/device.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
@@ -26,35 +27,34 @@
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/spi/adi_spi3.h>
29#include <linux/types.h> 31#include <linux/types.h>
30 32
31#include <asm/bfin_spi3.h>
32#include <asm/cacheflush.h>
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <asm/portmux.h> 34#include <asm/portmux.h>
35 35
36enum bfin_spi_state { 36enum adi_spi_state {
37 START_STATE, 37 START_STATE,
38 RUNNING_STATE, 38 RUNNING_STATE,
39 DONE_STATE, 39 DONE_STATE,
40 ERROR_STATE 40 ERROR_STATE
41}; 41};
42 42
43struct bfin_spi_master; 43struct adi_spi_master;
44 44
45struct bfin_spi_transfer_ops { 45struct adi_spi_transfer_ops {
46 void (*write) (struct bfin_spi_master *); 46 void (*write) (struct adi_spi_master *);
47 void (*read) (struct bfin_spi_master *); 47 void (*read) (struct adi_spi_master *);
48 void (*duplex) (struct bfin_spi_master *); 48 void (*duplex) (struct adi_spi_master *);
49}; 49};
50 50
51/* runtime info for spi master */ 51/* runtime info for spi master */
52struct bfin_spi_master { 52struct adi_spi_master {
53 /* SPI framework hookup */ 53 /* SPI framework hookup */
54 struct spi_master *master; 54 struct spi_master *master;
55 55
56 /* Regs base of SPI controller */ 56 /* Regs base of SPI controller */
57 struct bfin_spi_regs __iomem *regs; 57 struct adi_spi_regs __iomem *regs;
58 58
59 /* Pin request list */ 59 /* Pin request list */
60 u16 *pin_req; 60 u16 *pin_req;
@@ -65,7 +65,7 @@ struct bfin_spi_master {
65 /* Current message transfer state info */ 65 /* Current message transfer state info */
66 struct spi_message *cur_msg; 66 struct spi_message *cur_msg;
67 struct spi_transfer *cur_transfer; 67 struct spi_transfer *cur_transfer;
68 struct bfin_spi_device *cur_chip; 68 struct adi_spi_device *cur_chip;
69 unsigned transfer_len; 69 unsigned transfer_len;
70 70
71 /* transfer buffer */ 71 /* transfer buffer */
@@ -90,12 +90,12 @@ struct bfin_spi_master {
90 u32 ssel; 90 u32 ssel;
91 91
92 unsigned long sclk; 92 unsigned long sclk;
93 enum bfin_spi_state state; 93 enum adi_spi_state state;
94 94
95 const struct bfin_spi_transfer_ops *ops; 95 const struct adi_spi_transfer_ops *ops;
96}; 96};
97 97
98struct bfin_spi_device { 98struct adi_spi_device {
99 u32 control; 99 u32 control;
100 u32 clock; 100 u32 clock;
101 u32 ssel; 101 u32 ssel;
@@ -105,17 +105,25 @@ struct bfin_spi_device {
105 u32 cs_gpio; 105 u32 cs_gpio;
106 u32 tx_dummy_val; /* tx value for rx only transfer */ 106 u32 tx_dummy_val; /* tx value for rx only transfer */
107 bool enable_dma; 107 bool enable_dma;
108 const struct bfin_spi_transfer_ops *ops; 108 const struct adi_spi_transfer_ops *ops;
109}; 109};
110 110
111static void bfin_spi_enable(struct bfin_spi_master *drv_data) 111static void adi_spi_enable(struct adi_spi_master *drv_data)
112{ 112{
113 bfin_write_or(&drv_data->regs->control, SPI_CTL_EN); 113 u32 ctl;
114
115 ctl = ioread32(&drv_data->regs->control);
116 ctl |= SPI_CTL_EN;
117 iowrite32(ctl, &drv_data->regs->control);
114} 118}
115 119
116static void bfin_spi_disable(struct bfin_spi_master *drv_data) 120static void adi_spi_disable(struct adi_spi_master *drv_data)
117{ 121{
118 bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN); 122 u32 ctl;
123
124 ctl = ioread32(&drv_data->regs->control);
125 ctl &= ~SPI_CTL_EN;
126 iowrite32(ctl, &drv_data->regs->control);
119} 127}
120 128
121/* Caculate the SPI_CLOCK register value based on input HZ */ 129/* Caculate the SPI_CLOCK register value based on input HZ */
@@ -128,35 +136,43 @@ static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
128 return spi_clock; 136 return spi_clock;
129} 137}
130 138
131static int bfin_spi_flush(struct bfin_spi_master *drv_data) 139static int adi_spi_flush(struct adi_spi_master *drv_data)
132{ 140{
133 unsigned long limit = loops_per_jiffy << 1; 141 unsigned long limit = loops_per_jiffy << 1;
134 142
135 /* wait for stop and clear stat */ 143 /* wait for stop and clear stat */
136 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) 144 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
137 cpu_relax(); 145 cpu_relax();
138 146
139 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 147 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
140 148
141 return limit; 149 return limit;
142} 150}
143 151
144/* Chip select operation functions for cs_change flag */ 152/* Chip select operation functions for cs_change flag */
145static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip) 153static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip)
146{ 154{
147 if (likely(chip->cs < MAX_CTRL_CS)) 155 if (likely(chip->cs < MAX_CTRL_CS)) {
148 bfin_write_and(&drv_data->regs->ssel, ~chip->ssel); 156 u32 reg;
149 else 157 reg = ioread32(&drv_data->regs->ssel);
158 reg &= ~chip->ssel;
159 iowrite32(reg, &drv_data->regs->ssel);
160 } else {
150 gpio_set_value(chip->cs_gpio, 0); 161 gpio_set_value(chip->cs_gpio, 0);
162 }
151} 163}
152 164
153static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data, 165static void adi_spi_cs_deactive(struct adi_spi_master *drv_data,
154 struct bfin_spi_device *chip) 166 struct adi_spi_device *chip)
155{ 167{
156 if (likely(chip->cs < MAX_CTRL_CS)) 168 if (likely(chip->cs < MAX_CTRL_CS)) {
157 bfin_write_or(&drv_data->regs->ssel, chip->ssel); 169 u32 reg;
158 else 170 reg = ioread32(&drv_data->regs->ssel);
171 reg |= chip->ssel;
172 iowrite32(reg, &drv_data->regs->ssel);
173 } else {
159 gpio_set_value(chip->cs_gpio, 1); 174 gpio_set_value(chip->cs_gpio, 1);
175 }
160 176
161 /* Move delay here for consistency */ 177 /* Move delay here for consistency */
162 if (chip->cs_chg_udelay) 178 if (chip->cs_chg_udelay)
@@ -164,187 +180,192 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data,
164} 180}
165 181
166/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 182/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
167static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data, 183static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data,
168 struct bfin_spi_device *chip) 184 struct adi_spi_device *chip)
169{ 185{
170 if (chip->cs < MAX_CTRL_CS) 186 if (chip->cs < MAX_CTRL_CS) {
171 bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8); 187 u32 reg;
188 reg = ioread32(&drv_data->regs->ssel);
189 reg |= chip->ssel >> 8;
190 iowrite32(reg, &drv_data->regs->ssel);
191 }
172} 192}
173 193
174static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data, 194static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data,
175 struct bfin_spi_device *chip) 195 struct adi_spi_device *chip)
176{ 196{
177 if (chip->cs < MAX_CTRL_CS) 197 if (chip->cs < MAX_CTRL_CS) {
178 bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8)); 198 u32 reg;
199 reg = ioread32(&drv_data->regs->ssel);
200 reg &= ~(chip->ssel >> 8);
201 iowrite32(reg, &drv_data->regs->ssel);
202 }
179} 203}
180 204
181/* stop controller and re-config current chip*/ 205/* stop controller and re-config current chip*/
182static void bfin_spi_restore_state(struct bfin_spi_master *drv_data) 206static void adi_spi_restore_state(struct adi_spi_master *drv_data)
183{ 207{
184 struct bfin_spi_device *chip = drv_data->cur_chip; 208 struct adi_spi_device *chip = drv_data->cur_chip;
185 209
186 /* Clear status and disable clock */ 210 /* Clear status and disable clock */
187 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 211 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
188 bfin_write(&drv_data->regs->rx_control, 0x0); 212 iowrite32(0x0, &drv_data->regs->rx_control);
189 bfin_write(&drv_data->regs->tx_control, 0x0); 213 iowrite32(0x0, &drv_data->regs->tx_control);
190 bfin_spi_disable(drv_data); 214 adi_spi_disable(drv_data);
191
192 SSYNC();
193 215
194 /* Load the registers */ 216 /* Load the registers */
195 bfin_write(&drv_data->regs->control, chip->control); 217 iowrite32(chip->control, &drv_data->regs->control);
196 bfin_write(&drv_data->regs->clock, chip->clock); 218 iowrite32(chip->clock, &drv_data->regs->clock);
197 219
198 bfin_spi_enable(drv_data); 220 adi_spi_enable(drv_data);
199 drv_data->tx_num = drv_data->rx_num = 0; 221 drv_data->tx_num = drv_data->rx_num = 0;
200 /* we always choose tx transfer initiate */ 222 /* we always choose tx transfer initiate */
201 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN); 223 iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control);
202 bfin_write(&drv_data->regs->tx_control, 224 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control);
203 SPI_TXCTL_TEN | SPI_TXCTL_TTI); 225 adi_spi_cs_active(drv_data, chip);
204 bfin_spi_cs_active(drv_data, chip);
205} 226}
206 227
207/* discard invalid rx data and empty rfifo */ 228/* discard invalid rx data and empty rfifo */
208static inline void dummy_read(struct bfin_spi_master *drv_data) 229static inline void dummy_read(struct adi_spi_master *drv_data)
209{ 230{
210 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)) 231 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE))
211 bfin_read(&drv_data->regs->rfifo); 232 ioread32(&drv_data->regs->rfifo);
212} 233}
213 234
214static void bfin_spi_u8_write(struct bfin_spi_master *drv_data) 235static void adi_spi_u8_write(struct adi_spi_master *drv_data)
215{ 236{
216 dummy_read(drv_data); 237 dummy_read(drv_data);
217 while (drv_data->tx < drv_data->tx_end) { 238 while (drv_data->tx < drv_data->tx_end) {
218 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 239 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
219 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 240 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
220 cpu_relax(); 241 cpu_relax();
221 bfin_read(&drv_data->regs->rfifo); 242 ioread32(&drv_data->regs->rfifo);
222 } 243 }
223} 244}
224 245
225static void bfin_spi_u8_read(struct bfin_spi_master *drv_data) 246static void adi_spi_u8_read(struct adi_spi_master *drv_data)
226{ 247{
227 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 248 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
228 249
229 dummy_read(drv_data); 250 dummy_read(drv_data);
230 while (drv_data->rx < drv_data->rx_end) { 251 while (drv_data->rx < drv_data->rx_end) {
231 bfin_write(&drv_data->regs->tfifo, tx_val); 252 iowrite32(tx_val, &drv_data->regs->tfifo);
232 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 253 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
233 cpu_relax(); 254 cpu_relax();
234 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 255 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
235 } 256 }
236} 257}
237 258
238static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data) 259static void adi_spi_u8_duplex(struct adi_spi_master *drv_data)
239{ 260{
240 dummy_read(drv_data); 261 dummy_read(drv_data);
241 while (drv_data->rx < drv_data->rx_end) { 262 while (drv_data->rx < drv_data->rx_end) {
242 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 263 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
243 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 264 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
244 cpu_relax(); 265 cpu_relax();
245 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 266 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
246 } 267 }
247} 268}
248 269
249static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 270static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = {
250 .write = bfin_spi_u8_write, 271 .write = adi_spi_u8_write,
251 .read = bfin_spi_u8_read, 272 .read = adi_spi_u8_read,
252 .duplex = bfin_spi_u8_duplex, 273 .duplex = adi_spi_u8_duplex,
253}; 274};
254 275
255static void bfin_spi_u16_write(struct bfin_spi_master *drv_data) 276static void adi_spi_u16_write(struct adi_spi_master *drv_data)
256{ 277{
257 dummy_read(drv_data); 278 dummy_read(drv_data);
258 while (drv_data->tx < drv_data->tx_end) { 279 while (drv_data->tx < drv_data->tx_end) {
259 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 280 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
260 drv_data->tx += 2; 281 drv_data->tx += 2;
261 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 282 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
262 cpu_relax(); 283 cpu_relax();
263 bfin_read(&drv_data->regs->rfifo); 284 ioread32(&drv_data->regs->rfifo);
264 } 285 }
265} 286}
266 287
267static void bfin_spi_u16_read(struct bfin_spi_master *drv_data) 288static void adi_spi_u16_read(struct adi_spi_master *drv_data)
268{ 289{
269 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 290 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
270 291
271 dummy_read(drv_data); 292 dummy_read(drv_data);
272 while (drv_data->rx < drv_data->rx_end) { 293 while (drv_data->rx < drv_data->rx_end) {
273 bfin_write(&drv_data->regs->tfifo, tx_val); 294 iowrite32(tx_val, &drv_data->regs->tfifo);
274 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 295 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
275 cpu_relax(); 296 cpu_relax();
276 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 297 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
277 drv_data->rx += 2; 298 drv_data->rx += 2;
278 } 299 }
279} 300}
280 301
281static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data) 302static void adi_spi_u16_duplex(struct adi_spi_master *drv_data)
282{ 303{
283 dummy_read(drv_data); 304 dummy_read(drv_data);
284 while (drv_data->rx < drv_data->rx_end) { 305 while (drv_data->rx < drv_data->rx_end) {
285 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 306 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
286 drv_data->tx += 2; 307 drv_data->tx += 2;
287 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 308 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
288 cpu_relax(); 309 cpu_relax();
289 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 310 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
290 drv_data->rx += 2; 311 drv_data->rx += 2;
291 } 312 }
292} 313}
293 314
294static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 315static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = {
295 .write = bfin_spi_u16_write, 316 .write = adi_spi_u16_write,
296 .read = bfin_spi_u16_read, 317 .read = adi_spi_u16_read,
297 .duplex = bfin_spi_u16_duplex, 318 .duplex = adi_spi_u16_duplex,
298}; 319};
299 320
300static void bfin_spi_u32_write(struct bfin_spi_master *drv_data) 321static void adi_spi_u32_write(struct adi_spi_master *drv_data)
301{ 322{
302 dummy_read(drv_data); 323 dummy_read(drv_data);
303 while (drv_data->tx < drv_data->tx_end) { 324 while (drv_data->tx < drv_data->tx_end) {
304 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 325 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
305 drv_data->tx += 4; 326 drv_data->tx += 4;
306 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 327 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
307 cpu_relax(); 328 cpu_relax();
308 bfin_read(&drv_data->regs->rfifo); 329 ioread32(&drv_data->regs->rfifo);
309 } 330 }
310} 331}
311 332
312static void bfin_spi_u32_read(struct bfin_spi_master *drv_data) 333static void adi_spi_u32_read(struct adi_spi_master *drv_data)
313{ 334{
314 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 335 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
315 336
316 dummy_read(drv_data); 337 dummy_read(drv_data);
317 while (drv_data->rx < drv_data->rx_end) { 338 while (drv_data->rx < drv_data->rx_end) {
318 bfin_write(&drv_data->regs->tfifo, tx_val); 339 iowrite32(tx_val, &drv_data->regs->tfifo);
319 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 340 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
320 cpu_relax(); 341 cpu_relax();
321 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 342 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
322 drv_data->rx += 4; 343 drv_data->rx += 4;
323 } 344 }
324} 345}
325 346
326static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data) 347static void adi_spi_u32_duplex(struct adi_spi_master *drv_data)
327{ 348{
328 dummy_read(drv_data); 349 dummy_read(drv_data);
329 while (drv_data->rx < drv_data->rx_end) { 350 while (drv_data->rx < drv_data->rx_end) {
330 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 351 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
331 drv_data->tx += 4; 352 drv_data->tx += 4;
332 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 353 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
333 cpu_relax(); 354 cpu_relax();
334 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 355 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
335 drv_data->rx += 4; 356 drv_data->rx += 4;
336 } 357 }
337} 358}
338 359
339static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = { 360static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = {
340 .write = bfin_spi_u32_write, 361 .write = adi_spi_u32_write,
341 .read = bfin_spi_u32_read, 362 .read = adi_spi_u32_read,
342 .duplex = bfin_spi_u32_duplex, 363 .duplex = adi_spi_u32_duplex,
343}; 364};
344 365
345 366
346/* test if there is more transfer to be done */ 367/* test if there is more transfer to be done */
347static void bfin_spi_next_transfer(struct bfin_spi_master *drv) 368static void adi_spi_next_transfer(struct adi_spi_master *drv)
348{ 369{
349 struct spi_message *msg = drv->cur_msg; 370 struct spi_message *msg = drv->cur_msg;
350 struct spi_transfer *t = drv->cur_transfer; 371 struct spi_transfer *t = drv->cur_transfer;
@@ -360,15 +381,15 @@ static void bfin_spi_next_transfer(struct bfin_spi_master *drv)
360 } 381 }
361} 382}
362 383
363static void bfin_spi_giveback(struct bfin_spi_master *drv_data) 384static void adi_spi_giveback(struct adi_spi_master *drv_data)
364{ 385{
365 struct bfin_spi_device *chip = drv_data->cur_chip; 386 struct adi_spi_device *chip = drv_data->cur_chip;
366 387
367 bfin_spi_cs_deactive(drv_data, chip); 388 adi_spi_cs_deactive(drv_data, chip);
368 spi_finalize_current_message(drv_data->master); 389 spi_finalize_current_message(drv_data->master);
369} 390}
370 391
371static int bfin_spi_setup_transfer(struct bfin_spi_master *drv) 392static int adi_spi_setup_transfer(struct adi_spi_master *drv)
372{ 393{
373 struct spi_transfer *t = drv->cur_transfer; 394 struct spi_transfer *t = drv->cur_transfer;
374 u32 cr, cr_width; 395 u32 cr, cr_width;
@@ -393,34 +414,33 @@ static int bfin_spi_setup_transfer(struct bfin_spi_master *drv)
393 switch (t->bits_per_word) { 414 switch (t->bits_per_word) {
394 case 8: 415 case 8:
395 cr_width = SPI_CTL_SIZE08; 416 cr_width = SPI_CTL_SIZE08;
396 drv->ops = &bfin_bfin_spi_transfer_ops_u8; 417 drv->ops = &adi_spi_transfer_ops_u8;
397 break; 418 break;
398 case 16: 419 case 16:
399 cr_width = SPI_CTL_SIZE16; 420 cr_width = SPI_CTL_SIZE16;
400 drv->ops = &bfin_bfin_spi_transfer_ops_u16; 421 drv->ops = &adi_spi_transfer_ops_u16;
401 break; 422 break;
402 case 32: 423 case 32:
403 cr_width = SPI_CTL_SIZE32; 424 cr_width = SPI_CTL_SIZE32;
404 drv->ops = &bfin_bfin_spi_transfer_ops_u32; 425 drv->ops = &adi_spi_transfer_ops_u32;
405 break; 426 break;
406 default: 427 default:
407 return -EINVAL; 428 return -EINVAL;
408 } 429 }
409 cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE; 430 cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE;
410 cr |= cr_width; 431 cr |= cr_width;
411 bfin_write(&drv->regs->control, cr); 432 iowrite32(cr, &drv->regs->control);
412 433
413 /* speed setup */ 434 /* speed setup */
414 bfin_write(&drv->regs->clock, 435 iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock);
415 hz_to_spi_clock(drv->sclk, t->speed_hz));
416 return 0; 436 return 0;
417} 437}
418 438
419static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data) 439static int adi_spi_dma_xfer(struct adi_spi_master *drv_data)
420{ 440{
421 struct spi_transfer *t = drv_data->cur_transfer; 441 struct spi_transfer *t = drv_data->cur_transfer;
422 struct spi_message *msg = drv_data->cur_msg; 442 struct spi_message *msg = drv_data->cur_msg;
423 struct bfin_spi_device *chip = drv_data->cur_chip; 443 struct adi_spi_device *chip = drv_data->cur_chip;
424 u32 dma_config; 444 u32 dma_config;
425 unsigned long word_count, word_size; 445 unsigned long word_count, word_size;
426 void *tx_buf, *rx_buf; 446 void *tx_buf, *rx_buf;
@@ -498,17 +518,16 @@ static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data)
498 set_dma_config(drv_data->rx_dma, dma_config | WNR); 518 set_dma_config(drv_data->rx_dma, dma_config | WNR);
499 enable_dma(drv_data->tx_dma); 519 enable_dma(drv_data->tx_dma);
500 enable_dma(drv_data->rx_dma); 520 enable_dma(drv_data->rx_dma);
501 SSYNC();
502 521
503 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE); 522 iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE,
504 SSYNC(); 523 &drv_data->regs->rx_control);
505 bfin_write(&drv_data->regs->tx_control, 524 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF,
506 SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF); 525 &drv_data->regs->tx_control);
507 526
508 return 0; 527 return 0;
509} 528}
510 529
511static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data) 530static int adi_spi_pio_xfer(struct adi_spi_master *drv_data)
512{ 531{
513 struct spi_message *msg = drv_data->cur_msg; 532 struct spi_message *msg = drv_data->cur_msg;
514 533
@@ -529,19 +548,19 @@ static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data)
529 return -EIO; 548 return -EIO;
530 } 549 }
531 550
532 if (!bfin_spi_flush(drv_data)) 551 if (!adi_spi_flush(drv_data))
533 return -EIO; 552 return -EIO;
534 msg->actual_length += drv_data->transfer_len; 553 msg->actual_length += drv_data->transfer_len;
535 tasklet_schedule(&drv_data->pump_transfers); 554 tasklet_schedule(&drv_data->pump_transfers);
536 return 0; 555 return 0;
537} 556}
538 557
539static void bfin_spi_pump_transfers(unsigned long data) 558static void adi_spi_pump_transfers(unsigned long data)
540{ 559{
541 struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data; 560 struct adi_spi_master *drv_data = (struct adi_spi_master *)data;
542 struct spi_message *msg = NULL; 561 struct spi_message *msg = NULL;
543 struct spi_transfer *t = NULL; 562 struct spi_transfer *t = NULL;
544 struct bfin_spi_device *chip = NULL; 563 struct adi_spi_device *chip = NULL;
545 int ret; 564 int ret;
546 565
547 /* Get current state information */ 566 /* Get current state information */
@@ -552,7 +571,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
552 /* Handle for abort */ 571 /* Handle for abort */
553 if (drv_data->state == ERROR_STATE) { 572 if (drv_data->state == ERROR_STATE) {
554 msg->status = -EIO; 573 msg->status = -EIO;
555 bfin_spi_giveback(drv_data); 574 adi_spi_giveback(drv_data);
556 return; 575 return;
557 } 576 }
558 577
@@ -560,14 +579,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
560 if (t->delay_usecs) 579 if (t->delay_usecs)
561 udelay(t->delay_usecs); 580 udelay(t->delay_usecs);
562 if (t->cs_change) 581 if (t->cs_change)
563 bfin_spi_cs_deactive(drv_data, chip); 582 adi_spi_cs_deactive(drv_data, chip);
564 bfin_spi_next_transfer(drv_data); 583 adi_spi_next_transfer(drv_data);
565 t = drv_data->cur_transfer; 584 t = drv_data->cur_transfer;
566 } 585 }
567 /* Handle end of message */ 586 /* Handle end of message */
568 if (drv_data->state == DONE_STATE) { 587 if (drv_data->state == DONE_STATE) {
569 msg->status = 0; 588 msg->status = 0;
570 bfin_spi_giveback(drv_data); 589 adi_spi_giveback(drv_data);
571 return; 590 return;
572 } 591 }
573 592
@@ -577,34 +596,34 @@ static void bfin_spi_pump_transfers(unsigned long data)
577 return; 596 return;
578 } 597 }
579 598
580 ret = bfin_spi_setup_transfer(drv_data); 599 ret = adi_spi_setup_transfer(drv_data);
581 if (ret) { 600 if (ret) {
582 msg->status = ret; 601 msg->status = ret;
583 bfin_spi_giveback(drv_data); 602 adi_spi_giveback(drv_data);
584 } 603 }
585 604
586 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 605 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
587 bfin_spi_cs_active(drv_data, chip); 606 adi_spi_cs_active(drv_data, chip);
588 drv_data->state = RUNNING_STATE; 607 drv_data->state = RUNNING_STATE;
589 608
590 if (chip->enable_dma) 609 if (chip->enable_dma)
591 ret = bfin_spi_dma_xfer(drv_data); 610 ret = adi_spi_dma_xfer(drv_data);
592 else 611 else
593 ret = bfin_spi_pio_xfer(drv_data); 612 ret = adi_spi_pio_xfer(drv_data);
594 if (ret) { 613 if (ret) {
595 msg->status = ret; 614 msg->status = ret;
596 bfin_spi_giveback(drv_data); 615 adi_spi_giveback(drv_data);
597 } 616 }
598} 617}
599 618
600static int bfin_spi_transfer_one_message(struct spi_master *master, 619static int adi_spi_transfer_one_message(struct spi_master *master,
601 struct spi_message *m) 620 struct spi_message *m)
602{ 621{
603 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 622 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
604 623
605 drv_data->cur_msg = m; 624 drv_data->cur_msg = m;
606 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 625 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
607 bfin_spi_restore_state(drv_data); 626 adi_spi_restore_state(drv_data);
608 627
609 drv_data->state = START_STATE; 628 drv_data->state = START_STATE;
610 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 629 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -630,15 +649,15 @@ static const u16 ssel[][MAX_SPI_SSEL] = {
630 P_SPI2_SSEL6, P_SPI2_SSEL7}, 649 P_SPI2_SSEL6, P_SPI2_SSEL7},
631}; 650};
632 651
633static int bfin_spi_setup(struct spi_device *spi) 652static int adi_spi_setup(struct spi_device *spi)
634{ 653{
635 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 654 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
636 struct bfin_spi_device *chip = spi_get_ctldata(spi); 655 struct adi_spi_device *chip = spi_get_ctldata(spi);
637 u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; 656 u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
638 int ret = -EINVAL; 657 int ret = -EINVAL;
639 658
640 if (!chip) { 659 if (!chip) {
641 struct bfin_spi3_chip *chip_info = spi->controller_data; 660 struct adi_spi3_chip *chip_info = spi->controller_data;
642 661
643 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 662 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
644 if (!chip) { 663 if (!chip) {
@@ -646,7 +665,7 @@ static int bfin_spi_setup(struct spi_device *spi)
646 return -ENOMEM; 665 return -ENOMEM;
647 } 666 }
648 if (chip_info) { 667 if (chip_info) {
649 if (chip_info->control & ~bfin_ctl_reg) { 668 if (chip_info->control & ~ctl_reg) {
650 dev_err(&spi->dev, 669 dev_err(&spi->dev,
651 "do not set bits that the SPI framework manages\n"); 670 "do not set bits that the SPI framework manages\n");
652 goto error; 671 goto error;
@@ -657,6 +676,7 @@ static int bfin_spi_setup(struct spi_device *spi)
657 chip->enable_dma = chip_info->enable_dma; 676 chip->enable_dma = chip_info->enable_dma;
658 } 677 }
659 chip->cs = spi->chip_select; 678 chip->cs = spi->chip_select;
679
660 if (chip->cs < MAX_CTRL_CS) { 680 if (chip->cs < MAX_CTRL_CS) {
661 chip->ssel = (1 << chip->cs) << 8; 681 chip->ssel = (1 << chip->cs) << 8;
662 ret = peripheral_request(ssel[spi->master->bus_num] 682 ret = peripheral_request(ssel[spi->master->bus_num]
@@ -678,7 +698,7 @@ static int bfin_spi_setup(struct spi_device *spi)
678 } 698 }
679 699
680 /* force a default base state */ 700 /* force a default base state */
681 chip->control &= bfin_ctl_reg; 701 chip->control &= ctl_reg;
682 702
683 if (spi->mode & SPI_CPOL) 703 if (spi->mode & SPI_CPOL)
684 chip->control |= SPI_CTL_CPOL; 704 chip->control |= SPI_CTL_CPOL;
@@ -692,8 +712,8 @@ static int bfin_spi_setup(struct spi_device *spi)
692 712
693 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); 713 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
694 714
695 bfin_spi_cs_enable(drv_data, chip); 715 adi_spi_cs_enable(drv_data, chip);
696 bfin_spi_cs_deactive(drv_data, chip); 716 adi_spi_cs_deactive(drv_data, chip);
697 717
698 return 0; 718 return 0;
699error: 719error:
@@ -705,10 +725,10 @@ error:
705 return ret; 725 return ret;
706} 726}
707 727
708static void bfin_spi_cleanup(struct spi_device *spi) 728static void adi_spi_cleanup(struct spi_device *spi)
709{ 729{
710 struct bfin_spi_device *chip = spi_get_ctldata(spi); 730 struct adi_spi_device *chip = spi_get_ctldata(spi);
711 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 731 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
712 732
713 if (!chip) 733 if (!chip)
714 return; 734 return;
@@ -716,7 +736,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
716 if (chip->cs < MAX_CTRL_CS) { 736 if (chip->cs < MAX_CTRL_CS) {
717 peripheral_free(ssel[spi->master->bus_num] 737 peripheral_free(ssel[spi->master->bus_num]
718 [chip->cs-1]); 738 [chip->cs-1]);
719 bfin_spi_cs_disable(drv_data, chip); 739 adi_spi_cs_disable(drv_data, chip);
720 } else { 740 } else {
721 gpio_free(chip->cs_gpio); 741 gpio_free(chip->cs_gpio);
722 } 742 }
@@ -725,10 +745,11 @@ static void bfin_spi_cleanup(struct spi_device *spi)
725 spi_set_ctldata(spi, NULL); 745 spi_set_ctldata(spi, NULL);
726} 746}
727 747
728static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id) 748static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id)
729{ 749{
730 struct bfin_spi_master *drv_data = dev_id; 750 struct adi_spi_master *drv_data = dev_id;
731 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); 751 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
752 u32 tx_ctl;
732 753
733 clear_dma_irqstat(drv_data->tx_dma); 754 clear_dma_irqstat(drv_data->tx_dma);
734 if (dma_stat & DMA_DONE) { 755 if (dma_stat & DMA_DONE) {
@@ -739,13 +760,15 @@ static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id)
739 if (drv_data->tx) 760 if (drv_data->tx)
740 drv_data->state = ERROR_STATE; 761 drv_data->state = ERROR_STATE;
741 } 762 }
742 bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF); 763 tx_ctl = ioread32(&drv_data->regs->tx_control);
764 tx_ctl &= ~SPI_TXCTL_TDR_NF;
765 iowrite32(tx_ctl, &drv_data->regs->tx_control);
743 return IRQ_HANDLED; 766 return IRQ_HANDLED;
744} 767}
745 768
746static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id) 769static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id)
747{ 770{
748 struct bfin_spi_master *drv_data = dev_id; 771 struct adi_spi_master *drv_data = dev_id;
749 struct spi_message *msg = drv_data->cur_msg; 772 struct spi_message *msg = drv_data->cur_msg;
750 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); 773 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
751 774
@@ -760,8 +783,8 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
760 dev_err(&drv_data->master->dev, 783 dev_err(&drv_data->master->dev,
761 "spi rx dma error: %d\n", dma_stat); 784 "spi rx dma error: %d\n", dma_stat);
762 } 785 }
763 bfin_write(&drv_data->regs->tx_control, 0); 786 iowrite32(0, &drv_data->regs->tx_control);
764 bfin_write(&drv_data->regs->rx_control, 0); 787 iowrite32(0, &drv_data->regs->rx_control);
765 if (drv_data->rx_num != drv_data->tx_num) 788 if (drv_data->rx_num != drv_data->tx_num)
766 dev_dbg(&drv_data->master->dev, 789 dev_dbg(&drv_data->master->dev,
767 "dma interrupt missing: tx=%d,rx=%d\n", 790 "dma interrupt missing: tx=%d,rx=%d\n",
@@ -770,15 +793,15 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
770 return IRQ_HANDLED; 793 return IRQ_HANDLED;
771} 794}
772 795
773static int bfin_spi_probe(struct platform_device *pdev) 796static int adi_spi_probe(struct platform_device *pdev)
774{ 797{
775 struct device *dev = &pdev->dev; 798 struct device *dev = &pdev->dev;
776 struct bfin_spi3_master *info = dev_get_platdata(dev); 799 struct adi_spi3_master *info = dev_get_platdata(dev);
777 struct spi_master *master; 800 struct spi_master *master;
778 struct bfin_spi_master *drv_data; 801 struct adi_spi_master *drv_data;
779 struct resource *mem, *res; 802 struct resource *mem, *res;
780 unsigned int tx_dma, rx_dma; 803 unsigned int tx_dma, rx_dma;
781 unsigned long sclk; 804 struct clk *sclk;
782 int ret; 805 int ret;
783 806
784 if (!info) { 807 if (!info) {
@@ -786,10 +809,10 @@ static int bfin_spi_probe(struct platform_device *pdev)
786 return -ENODEV; 809 return -ENODEV;
787 } 810 }
788 811
789 sclk = get_sclk1(); 812 sclk = devm_clk_get(dev, "spi");
790 if (!sclk) { 813 if (IS_ERR(sclk)) {
791 dev_err(dev, "can not get sclk1\n"); 814 dev_err(dev, "can not get spi clock\n");
792 return -ENXIO; 815 return PTR_ERR(sclk);
793 } 816 }
794 817
795 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 818 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -819,9 +842,9 @@ static int bfin_spi_probe(struct platform_device *pdev)
819 842
820 master->bus_num = pdev->id; 843 master->bus_num = pdev->id;
821 master->num_chipselect = info->num_chipselect; 844 master->num_chipselect = info->num_chipselect;
822 master->cleanup = bfin_spi_cleanup; 845 master->cleanup = adi_spi_cleanup;
823 master->setup = bfin_spi_setup; 846 master->setup = adi_spi_setup;
824 master->transfer_one_message = bfin_spi_transfer_one_message; 847 master->transfer_one_message = adi_spi_transfer_one_message;
825 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | 848 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
826 SPI_BPW_MASK(8); 849 SPI_BPW_MASK(8);
827 850
@@ -830,7 +853,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
830 drv_data->tx_dma = tx_dma; 853 drv_data->tx_dma = tx_dma;
831 drv_data->rx_dma = rx_dma; 854 drv_data->rx_dma = rx_dma;
832 drv_data->pin_req = info->pin_req; 855 drv_data->pin_req = info->pin_req;
833 drv_data->sclk = sclk; 856 drv_data->sclk = clk_get_rate(sclk);
834 857
835 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 858 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
836 drv_data->regs = devm_ioremap_resource(dev, mem); 859 drv_data->regs = devm_ioremap_resource(dev, mem);
@@ -845,28 +868,28 @@ static int bfin_spi_probe(struct platform_device *pdev)
845 dev_err(dev, "can not request SPI TX DMA channel\n"); 868 dev_err(dev, "can not request SPI TX DMA channel\n");
846 goto err_put_master; 869 goto err_put_master;
847 } 870 }
848 set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data); 871 set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data);
849 872
850 ret = request_dma(rx_dma, "SPI_RX_DMA"); 873 ret = request_dma(rx_dma, "SPI_RX_DMA");
851 if (ret) { 874 if (ret) {
852 dev_err(dev, "can not request SPI RX DMA channel\n"); 875 dev_err(dev, "can not request SPI RX DMA channel\n");
853 goto err_free_tx_dma; 876 goto err_free_tx_dma;
854 } 877 }
855 set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data); 878 set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data);
856 879
857 /* request CLK, MOSI and MISO */ 880 /* request CLK, MOSI and MISO */
858 ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3"); 881 ret = peripheral_request_list(drv_data->pin_req, "adi-spi3");
859 if (ret < 0) { 882 if (ret < 0) {
860 dev_err(dev, "can not request spi pins\n"); 883 dev_err(dev, "can not request spi pins\n");
861 goto err_free_rx_dma; 884 goto err_free_rx_dma;
862 } 885 }
863 886
864 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 887 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
865 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 888 iowrite32(0x0000FE00, &drv_data->regs->ssel);
866 bfin_write(&drv_data->regs->delay, 0x0); 889 iowrite32(0x0, &drv_data->regs->delay);
867 890
868 tasklet_init(&drv_data->pump_transfers, 891 tasklet_init(&drv_data->pump_transfers,
869 bfin_spi_pump_transfers, (unsigned long)drv_data); 892 adi_spi_pump_transfers, (unsigned long)drv_data);
870 /* register with the SPI framework */ 893 /* register with the SPI framework */
871 ret = devm_spi_register_master(dev, master); 894 ret = devm_spi_register_master(dev, master);
872 if (ret) { 895 if (ret) {
@@ -888,43 +911,41 @@ err_put_master:
888 return ret; 911 return ret;
889} 912}
890 913
891static int bfin_spi_remove(struct platform_device *pdev) 914static int adi_spi_remove(struct platform_device *pdev)
892{ 915{
893 struct spi_master *master = platform_get_drvdata(pdev); 916 struct spi_master *master = platform_get_drvdata(pdev);
894 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 917 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
895
896 bfin_spi_disable(drv_data);
897 918
919 adi_spi_disable(drv_data);
898 peripheral_free_list(drv_data->pin_req); 920 peripheral_free_list(drv_data->pin_req);
899 free_dma(drv_data->rx_dma); 921 free_dma(drv_data->rx_dma);
900 free_dma(drv_data->tx_dma); 922 free_dma(drv_data->tx_dma);
901
902 return 0; 923 return 0;
903} 924}
904 925
905#ifdef CONFIG_PM 926#ifdef CONFIG_PM
906static int bfin_spi_suspend(struct device *dev) 927static int adi_spi_suspend(struct device *dev)
907{ 928{
908 struct spi_master *master = dev_get_drvdata(dev); 929 struct spi_master *master = dev_get_drvdata(dev);
909 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 930 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
910 931
911 spi_master_suspend(master); 932 spi_master_suspend(master);
912 933
913 drv_data->control = bfin_read(&drv_data->regs->control); 934 drv_data->control = ioread32(&drv_data->regs->control);
914 drv_data->ssel = bfin_read(&drv_data->regs->ssel); 935 drv_data->ssel = ioread32(&drv_data->regs->ssel);
915 936
916 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 937 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
917 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 938 iowrite32(0x0000FE00, &drv_data->regs->ssel);
918 dma_disable_irq(drv_data->rx_dma); 939 dma_disable_irq(drv_data->rx_dma);
919 dma_disable_irq(drv_data->tx_dma); 940 dma_disable_irq(drv_data->tx_dma);
920 941
921 return 0; 942 return 0;
922} 943}
923 944
924static int bfin_spi_resume(struct device *dev) 945static int adi_spi_resume(struct device *dev)
925{ 946{
926 struct spi_master *master = dev_get_drvdata(dev); 947 struct spi_master *master = dev_get_drvdata(dev);
927 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 948 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
928 int ret = 0; 949 int ret = 0;
929 950
930 /* bootrom may modify spi and dma status when resume in spi boot mode */ 951 /* bootrom may modify spi and dma status when resume in spi boot mode */
@@ -932,8 +953,8 @@ static int bfin_spi_resume(struct device *dev)
932 953
933 dma_enable_irq(drv_data->rx_dma); 954 dma_enable_irq(drv_data->rx_dma);
934 dma_enable_irq(drv_data->tx_dma); 955 dma_enable_irq(drv_data->tx_dma);
935 bfin_write(&drv_data->regs->control, drv_data->control); 956 iowrite32(drv_data->control, &drv_data->regs->control);
936 bfin_write(&drv_data->regs->ssel, drv_data->ssel); 957 iowrite32(drv_data->ssel, &drv_data->regs->ssel);
937 958
938 ret = spi_master_resume(master); 959 ret = spi_master_resume(master);
939 if (ret) { 960 if (ret) {
@@ -944,21 +965,21 @@ static int bfin_spi_resume(struct device *dev)
944 return ret; 965 return ret;
945} 966}
946#endif 967#endif
947static const struct dev_pm_ops bfin_spi_pm_ops = { 968static const struct dev_pm_ops adi_spi_pm_ops = {
948 SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume) 969 SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume)
949}; 970};
950 971
951MODULE_ALIAS("platform:bfin-spi3"); 972MODULE_ALIAS("platform:adi-spi3");
952static struct platform_driver bfin_spi_driver = { 973static struct platform_driver adi_spi_driver = {
953 .driver = { 974 .driver = {
954 .name = "bfin-spi3", 975 .name = "adi-spi3",
955 .owner = THIS_MODULE, 976 .owner = THIS_MODULE,
956 .pm = &bfin_spi_pm_ops, 977 .pm = &adi_spi_pm_ops,
957 }, 978 },
958 .remove = bfin_spi_remove, 979 .remove = adi_spi_remove,
959}; 980};
960 981
961module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe); 982module_platform_driver_probe(adi_spi_driver, adi_spi_probe);
962 983
963MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); 984MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
964MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); 985MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 3898b0b9ee77..058db0fe8dc7 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -16,7 +16,6 @@
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/workqueue.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21#include <linux/io.h> 20#include <linux/io.h>
22#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 079e6b1b0cdb..92a6f0d93233 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -224,7 +224,7 @@ struct atmel_spi {
224 struct platform_device *pdev; 224 struct platform_device *pdev;
225 225
226 struct spi_transfer *current_transfer; 226 struct spi_transfer *current_transfer;
227 unsigned long current_remaining_bytes; 227 int current_remaining_bytes;
228 int done_status; 228 int done_status;
229 229
230 struct completion xfer_completion; 230 struct completion xfer_completion;
@@ -874,8 +874,9 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
874 spi_readl(as, RDR); 874 spi_readl(as, RDR);
875 } 875 }
876 if (xfer->bits_per_word > 8) { 876 if (xfer->bits_per_word > 8) {
877 as->current_remaining_bytes -= 2; 877 if (as->current_remaining_bytes > 2)
878 if (as->current_remaining_bytes < 0) 878 as->current_remaining_bytes -= 2;
879 else
879 as->current_remaining_bytes = 0; 880 as->current_remaining_bytes = 0;
880 } else { 881 } else {
881 as->current_remaining_bytes--; 882 as->current_remaining_bytes--;
@@ -1110,6 +1111,8 @@ static int atmel_spi_one_transfer(struct spi_master *master,
1110 atmel_spi_next_xfer_pio(master, xfer); 1111 atmel_spi_next_xfer_pio(master, xfer);
1111 } else { 1112 } else {
1112 as->current_remaining_bytes -= len; 1113 as->current_remaining_bytes -= len;
1114 if (as->current_remaining_bytes < 0)
1115 as->current_remaining_bytes = 0;
1113 } 1116 }
1114 } else { 1117 } else {
1115 atmel_spi_next_xfer_pio(master, xfer); 1118 atmel_spi_next_xfer_pio(master, xfer);
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 5a211e98383b..86f5a98aa7a2 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -18,7 +18,6 @@
18#include <linux/err.h> 18#include <linux/err.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/spi/spi.h> 20#include <linux/spi/spi.h>
21#include <linux/workqueue.h>
22#include <linux/mutex.h> 21#include <linux/mutex.h>
23 22
24#define HSSPI_GLOBAL_CTRL_REG 0x0 23#define HSSPI_GLOBAL_CTRL_REG 0x0
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 0250fa721cea..8510400e7867 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -29,7 +29,6 @@
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/completion.h> 30#include <linux/completion.h>
31#include <linux/err.h> 31#include <linux/err.h>
32#include <linux/workqueue.h>
33#include <linux/pm_runtime.h> 32#include <linux/pm_runtime.h>
34 33
35#include <bcm63xx_dev_spi.h> 34#include <bcm63xx_dev_spi.h>
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 000000000000..bb758978465d
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,673 @@
1/*
2 * Cadence SPI controller driver (master mode only)
3 *
4 * Copyright (C) 2008 - 2014 Xilinx, Inc.
5 *
6 * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
7 *
8 * This program is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU General Public License version 2 as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_irq.h>
20#include <linux/of_address.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23
24/* Name of this driver */
25#define CDNS_SPI_NAME "cdns-spi"
26
27/* Register offset definitions */
28#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
29#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
30#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
31#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
32#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
33#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
34#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
35#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
36#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
37#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
38#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */
39
40/*
41 * SPI Configuration Register bit Masks
42 *
43 * This register contains various control bits that affect the operation
44 * of the SPI controller
45 */
46#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
47#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
48#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
49#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
50#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
51#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
52#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
53#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
54#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
55#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
56 CDNS_SPI_CR_SSCTRL_MASK | \
57 CDNS_SPI_CR_SSFORCE_MASK | \
58 CDNS_SPI_CR_BAUD_DIV_4_MASK)
59
60/*
61 * SPI Configuration Register - Baud rate and slave select
62 *
63 * These are the values used in the calculation of baud rate divisor and
64 * setting the slave select.
65 */
66
67#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
68#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
69#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
70#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
71#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
72
73/*
74 * SPI Interrupt Registers bit Masks
75 *
76 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
77 * bit definitions.
78 */
79#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
80#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
81#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
82#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
83 CDNS_SPI_IXR_MODF_MASK)
84#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
85#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
86
87/*
88 * SPI Enable Register bit Masks
89 *
90 * This register is used to enable or disable the SPI controller
91 */
92#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
93#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
94
95/* SPI FIFO depth in bytes */
96#define CDNS_SPI_FIFO_DEPTH 128
97
98/* Default number of chip select lines */
99#define CDNS_SPI_DEFAULT_NUM_CS 4
100
101/**
102 * struct cdns_spi - This definition defines spi driver instance
103 * @regs: Virtual address of the SPI controller registers
104 * @ref_clk: Pointer to the peripheral clock
105 * @pclk: Pointer to the APB clock
106 * @speed_hz: Current SPI bus clock speed in Hz
107 * @txbuf: Pointer to the TX buffer
108 * @rxbuf: Pointer to the RX buffer
109 * @tx_bytes: Number of bytes left to transfer
110 * @rx_bytes: Number of bytes requested
111 * @dev_busy: Device busy flag
112 * @is_decoded_cs: Flag for decoder property set or not
113 */
114struct cdns_spi {
115 void __iomem *regs;
116 struct clk *ref_clk;
117 struct clk *pclk;
118 u32 speed_hz;
119 const u8 *txbuf;
120 u8 *rxbuf;
121 int tx_bytes;
122 int rx_bytes;
123 u8 dev_busy;
124 u32 is_decoded_cs;
125};
126
127/* Macros for the SPI controller read/write */
128static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
129{
130 return readl_relaxed(xspi->regs + offset);
131}
132
133static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
134{
135 writel_relaxed(val, xspi->regs + offset);
136}
137
138/**
139 * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
140 * @xspi: Pointer to the cdns_spi structure
141 *
142 * On reset the SPI controller is configured in master mode, the baud rate
143 * divisor is set to 4, the threshold for the TX FIFO not-full interrupt is
144 * set to 1, and the transfer word size is 8 bits.
145 * This function initializes the SPI controller to disable and clear all the
146 * interrupts, enable manual slave select and manual start, deselect all the
147 * chip select lines, and enable the SPI controller.
148 */
149static void cdns_spi_init_hw(struct cdns_spi *xspi)
150{
151 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
152 CDNS_SPI_ER_DISABLE_MASK);
153 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
154 CDNS_SPI_IXR_ALL_MASK);
155
156 /* Clear the RX FIFO */
157 while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
158 CDNS_SPI_IXR_RXNEMTY_MASK)
159 cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
160
161 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
162 CDNS_SPI_IXR_ALL_MASK);
163 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET,
164 CDNS_SPI_CR_DEFAULT_MASK);
165 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
166 CDNS_SPI_ER_ENABLE_MASK);
167}
168
169/**
170 * cdns_spi_chipselect - Select or deselect the chip select line
171 * @spi: Pointer to the spi_device structure
172 * @is_high: Deselect (true) or select (false) the chip select line
173 */
174static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
175{
176 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
177 u32 ctrl_reg;
178
179 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
180
181 if (is_high) {
182 /* Deselect the slave */
183 ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
184 } else {
185 /* Select the slave */
186 ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
187 if (!(xspi->is_decoded_cs))
188 ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
189 CDNS_SPI_SS_SHIFT) &
190 CDNS_SPI_CR_SSCTRL_MASK;
191 else
192 ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
193 CDNS_SPI_CR_SSCTRL_MASK;
194 }
195
196 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
197}
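
In non-decoded mode the SS field (CR bits 13:10) carries a one-cold pattern, so the cleared bit marks the selected slave, while in decoded mode the field carries the chip-select number itself for an external decoder. A standalone sketch of that encoding, with local stand-in names for the CDNS_SPI_SS_SHIFT and CDNS_SPI_CR_SSCTRL_MASK values defined above, follows; it is illustrative only, not driver code.

/*
 * Standalone sketch, not driver code: the SS-field encoding used by
 * cdns_spi_chipselect() above. SS_SHIFT/SSCTRL_MSK mirror the
 * CDNS_SPI_* constants defined earlier in this file.
 */
#include <stdio.h>

#define SS_SHIFT   10
#define SSCTRL_MSK 0x00003C00u

static unsigned int ss_field(unsigned int cs, int decoded)
{
	if (decoded)			/* external decoder: CS number itself */
		return (cs << SS_SHIFT) & SSCTRL_MSK;
	/* no decoder: one-cold pattern, the cleared bit marks the slave */
	return (~(0x1u << cs) << SS_SHIFT) & SSCTRL_MSK;
}

int main(void)
{
	/* cs = 2: non-decoded -> 0x2c00 (bit 12 cleared), decoded -> 0x800 */
	printf("%#x %#x\n", ss_field(2, 0), ss_field(2, 1));
	return 0;
}
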
198
199/**
200 * cdns_spi_config_clock_mode - Sets clock polarity and phase
201 * @spi: Pointer to the spi_device structure
202 *
203 * Sets the requested clock polarity and phase.
204 */
205static void cdns_spi_config_clock_mode(struct spi_device *spi)
206{
207 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
208 u32 ctrl_reg;
209
210 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
211
212 /* Set the SPI clock phase and clock polarity */
213 ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
214 if (spi->mode & SPI_CPHA)
215 ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
216 if (spi->mode & SPI_CPOL)
217 ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
218
219 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
220}
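
For reference, the mapping above produces the following CR clock-mode bits for the four standard SPI modes. This is a standalone sketch, with SPI_CPHA/SPI_CPOL taken as the usual Linux values (0x1/0x2) and the CR masks as defined in this file; it is not part of the driver.

/*
 * Standalone sketch, not driver code: CR clock-mode bits produced by
 * cdns_spi_config_clock_mode() for SPI modes 0-3.
 */
#include <stdio.h>

#define SPI_CPHA 0x01u
#define SPI_CPOL 0x02u
#define CR_CPHA  0x00000004u
#define CR_CPOL  0x00000002u

int main(void)
{
	unsigned int mode;

	for (mode = 0; mode < 4; mode++) {
		unsigned int cr = 0;

		if (mode & SPI_CPHA)
			cr |= CR_CPHA;
		if (mode & SPI_CPOL)
			cr |= CR_CPOL;
		printf("SPI mode %u -> CR bits %#x\n", mode, cr);
	}
	return 0;
}
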
221
222/**
223 * cdns_spi_config_clock_freq - Sets clock frequency
224 * @spi: Pointer to the spi_device structure
225 * @transfer: Pointer to the spi_transfer structure which provides
226 * information about next transfer setup parameters
227 *
228 * Sets the requested clock frequency.
229 * Note: If the requested frequency cannot be matched exactly with the
230 * available prescaler values, the driver programs the closest frequency
231 * below the requested one for the transfer. If the requested frequency is
232 * above or below the range supported by the SPI controller, the driver
233 * programs the highest or lowest frequency the controller supports,
234 * respectively.
235 */
236static void cdns_spi_config_clock_freq(struct spi_device *spi,
237 struct spi_transfer *transfer)
238{
239 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
240 u32 ctrl_reg, baud_rate_val;
241 unsigned long frequency;
242
243 frequency = clk_get_rate(xspi->ref_clk);
244
245 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
246
247 /* Set the clock frequency */
248 if (xspi->speed_hz != transfer->speed_hz) {
249 /* first valid value is 1 */
250 baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
251 while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
252 (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
253 baud_rate_val++;
254
255 ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
256 ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
257
258 xspi->speed_hz = frequency / (2 << baud_rate_val);
259 }
260 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
261}
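
The loop above steps the 3-bit divisor field from 1 to 7, where each value divides ref_clk by 2 << val (4, 8, ..., 256), and stops at the first value that brings SCLK at or below the requested rate. A standalone sketch of the same search, using an assumed 166.67 MHz reference clock purely as an example, follows.

/*
 * Standalone sketch, not driver code: the divisor search performed by
 * cdns_spi_config_clock_freq(). The 166666666 Hz reference clock is
 * only an assumed example value.
 */
#include <stdio.h>

#define BAUD_DIV_MIN 1
#define BAUD_DIV_MAX 7

static unsigned long pick_sclk(unsigned long ref, unsigned long wanted)
{
	unsigned int div = BAUD_DIV_MIN;

	/* each step halves SCLK: ref / 4, ref / 8, ... down to ref / 256 */
	while (div < BAUD_DIV_MAX && ref / (2UL << div) > wanted)
		div++;
	return ref / (2UL << div);	/* the SCLK actually programmed */
}

int main(void)
{
	/* requesting 10 MHz from ~166.67 MHz yields 5208333 Hz (div field 4) */
	printf("%lu\n", pick_sclk(166666666UL, 10000000UL));
	return 0;
}
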
262
263/**
264 * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
265 * @spi: Pointer to the spi_device structure
266 * @transfer: Pointer to the spi_transfer structure which provides
267 * information about next transfer setup parameters
268 *
269 * Sets the operational mode of SPI controller for the next SPI transfer and
270 * sets the requested clock frequency.
271 *
272 * Return: Always 0
273 */
274static int cdns_spi_setup_transfer(struct spi_device *spi,
275 struct spi_transfer *transfer)
276{
277 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
278
279 cdns_spi_config_clock_freq(spi, transfer);
280
281 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
282 __func__, spi->mode, spi->bits_per_word,
283 xspi->speed_hz);
284
285 return 0;
286}
287
288/**
289 * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
290 * @xspi: Pointer to the cdns_spi structure
291 */
292static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
293{
294 unsigned long trans_cnt = 0;
295
296 while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
297 (xspi->tx_bytes > 0)) {
298 if (xspi->txbuf)
299 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
300 *xspi->txbuf++);
301 else
302 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
303
304 xspi->tx_bytes--;
305 trans_cnt++;
306 }
307}
308
309/**
310 * cdns_spi_irq - Interrupt service routine of the SPI controller
311 * @irq: IRQ number
312 * @dev_id: Pointer to the xspi structure
313 *
314 * This function handles TX empty and Mode Fault interrupts only.
315 * On TX empty interrupt this function reads the received data from RX FIFO and
316 * fills the TX FIFO if there is any data remaining to be transferred.
317 * On Mode Fault interrupt this function indicates that the transfer is
318 * completed; the SPI subsystem will identify the error because the number of
319 * bytes remaining to be transferred is non-zero.
320 *
321 * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
322 */
323static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
324{
325 struct spi_master *master = dev_id;
326 struct cdns_spi *xspi = spi_master_get_devdata(master);
327 u32 intr_status, status;
328
329 status = IRQ_NONE;
330 intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
331 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
332
333 if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
334 /* Indicate that the transfer is completed; the SPI subsystem
335 * will identify the error because the number of bytes remaining
336 * to be transferred is non-zero
337 */
338 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
339 CDNS_SPI_IXR_DEFAULT_MASK);
340 spi_finalize_current_transfer(master);
341 status = IRQ_HANDLED;
342 } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
343 unsigned long trans_cnt;
344
345 trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
346
347 /* Read out the data from the RX FIFO */
348 while (trans_cnt) {
349 u8 data;
350
351 data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
352 if (xspi->rxbuf)
353 *xspi->rxbuf++ = data;
354
355 xspi->rx_bytes--;
356 trans_cnt--;
357 }
358
359 if (xspi->tx_bytes) {
360 /* There is more data to send */
361 cdns_spi_fill_tx_fifo(xspi);
362 } else {
363 /* Transfer is completed */
364 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
365 CDNS_SPI_IXR_DEFAULT_MASK);
366 spi_finalize_current_transfer(master);
367 }
368 status = IRQ_HANDLED;
369 }
370
371 return status;
372}
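
The handler relies on rx_bytes - tx_bytes being exactly the number of bytes already pushed into the TX FIFO but not yet read back, which is what each TXOW interrupt drains before refilling. A standalone sketch of that bookkeeping, using an arbitrary 300-byte transfer as the example, follows.

/*
 * Standalone sketch, not driver code: the tx_bytes/rx_bytes bookkeeping
 * used by cdns_spi_irq() above.
 */
#include <stdio.h>

#define FIFO_DEPTH 128

int main(void)
{
	int tx_bytes = 300, rx_bytes = 300;

	while (rx_bytes > 0) {
		int fill = tx_bytes < FIFO_DEPTH ? tx_bytes : FIFO_DEPTH;
		int in_flight;

		tx_bytes -= fill;		  /* cdns_spi_fill_tx_fifo() */
		in_flight = rx_bytes - tx_bytes;  /* drained on the next TXOW */
		rx_bytes -= in_flight;
		printf("drained %d, %d left to receive\n", in_flight, rx_bytes);
	}
	return 0;
}
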
373
374/**
375 * cdns_transfer_one - Initiates the SPI transfer
376 * @master: Pointer to spi_master structure
377 * @spi: Pointer to the spi_device structure
378 * @transfer: Pointer to the spi_transfer structure which provides
379 * information about next transfer parameters
380 *
381 * This function fills the TX FIFO, starts the SPI transfer, and returns a
382 * positive transfer count so that the core will wait for completion.
383 *
384 * Return: Length of the transfer in bytes
385 */
386static int cdns_transfer_one(struct spi_master *master,
387 struct spi_device *spi,
388 struct spi_transfer *transfer)
389{
390 struct cdns_spi *xspi = spi_master_get_devdata(master);
391
392 xspi->txbuf = transfer->tx_buf;
393 xspi->rxbuf = transfer->rx_buf;
394 xspi->tx_bytes = transfer->len;
395 xspi->rx_bytes = transfer->len;
396
397 cdns_spi_setup_transfer(spi, transfer);
398
399 cdns_spi_fill_tx_fifo(xspi);
400
401 cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
402 CDNS_SPI_IXR_DEFAULT_MASK);
403 return transfer->len;
404}
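
From a client's point of view this path is reached through the normal SPI core entry points. A minimal sketch follows; the spi_device, the 0x9f opcode, and the transfer length are assumed for illustration and are not part of this patch.

/*
 * Sketch of how a client exercises this path through the SPI core.
 */
#include <linux/spi/spi.h>

static int example_read_id(struct spi_device *spi)
{
	/* PIO controller (no can_dma), so small stack buffers are acceptable */
	u8 tx[4] = { 0x9f, 0, 0, 0 };	/* e.g. a JEDEC READ ID command */
	u8 rx[4];
	struct spi_transfer xfer = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = sizeof(tx),
		.speed_hz = 10000000,
	};

	/* one transfer; cdns_transfer_one() clocks it out via the FIFOs */
	return spi_sync_transfer(spi, &xfer, 1);
}
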
405
406/**
407 * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
408 * @master: Pointer to the spi_master structure which provides
409 * information about the controller.
410 *
411 * This function enables SPI master controller.
412 *
413 * Return: 0 always
414 */
415static int cdns_prepare_transfer_hardware(struct spi_master *master)
416{
417 struct cdns_spi *xspi = spi_master_get_devdata(master);
418
419 cdns_spi_config_clock_mode(master->cur_msg->spi);
420
421 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
422 CDNS_SPI_ER_ENABLE_MASK);
423
424 return 0;
425}
426
427/**
428 * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
429 * @master: Pointer to the spi_master structure which provides
430 * information about the controller.
431 *
432 * This function disables the SPI master controller.
433 *
434 * Return: 0 always
435 */
436static int cdns_unprepare_transfer_hardware(struct spi_master *master)
437{
438 struct cdns_spi *xspi = spi_master_get_devdata(master);
439
440 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
441 CDNS_SPI_ER_DISABLE_MASK);
442
443 return 0;
444}
445
446/**
447 * cdns_spi_probe - Probe method for the SPI driver
448 * @pdev: Pointer to the platform_device structure
449 *
450 * This function initializes the driver data structures and the hardware.
451 *
452 * Return: 0 on success and error value on error
453 */
454static int cdns_spi_probe(struct platform_device *pdev)
455{
456 int ret = 0, irq;
457 struct spi_master *master;
458 struct cdns_spi *xspi;
459 struct resource *res;
460 u32 num_cs;
461
462 master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
463 if (master == NULL)
464 return -ENOMEM;
465
466 xspi = spi_master_get_devdata(master);
467 master->dev.of_node = pdev->dev.of_node;
468 platform_set_drvdata(pdev, master);
469
470 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
471 xspi->regs = devm_ioremap_resource(&pdev->dev, res);
472 if (IS_ERR(xspi->regs)) {
473 ret = PTR_ERR(xspi->regs);
474 goto remove_master;
475 }
476
477 xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
478 if (IS_ERR(xspi->pclk)) {
479 dev_err(&pdev->dev, "pclk clock not found.\n");
480 ret = PTR_ERR(xspi->pclk);
481 goto remove_master;
482 }
483
484 xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
485 if (IS_ERR(xspi->ref_clk)) {
486 dev_err(&pdev->dev, "ref_clk clock not found.\n");
487 ret = PTR_ERR(xspi->ref_clk);
488 goto remove_master;
489 }
490
491 ret = clk_prepare_enable(xspi->pclk);
492 if (ret) {
493 dev_err(&pdev->dev, "Unable to enable APB clock.\n");
494 goto remove_master;
495 }
496
497 ret = clk_prepare_enable(xspi->ref_clk);
498 if (ret) {
499 dev_err(&pdev->dev, "Unable to enable device clock.\n");
500 goto clk_dis_apb;
501 }
502
503 /* SPI controller initializations */
504 cdns_spi_init_hw(xspi);
505
506 irq = platform_get_irq(pdev, 0);
507 if (irq <= 0) {
508 ret = -ENXIO;
509 dev_err(&pdev->dev, "irq number is invalid\n");
510 goto clk_dis_all;
511 }
512
513 ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
514 0, pdev->name, master);
515 if (ret != 0) {
516 ret = -ENXIO;
517 dev_err(&pdev->dev, "request_irq failed\n");
518 goto clk_dis_all;
519 }
520
521 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
522
523 if (ret < 0)
524 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
525 else
526 master->num_chipselect = num_cs;
527
528 ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
529 &xspi->is_decoded_cs);
530
531 if (ret < 0)
532 xspi->is_decoded_cs = 0;
533
534 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
535 master->transfer_one = cdns_transfer_one;
536 master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
537 master->set_cs = cdns_spi_chipselect;
538 master->mode_bits = SPI_CPOL | SPI_CPHA;
539
540 /* Set to default valid value */
541 master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
542 xspi->speed_hz = master->max_speed_hz;
543
544 master->bits_per_word_mask = SPI_BPW_MASK(8);
545
546 ret = spi_register_master(master);
547 if (ret) {
548 dev_err(&pdev->dev, "spi_register_master failed\n");
549 goto clk_dis_all;
550 }
551
552 return ret;
553
554clk_dis_all:
555 clk_disable_unprepare(xspi->ref_clk);
556clk_dis_apb:
557 clk_disable_unprepare(xspi->pclk);
558remove_master:
559 spi_master_put(master);
560 return ret;
561}
562
563/**
564 * cdns_spi_remove - Remove method for the SPI driver
565 * @pdev: Pointer to the platform_device structure
566 *
567 * This function is called if a device is physically removed from the system or
568 * if the driver module is being unloaded. It frees all resources allocated to
569 * the device.
570 *
572 * Return: Always 0
572 */
573static int cdns_spi_remove(struct platform_device *pdev)
574{
575 struct spi_master *master = platform_get_drvdata(pdev);
576 struct cdns_spi *xspi = spi_master_get_devdata(master);
577
578 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
579 CDNS_SPI_ER_DISABLE_MASK);
580
581 clk_disable_unprepare(xspi->ref_clk);
582 clk_disable_unprepare(xspi->pclk);
583
584 spi_unregister_master(master);
585
586 return 0;
587}
588
589/**
590 * cdns_spi_suspend - Suspend method for the SPI driver
591 * @dev: Address of the device structure
592 *
593 * This function disables the SPI controller and
594 * changes the driver state to "suspend"
595 *
596 * Return: Always 0
597 */
598static int __maybe_unused cdns_spi_suspend(struct device *dev)
599{
600 struct platform_device *pdev = container_of(dev,
601 struct platform_device, dev);
602 struct spi_master *master = platform_get_drvdata(pdev);
603 struct cdns_spi *xspi = spi_master_get_devdata(master);
604
605 spi_master_suspend(master);
606
607 clk_disable_unprepare(xspi->ref_clk);
608
609 clk_disable_unprepare(xspi->pclk);
610
611 return 0;
612}
613
614/**
615 * cdns_spi_resume - Resume method for the SPI driver
616 * @dev: Address of the device structure
617 *
618 * This function re-enables the clocks and resumes the SPI controller
619 *
620 * Return: 0 on success and error value on error
621 */
622static int __maybe_unused cdns_spi_resume(struct device *dev)
623{
624 struct platform_device *pdev = container_of(dev,
625 struct platform_device, dev);
626 struct spi_master *master = platform_get_drvdata(pdev);
627 struct cdns_spi *xspi = spi_master_get_devdata(master);
628 int ret = 0;
629
630 ret = clk_prepare_enable(xspi->pclk);
631 if (ret) {
632 dev_err(dev, "Cannot enable APB clock.\n");
633 return ret;
634 }
635
636 ret = clk_prepare_enable(xspi->ref_clk);
637 if (ret) {
638 dev_err(dev, "Cannot enable device clock.\n");
639 clk_disable(xspi->pclk);
640 return ret;
641 }
642 spi_master_resume(master);
643
644 return 0;
645}
646
647static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
648 cdns_spi_resume);
649
650static struct of_device_id cdns_spi_of_match[] = {
651 { .compatible = "xlnx,zynq-spi-r1p6" },
652 { .compatible = "cdns,spi-r1p6" },
653 { /* end of table */ }
654};
655MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
656
657/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
658static struct platform_driver cdns_spi_driver = {
659 .probe = cdns_spi_probe,
660 .remove = cdns_spi_remove,
661 .driver = {
662 .name = CDNS_SPI_NAME,
663 .owner = THIS_MODULE,
664 .of_match_table = cdns_spi_of_match,
665 .pm = &cdns_spi_dev_pm_ops,
666 },
667};
668
669module_platform_driver(cdns_spi_driver);
670
671MODULE_AUTHOR("Xilinx, Inc.");
672MODULE_DESCRIPTION("Cadence SPI driver");
673MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 1492f5ee9aaa..a5cba14ac3d2 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -16,6 +16,7 @@
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/of_gpio.h>
19 20
20#include "spi-dw.h" 21#include "spi-dw.h"
21 22
@@ -70,6 +71,27 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
70 dws->num_cs = 4; 71 dws->num_cs = 4;
71 dws->max_freq = clk_get_rate(dwsmmio->clk); 72 dws->max_freq = clk_get_rate(dwsmmio->clk);
72 73
74 if (pdev->dev.of_node) {
75 int i;
76
77 for (i = 0; i < dws->num_cs; i++) {
78 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
79 "cs-gpios", i);
80
81 if (cs_gpio == -EPROBE_DEFER) {
82 ret = cs_gpio;
83 goto out;
84 }
85
86 if (gpio_is_valid(cs_gpio)) {
87 ret = devm_gpio_request(&pdev->dev, cs_gpio,
88 dev_name(&pdev->dev));
89 if (ret)
90 goto out;
91 }
92 }
93 }
94
73 ret = dw_spi_add_host(&pdev->dev, dws); 95 ret = dw_spi_add_host(&pdev->dev, dws);
74 if (ret) 96 if (ret)
75 goto out; 97 goto out;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 712ac5629cd4..29f33143b795 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/gpio.h>
27 28
28#include "spi-dw.h" 29#include "spi-dw.h"
29 30
@@ -36,12 +37,6 @@
36#define DONE_STATE ((void *)2) 37#define DONE_STATE ((void *)2)
37#define ERROR_STATE ((void *)-1) 38#define ERROR_STATE ((void *)-1)
38 39
39#define QUEUE_RUNNING 0
40#define QUEUE_STOPPED 1
41
42#define MRST_SPI_DEASSERT 0
43#define MRST_SPI_ASSERT 1
44
45/* Slave spi_dev related */ 40/* Slave spi_dev related */
46struct chip_data { 41struct chip_data {
47 u16 cr0; 42 u16 cr0;
@@ -263,28 +258,22 @@ static int map_dma_buffers(struct dw_spi *dws)
263static void giveback(struct dw_spi *dws) 258static void giveback(struct dw_spi *dws)
264{ 259{
265 struct spi_transfer *last_transfer; 260 struct spi_transfer *last_transfer;
266 unsigned long flags;
267 struct spi_message *msg; 261 struct spi_message *msg;
268 262
269 spin_lock_irqsave(&dws->lock, flags);
270 msg = dws->cur_msg; 263 msg = dws->cur_msg;
271 dws->cur_msg = NULL; 264 dws->cur_msg = NULL;
272 dws->cur_transfer = NULL; 265 dws->cur_transfer = NULL;
273 dws->prev_chip = dws->cur_chip; 266 dws->prev_chip = dws->cur_chip;
274 dws->cur_chip = NULL; 267 dws->cur_chip = NULL;
275 dws->dma_mapped = 0; 268 dws->dma_mapped = 0;
276 queue_work(dws->workqueue, &dws->pump_messages);
277 spin_unlock_irqrestore(&dws->lock, flags);
278 269
279 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, 270 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
280 transfer_list); 271 transfer_list);
281 272
282 if (!last_transfer->cs_change && dws->cs_control) 273 if (!last_transfer->cs_change)
283 dws->cs_control(MRST_SPI_DEASSERT); 274 spi_chip_sel(dws, dws->cur_msg->spi, 0);
284 275
285 msg->state = NULL; 276 spi_finalize_current_message(dws->master);
286 if (msg->complete)
287 msg->complete(msg->context);
288} 277}
289 278
290static void int_error_stop(struct dw_spi *dws, const char *msg) 279static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -502,7 +491,7 @@ static void pump_transfers(unsigned long data)
502 dw_writew(dws, DW_SPI_CTRL0, cr0); 491 dw_writew(dws, DW_SPI_CTRL0, cr0);
503 492
504 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); 493 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
505 spi_chip_sel(dws, spi->chip_select); 494 spi_chip_sel(dws, spi, 1);
506 495
507 /* Set the interrupt mask, for poll mode just disable all int */ 496 /* Set the interrupt mask, for poll mode just disable all int */
508 spi_mask_intr(dws, 0xff); 497 spi_mask_intr(dws, 0xff);
@@ -529,30 +518,12 @@ early_exit:
529 return; 518 return;
530} 519}
531 520
532static void pump_messages(struct work_struct *work) 521static int dw_spi_transfer_one_message(struct spi_master *master,
522 struct spi_message *msg)
533{ 523{
534 struct dw_spi *dws = 524 struct dw_spi *dws = spi_master_get_devdata(master);
535 container_of(work, struct dw_spi, pump_messages);
536 unsigned long flags;
537
538 /* Lock queue and check for queue work */
539 spin_lock_irqsave(&dws->lock, flags);
540 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
541 dws->busy = 0;
542 spin_unlock_irqrestore(&dws->lock, flags);
543 return;
544 }
545
546 /* Make sure we are not already running a message */
547 if (dws->cur_msg) {
548 spin_unlock_irqrestore(&dws->lock, flags);
549 return;
550 }
551
552 /* Extract head of queue */
553 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
554 list_del_init(&dws->cur_msg->queue);
555 525
526 dws->cur_msg = msg;
556 /* Initial message state*/ 527 /* Initial message state*/
557 dws->cur_msg->state = START_STATE; 528 dws->cur_msg->state = START_STATE;
558 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, 529 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
@@ -560,46 +531,9 @@ static void pump_messages(struct work_struct *work)
560 transfer_list); 531 transfer_list);
561 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); 532 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
562 533
563 /* Mark as busy and launch transfers */ 534 /* Launch transfers */
564 tasklet_schedule(&dws->pump_transfers); 535 tasklet_schedule(&dws->pump_transfers);
565 536
566 dws->busy = 1;
567 spin_unlock_irqrestore(&dws->lock, flags);
568}
569
570/* spi_device use this to queue in their spi_msg */
571static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
572{
573 struct dw_spi *dws = spi_master_get_devdata(spi->master);
574 unsigned long flags;
575
576 spin_lock_irqsave(&dws->lock, flags);
577
578 if (dws->run == QUEUE_STOPPED) {
579 spin_unlock_irqrestore(&dws->lock, flags);
580 return -ESHUTDOWN;
581 }
582
583 msg->actual_length = 0;
584 msg->status = -EINPROGRESS;
585 msg->state = START_STATE;
586
587 list_add_tail(&msg->queue, &dws->queue);
588
589 if (dws->run == QUEUE_RUNNING && !dws->busy) {
590
591 if (dws->cur_transfer || dws->cur_msg)
592 queue_work(dws->workqueue,
593 &dws->pump_messages);
594 else {
595 /* If no other data transaction in air, just go */
596 spin_unlock_irqrestore(&dws->lock, flags);
597 pump_messages(&dws->pump_messages);
598 return 0;
599 }
600 }
601
602 spin_unlock_irqrestore(&dws->lock, flags);
603 return 0; 537 return 0;
604} 538}
605 539
@@ -608,6 +542,7 @@ static int dw_spi_setup(struct spi_device *spi)
608{ 542{
609 struct dw_spi_chip *chip_info = NULL; 543 struct dw_spi_chip *chip_info = NULL;
610 struct chip_data *chip; 544 struct chip_data *chip;
545 int ret;
611 546
612 /* Only alloc on first setup */ 547 /* Only alloc on first setup */
613 chip = spi_get_ctldata(spi); 548 chip = spi_get_ctldata(spi);
@@ -661,81 +596,13 @@ static int dw_spi_setup(struct spi_device *spi)
661 | (spi->mode << SPI_MODE_OFFSET) 596 | (spi->mode << SPI_MODE_OFFSET)
662 | (chip->tmode << SPI_TMOD_OFFSET); 597 | (chip->tmode << SPI_TMOD_OFFSET);
663 598
664 return 0; 599 if (gpio_is_valid(spi->cs_gpio)) {
665} 600 ret = gpio_direction_output(spi->cs_gpio,
666 601 !(spi->mode & SPI_CS_HIGH));
667static int init_queue(struct dw_spi *dws) 602 if (ret)
668{ 603 return ret;
669 INIT_LIST_HEAD(&dws->queue);
670 spin_lock_init(&dws->lock);
671
672 dws->run = QUEUE_STOPPED;
673 dws->busy = 0;
674
675 tasklet_init(&dws->pump_transfers,
676 pump_transfers, (unsigned long)dws);
677
678 INIT_WORK(&dws->pump_messages, pump_messages);
679 dws->workqueue = create_singlethread_workqueue(
680 dev_name(dws->master->dev.parent));
681 if (dws->workqueue == NULL)
682 return -EBUSY;
683
684 return 0;
685}
686
687static int start_queue(struct dw_spi *dws)
688{
689 unsigned long flags;
690
691 spin_lock_irqsave(&dws->lock, flags);
692
693 if (dws->run == QUEUE_RUNNING || dws->busy) {
694 spin_unlock_irqrestore(&dws->lock, flags);
695 return -EBUSY;
696 } 604 }
697 605
698 dws->run = QUEUE_RUNNING;
699 dws->cur_msg = NULL;
700 dws->cur_transfer = NULL;
701 dws->cur_chip = NULL;
702 dws->prev_chip = NULL;
703 spin_unlock_irqrestore(&dws->lock, flags);
704
705 queue_work(dws->workqueue, &dws->pump_messages);
706
707 return 0;
708}
709
710static int stop_queue(struct dw_spi *dws)
711{
712 unsigned long flags;
713 unsigned limit = 50;
714 int status = 0;
715
716 spin_lock_irqsave(&dws->lock, flags);
717 dws->run = QUEUE_STOPPED;
718 while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
719 spin_unlock_irqrestore(&dws->lock, flags);
720 msleep(10);
721 spin_lock_irqsave(&dws->lock, flags);
722 }
723
724 if (!list_empty(&dws->queue) || dws->busy)
725 status = -EBUSY;
726 spin_unlock_irqrestore(&dws->lock, flags);
727
728 return status;
729}
730
731static int destroy_queue(struct dw_spi *dws)
732{
733 int status;
734
735 status = stop_queue(dws);
736 if (status != 0)
737 return status;
738 destroy_workqueue(dws->workqueue);
739 return 0; 606 return 0;
740} 607}
741 608
@@ -794,7 +661,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
794 master->bus_num = dws->bus_num; 661 master->bus_num = dws->bus_num;
795 master->num_chipselect = dws->num_cs; 662 master->num_chipselect = dws->num_cs;
796 master->setup = dw_spi_setup; 663 master->setup = dw_spi_setup;
797 master->transfer = dw_spi_transfer; 664 master->transfer_one_message = dw_spi_transfer_one_message;
798 master->max_speed_hz = dws->max_freq; 665 master->max_speed_hz = dws->max_freq;
799 666
800 /* Basic HW init */ 667 /* Basic HW init */
@@ -808,33 +675,21 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
808 } 675 }
809 } 676 }
810 677
811 /* Initial and start queue */ 678 tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
812 ret = init_queue(dws);
813 if (ret) {
814 dev_err(&master->dev, "problem initializing queue\n");
815 goto err_diable_hw;
816 }
817 ret = start_queue(dws);
818 if (ret) {
819 dev_err(&master->dev, "problem starting queue\n");
820 goto err_diable_hw;
821 }
822 679
823 spi_master_set_devdata(master, dws); 680 spi_master_set_devdata(master, dws);
824 ret = devm_spi_register_master(dev, master); 681 ret = devm_spi_register_master(dev, master);
825 if (ret) { 682 if (ret) {
826 dev_err(&master->dev, "problem registering spi master\n"); 683 dev_err(&master->dev, "problem registering spi master\n");
827 goto err_queue_alloc; 684 goto err_dma_exit;
828 } 685 }
829 686
830 mrst_spi_debugfs_init(dws); 687 mrst_spi_debugfs_init(dws);
831 return 0; 688 return 0;
832 689
833err_queue_alloc: 690err_dma_exit:
834 destroy_queue(dws);
835 if (dws->dma_ops && dws->dma_ops->dma_exit) 691 if (dws->dma_ops && dws->dma_ops->dma_exit)
836 dws->dma_ops->dma_exit(dws); 692 dws->dma_ops->dma_exit(dws);
837err_diable_hw:
838 spi_enable_chip(dws, 0); 693 spi_enable_chip(dws, 0);
839err_free_master: 694err_free_master:
840 spi_master_put(master); 695 spi_master_put(master);
@@ -844,18 +699,10 @@ EXPORT_SYMBOL_GPL(dw_spi_add_host);
844 699
845void dw_spi_remove_host(struct dw_spi *dws) 700void dw_spi_remove_host(struct dw_spi *dws)
846{ 701{
847 int status = 0;
848
849 if (!dws) 702 if (!dws)
850 return; 703 return;
851 mrst_spi_debugfs_remove(dws); 704 mrst_spi_debugfs_remove(dws);
852 705
853 /* Remove the queue */
854 status = destroy_queue(dws);
855 if (status != 0)
856 dev_err(&dws->master->dev,
857 "dw_spi_remove: workqueue will not complete, message memory not freed\n");
858
859 if (dws->dma_ops && dws->dma_ops->dma_exit) 706 if (dws->dma_ops && dws->dma_ops->dma_exit)
860 dws->dma_ops->dma_exit(dws); 707 dws->dma_ops->dma_exit(dws);
861 spi_enable_chip(dws, 0); 708 spi_enable_chip(dws, 0);
@@ -868,7 +715,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
868{ 715{
869 int ret = 0; 716 int ret = 0;
870 717
871 ret = stop_queue(dws); 718 ret = spi_master_suspend(dws->master);
872 if (ret) 719 if (ret)
873 return ret; 720 return ret;
874 spi_enable_chip(dws, 0); 721 spi_enable_chip(dws, 0);
@@ -882,7 +729,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
882 int ret; 729 int ret;
883 730
884 spi_hw_init(dws); 731 spi_hw_init(dws);
885 ret = start_queue(dws); 732 ret = spi_master_resume(dws->master);
886 if (ret) 733 if (ret)
887 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 734 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
888 return ret; 735 return ret;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 587643dae11e..6d2acad34f64 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/io.h> 4#include <linux/io.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/gpio.h>
6 7
7/* Register offsets */ 8/* Register offsets */
8#define DW_SPI_CTRL0 0x00 9#define DW_SPI_CTRL0 0x00
@@ -104,14 +105,6 @@ struct dw_spi {
104 u16 bus_num; 105 u16 bus_num;
105 u16 num_cs; /* supported slave numbers */ 106 u16 num_cs; /* supported slave numbers */
106 107
107 /* Driver message queue */
108 struct workqueue_struct *workqueue;
109 struct work_struct pump_messages;
110 spinlock_t lock;
111 struct list_head queue;
112 int busy;
113 int run;
114
115 /* Message Transfer pump */ 108 /* Message Transfer pump */
116 struct tasklet_struct pump_transfers; 109 struct tasklet_struct pump_transfers;
117 110
@@ -186,15 +179,20 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
186 dw_writel(dws, DW_SPI_BAUDR, div); 179 dw_writel(dws, DW_SPI_BAUDR, div);
187} 180}
188 181
189static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) 182static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi,
183 int active)
190{ 184{
191 if (cs > dws->num_cs) 185 u16 cs = spi->chip_select;
192 return; 186 int gpio_val = active ? (spi->mode & SPI_CS_HIGH) :
187 !(spi->mode & SPI_CS_HIGH);
193 188
194 if (dws->cs_control) 189 if (dws->cs_control)
195 dws->cs_control(1); 190 dws->cs_control(active);
191 if (gpio_is_valid(spi->cs_gpio))
192 gpio_set_value(spi->cs_gpio, gpio_val);
196 193
197 dw_writel(dws, DW_SPI_SER, 1 << cs); 194 if (active)
195 dw_writel(dws, DW_SPI_SER, 1 << cs);
198} 196}
199 197
200/* Disable IRQ bits */ 198/* Disable IRQ bits */
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index 09965f069a1c..ba441ad9a007 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -11,7 +11,6 @@
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/spi/spi.h> 12#include <linux/spi/spi.h>
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/workqueue.h>
15#include <linux/of.h> 14#include <linux/of.h>
16#include <linux/of_platform.h> 15#include <linux/of_platform.h>
17 16
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d565eeee3bd8..5021ddf03f60 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -406,7 +406,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
406 return IRQ_HANDLED; 406 return IRQ_HANDLED;
407} 407}
408 408
409static struct of_device_id fsl_dspi_dt_ids[] = { 409static const struct of_device_id fsl_dspi_dt_ids[] = {
410 { .compatible = "fsl,vf610-dspi", .data = NULL, }, 410 { .compatible = "fsl,vf610-dspi", .data = NULL, },
411 { /* sentinel */ } 411 { /* sentinel */ }
412}; 412};
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index e767f5831b9c..8ebd724e4c59 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -348,7 +348,7 @@ static void fsl_espi_cmd_trans(struct spi_message *m,
348 } 348 }
349 349
350 espi_trans->tx_buf = local_buf; 350 espi_trans->tx_buf = local_buf;
351 espi_trans->rx_buf = local_buf + espi_trans->n_tx; 351 espi_trans->rx_buf = local_buf;
352 fsl_espi_do_trans(m, espi_trans); 352 fsl_espi_do_trans(m, espi_trans);
353 353
354 espi_trans->actual_length = espi_trans->len; 354 espi_trans->actual_length = espi_trans->len;
@@ -397,7 +397,7 @@ static void fsl_espi_rw_trans(struct spi_message *m,
397 espi_trans->n_rx = trans_len; 397 espi_trans->n_rx = trans_len;
398 espi_trans->len = trans_len + n_tx; 398 espi_trans->len = trans_len + n_tx;
399 espi_trans->tx_buf = local_buf; 399 espi_trans->tx_buf = local_buf;
400 espi_trans->rx_buf = local_buf + n_tx; 400 espi_trans->rx_buf = local_buf;
401 fsl_espi_do_trans(m, espi_trans); 401 fsl_espi_do_trans(m, espi_trans);
402 402
403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); 403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
@@ -458,7 +458,7 @@ static int fsl_espi_setup(struct spi_device *spi)
458 return -EINVAL; 458 return -EINVAL;
459 459
460 if (!cs) { 460 if (!cs) {
461 cs = kzalloc(sizeof *cs, GFP_KERNEL); 461 cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
462 if (!cs) 462 if (!cs)
463 return -ENOMEM; 463 return -ENOMEM;
464 spi->controller_state = cs; 464 spi->controller_state = cs;
@@ -586,8 +586,10 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
586 struct spi_master *master; 586 struct spi_master *master;
587 struct mpc8xxx_spi *mpc8xxx_spi; 587 struct mpc8xxx_spi *mpc8xxx_spi;
588 struct fsl_espi_reg *reg_base; 588 struct fsl_espi_reg *reg_base;
589 u32 regval; 589 struct device_node *nc;
590 int i, ret = 0; 590 const __be32 *prop;
591 u32 regval, csmode;
592 int i, len, ret = 0;
591 593
592 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); 594 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
593 if (!master) { 595 if (!master) {
@@ -634,8 +636,32 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
634 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); 636 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
635 637
636 /* Init eSPI CS mode register */ 638 /* Init eSPI CS mode register */
637 for (i = 0; i < pdata->max_chipselect; i++) 639 for_each_available_child_of_node(master->dev.of_node, nc) {
638 mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); 640 /* get chip select */
641 prop = of_get_property(nc, "reg", &len);
642 if (!prop || len < sizeof(*prop))
643 continue;
644 i = be32_to_cpup(prop);
645 if (i < 0 || i >= pdata->max_chipselect)
646 continue;
647
648 csmode = CSMODE_INIT_VAL;
649 /* check if CSBEF is set in device tree */
650 prop = of_get_property(nc, "fsl,csbef", &len);
651 if (prop && len >= sizeof(*prop)) {
652 csmode &= ~(CSMODE_BEF(0xf));
653 csmode |= CSMODE_BEF(be32_to_cpup(prop));
654 }
655 /* check if CSAFT is set in device tree */
656 prop = of_get_property(nc, "fsl,csaft", &len);
657 if (prop && len >= sizeof(*prop)) {
658 csmode &= ~(CSMODE_AFT(0xf));
659 csmode |= CSMODE_AFT(be32_to_cpup(prop));
660 }
661 mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
662
663 dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
664 }
639 665
640 /* Enable SPI interface */ 666 /* Enable SPI interface */
641 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 667 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index e5d45fca3551..95212ea96c8d 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -99,11 +99,6 @@ int mpc8xxx_spi_transfer(struct spi_device *spi,
99 return 0; 99 return 0;
100} 100}
101 101
102void mpc8xxx_spi_cleanup(struct spi_device *spi)
103{
104 kfree(spi->controller_state);
105}
106
107const char *mpc8xxx_spi_strmode(unsigned int flags) 102const char *mpc8xxx_spi_strmode(unsigned int flags)
108{ 103{
109 if (flags & SPI_QE_CPU_MODE) { 104 if (flags & SPI_QE_CPU_MODE) {
@@ -134,7 +129,6 @@ int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
134 | SPI_LSB_FIRST | SPI_LOOP; 129 | SPI_LSB_FIRST | SPI_LOOP;
135 130
136 master->transfer = mpc8xxx_spi_transfer; 131 master->transfer = mpc8xxx_spi_transfer;
137 master->cleanup = mpc8xxx_spi_cleanup;
138 master->dev.of_node = dev->of_node; 132 master->dev.of_node = dev->of_node;
139 133
140 mpc8xxx_spi = spi_master_get_devdata(master); 134 mpc8xxx_spi = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
index 52db6936778e..2fcbfd01d109 100644
--- a/drivers/spi/spi-fsl-lib.h
+++ b/drivers/spi/spi-fsl-lib.h
@@ -124,7 +124,6 @@ extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
124extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi, 124extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
125 struct spi_transfer *t, unsigned int len); 125 struct spi_transfer *t, unsigned int len);
126extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m); 126extern int mpc8xxx_spi_transfer(struct spi_device *spi, struct spi_message *m);
127extern void mpc8xxx_spi_cleanup(struct spi_device *spi);
128extern const char *mpc8xxx_spi_strmode(unsigned int flags); 127extern const char *mpc8xxx_spi_strmode(unsigned int flags);
129extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem, 128extern int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
130 unsigned int irq); 129 unsigned int irq);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index b3e7775034db..98ccd231bf00 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -431,7 +431,7 @@ static int fsl_spi_setup(struct spi_device *spi)
431 return -EINVAL; 431 return -EINVAL;
432 432
433 if (!cs) { 433 if (!cs) {
434 cs = kzalloc(sizeof *cs, GFP_KERNEL); 434 cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
435 if (!cs) 435 if (!cs)
436 return -ENOMEM; 436 return -ENOMEM;
437 spi->controller_state = cs; 437 spi->controller_state = cs;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 09823076df88..9f595535cf27 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -340,7 +340,7 @@ done:
340} 340}
341 341
342#ifdef CONFIG_OF 342#ifdef CONFIG_OF
343static struct of_device_id spi_gpio_dt_ids[] = { 343static const struct of_device_id spi_gpio_dt_ids[] = {
344 { .compatible = "spi-gpio" }, 344 { .compatible = "spi-gpio" },
345 {} 345 {}
346}; 346};
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 16e30de650b0..73e91d5a43df 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -10,7 +10,6 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/workqueue.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index be2a2e108e2f..0f5a0aa3b871 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -37,7 +37,6 @@
37#include <linux/init.h> 37#include <linux/init.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40#include <linux/workqueue.h>
41#include <linux/interrupt.h> 40#include <linux/interrupt.h>
42#include <linux/err.h> 41#include <linux/err.h>
43#include <linux/clk.h> 42#include <linux/clk.h>
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 51d99779682f..66d2ae21e78e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1111,10 +1111,8 @@ static int pl022_dma_probe(struct pl022 *pl022)
1111 } 1111 }
1112 1112
1113 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); 1113 pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
1114 if (!pl022->dummypage) { 1114 if (!pl022->dummypage)
1115 dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n");
1116 goto err_no_dummypage; 1115 goto err_no_dummypage;
1117 }
1118 1116
1119 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", 1117 dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
1120 dma_chan_name(pl022->dma_rx_channel), 1118 dma_chan_name(pl022->dma_rx_channel),
@@ -1809,11 +1807,8 @@ static int pl022_setup(struct spi_device *spi)
1809 1807
1810 if (chip == NULL) { 1808 if (chip == NULL) {
1811 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 1809 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
1812 if (!chip) { 1810 if (!chip)
1813 dev_err(&spi->dev,
1814 "cannot allocate controller state\n");
1815 return -ENOMEM; 1811 return -ENOMEM;
1816 }
1817 dev_dbg(&spi->dev, 1812 dev_dbg(&spi->dev,
1818 "allocated memory for controller's runtime state\n"); 1813 "allocated memory for controller's runtime state\n");
1819 } 1814 }
@@ -2050,10 +2045,8 @@ pl022_platform_data_dt_get(struct device *dev)
2050 } 2045 }
2051 2046
2052 pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL); 2047 pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
2053 if (!pd) { 2048 if (!pd)
2054 dev_err(dev, "cannot allocate platform data memory\n");
2055 return NULL; 2049 return NULL;
2056 }
2057 2050
2058 pd->bus_id = -1; 2051 pd->bus_id = -1;
2059 pd->enable_dma = 1; 2052 pd->enable_dma = 1;
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
index 3f006d3ed2a8..c1865c92ccb9 100644
--- a/drivers/spi/spi-pxa2xx-pci.c
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -8,7 +8,43 @@
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/spi/pxa2xx_spi.h> 9#include <linux/spi/pxa2xx_spi.h>
10 10
11static int ce4100_spi_probe(struct pci_dev *dev, 11enum {
12 PORT_CE4100,
13 PORT_BYT,
14};
15
16struct pxa_spi_info {
17 enum pxa_ssp_type type;
18 int port_id;
19 int num_chipselect;
20 int tx_slave_id;
21 int tx_chan_id;
22 int rx_slave_id;
23 int rx_chan_id;
24};
25
26static struct pxa_spi_info spi_info_configs[] = {
27 [PORT_CE4100] = {
28 .type = PXA25x_SSP,
29 .port_id = -1,
30 .num_chipselect = -1,
31 .tx_slave_id = -1,
32 .tx_chan_id = -1,
33 .rx_slave_id = -1,
34 .rx_chan_id = -1,
35 },
36 [PORT_BYT] = {
37 .type = LPSS_SSP,
38 .port_id = 0,
39 .num_chipselect = 1,
40 .tx_slave_id = 0,
41 .tx_chan_id = 0,
42 .rx_slave_id = 1,
43 .rx_chan_id = 1,
44 },
45};
46
47static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
12 const struct pci_device_id *ent) 48 const struct pci_device_id *ent)
13{ 49{
14 struct platform_device_info pi; 50 struct platform_device_info pi;
@@ -16,6 +52,7 @@ static int ce4100_spi_probe(struct pci_dev *dev,
16 struct platform_device *pdev; 52 struct platform_device *pdev;
17 struct pxa2xx_spi_master spi_pdata; 53 struct pxa2xx_spi_master spi_pdata;
18 struct ssp_device *ssp; 54 struct ssp_device *ssp;
55 struct pxa_spi_info *c;
19 56
20 ret = pcim_enable_device(dev); 57 ret = pcim_enable_device(dev);
21 if (ret) 58 if (ret)
@@ -25,8 +62,16 @@ static int ce4100_spi_probe(struct pci_dev *dev,
25 if (ret) 62 if (ret)
26 return ret; 63 return ret;
27 64
65 c = &spi_info_configs[ent->driver_data];
66
28 memset(&spi_pdata, 0, sizeof(spi_pdata)); 67 memset(&spi_pdata, 0, sizeof(spi_pdata));
29 spi_pdata.num_chipselect = dev->devfn; 68 spi_pdata.num_chipselect = (c->num_chipselect > 0) ?
69 c->num_chipselect : dev->devfn;
70 spi_pdata.tx_slave_id = c->tx_slave_id;
71 spi_pdata.tx_chan_id = c->tx_chan_id;
72 spi_pdata.rx_slave_id = c->rx_slave_id;
73 spi_pdata.rx_chan_id = c->rx_chan_id;
74 spi_pdata.enable_dma = c->rx_slave_id >= 0 && c->tx_slave_id >= 0;
30 75
31 ssp = &spi_pdata.ssp; 76 ssp = &spi_pdata.ssp;
32 ssp->phys_base = pci_resource_start(dev, 0); 77 ssp->phys_base = pci_resource_start(dev, 0);
@@ -36,8 +81,8 @@ static int ce4100_spi_probe(struct pci_dev *dev,
36 return -EIO; 81 return -EIO;
37 } 82 }
38 ssp->irq = dev->irq; 83 ssp->irq = dev->irq;
39 ssp->port_id = dev->devfn; 84 ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
40 ssp->type = PXA25x_SSP; 85 ssp->type = c->type;
41 86
42 memset(&pi, 0, sizeof(pi)); 87 memset(&pi, 0, sizeof(pi));
43 pi.parent = &dev->dev; 88 pi.parent = &dev->dev;
@@ -55,28 +100,29 @@ static int ce4100_spi_probe(struct pci_dev *dev,
55 return 0; 100 return 0;
56} 101}
57 102
58static void ce4100_spi_remove(struct pci_dev *dev) 103static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
59{ 104{
60 struct platform_device *pdev = pci_get_drvdata(dev); 105 struct platform_device *pdev = pci_get_drvdata(dev);
61 106
62 platform_device_unregister(pdev); 107 platform_device_unregister(pdev);
63} 108}
64 109
65static const struct pci_device_id ce4100_spi_devices[] = { 110static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
66 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, 111 { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
112 { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
67 { }, 113 { },
68}; 114};
69MODULE_DEVICE_TABLE(pci, ce4100_spi_devices); 115MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
70 116
71static struct pci_driver ce4100_spi_driver = { 117static struct pci_driver pxa2xx_spi_pci_driver = {
72 .name = "ce4100_spi", 118 .name = "pxa2xx_spi_pci",
73 .id_table = ce4100_spi_devices, 119 .id_table = pxa2xx_spi_pci_devices,
74 .probe = ce4100_spi_probe, 120 .probe = pxa2xx_spi_pci_probe,
75 .remove = ce4100_spi_remove, 121 .remove = pxa2xx_spi_pci_remove,
76}; 122};
77 123
78module_pci_driver(ce4100_spi_driver); 124module_pci_driver(pxa2xx_spi_pci_driver);
79 125
80MODULE_DESCRIPTION("CE4100 PCI-SPI glue code for PXA's driver"); 126MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
81MODULE_LICENSE("GPL v2"); 127MODULE_LICENSE("GPL v2");
82MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>"); 128MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 41185d0557fa..a98df7eeb42d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -27,7 +27,6 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/spi/pxa2xx_spi.h> 28#include <linux/spi/pxa2xx_spi.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/workqueue.h>
31#include <linux/delay.h> 30#include <linux/delay.h>
32#include <linux/gpio.h> 31#include <linux/gpio.h>
33#include <linux/slab.h> 32#include <linux/slab.h>
@@ -886,11 +885,8 @@ static int setup(struct spi_device *spi)
886 chip = spi_get_ctldata(spi); 885 chip = spi_get_ctldata(spi);
887 if (!chip) { 886 if (!chip) {
888 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); 887 chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
889 if (!chip) { 888 if (!chip)
890 dev_err(&spi->dev,
891 "failed setup: can't allocate chip data\n");
892 return -ENOMEM; 889 return -ENOMEM;
893 }
894 890
895 if (drv_data->ssp_type == CE4100_SSP) { 891 if (drv_data->ssp_type == CE4100_SSP) {
896 if (spi->chip_select > 4) { 892 if (spi->chip_select > 4) {
@@ -1037,11 +1033,8 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
1037 return NULL; 1033 return NULL;
1038 1034
1039 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 1035 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1040 if (!pdata) { 1036 if (!pdata)
1041 dev_err(&pdev->dev,
1042 "failed to allocate memory for platform data\n");
1043 return NULL; 1037 return NULL;
1044 }
1045 1038
1046 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1039 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1047 if (!res) 1040 if (!res)
@@ -1202,6 +1195,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1202 tasklet_init(&drv_data->pump_transfers, pump_transfers, 1195 tasklet_init(&drv_data->pump_transfers, pump_transfers,
1203 (unsigned long)drv_data); 1196 (unsigned long)drv_data);
1204 1197
1198 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1199 pm_runtime_use_autosuspend(&pdev->dev);
1200 pm_runtime_set_active(&pdev->dev);
1201 pm_runtime_enable(&pdev->dev);
1202
1205 /* Register with the SPI framework */ 1203 /* Register with the SPI framework */
1206 platform_set_drvdata(pdev, drv_data); 1204 platform_set_drvdata(pdev, drv_data);
1207 status = devm_spi_register_master(&pdev->dev, master); 1205 status = devm_spi_register_master(&pdev->dev, master);
@@ -1210,11 +1208,6 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
1210 goto out_error_clock_enabled; 1208 goto out_error_clock_enabled;
1211 } 1209 }
1212 1210
1213 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1214 pm_runtime_use_autosuspend(&pdev->dev);
1215 pm_runtime_set_active(&pdev->dev);
1216 pm_runtime_enable(&pdev->dev);
1217
1218 return status; 1211 return status;
1219 1212
1220out_error_clock_enabled: 1213out_error_clock_enabled:
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index 78c66e3c53ed..fc1de86d3c8a 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -287,7 +287,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); 287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
288 288
289 if (!xfer) { 289 if (!xfer) {
290 dev_err_ratelimited(controller->dev, "unexpected irq %x08 %x08 %x08\n", 290 dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
291 qup_err, spi_err, opflags); 291 qup_err, spi_err, opflags);
292 return IRQ_HANDLED; 292 return IRQ_HANDLED;
293 } 293 }
@@ -366,7 +366,7 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
366 n_words = xfer->len / w_size; 366 n_words = xfer->len / w_size;
367 controller->w_size = w_size; 367 controller->w_size = w_size;
368 368
369 if (n_words <= controller->in_fifo_sz) { 369 if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
370 mode = QUP_IO_M_MODE_FIFO; 370 mode = QUP_IO_M_MODE_FIFO;
371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); 371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); 372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
@@ -749,7 +749,7 @@ static int spi_qup_remove(struct platform_device *pdev)
749 return 0; 749 return 0;
750} 750}
751 751
752static struct of_device_id spi_qup_dt_match[] = { 752static const struct of_device_id spi_qup_dt_match[] = {
753 { .compatible = "qcom,spi-qup-v2.1.1", }, 753 { .compatible = "qcom,spi-qup-v2.1.1", },
754 { .compatible = "qcom,spi-qup-v2.2.1", }, 754 { .compatible = "qcom,spi-qup-v2.2.1", },
755 { } 755 { }
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 1fb0ad213324..10112745bb17 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -183,8 +183,6 @@
183#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */ 183#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
184#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */ 184#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
185 185
186#define DUMMY_DATA 0x00
187
188struct rspi_data { 186struct rspi_data {
189 void __iomem *addr; 187 void __iomem *addr;
190 u32 max_speed_hz; 188 u32 max_speed_hz;
@@ -197,11 +195,6 @@ struct rspi_data {
197 int rx_irq, tx_irq; 195 int rx_irq, tx_irq;
198 const struct spi_ops *ops; 196 const struct spi_ops *ops;
199 197
200 /* for dmaengine */
201 struct dma_chan *chan_tx;
202 struct dma_chan *chan_rx;
203
204 unsigned dma_width_16bit:1;
205 unsigned dma_callbacked:1; 198 unsigned dma_callbacked:1;
206 unsigned byte_access:1; 199 unsigned byte_access:1;
207}; 200};
@@ -253,6 +246,8 @@ struct spi_ops {
253 int (*transfer_one)(struct spi_master *master, struct spi_device *spi, 246 int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
254 struct spi_transfer *xfer); 247 struct spi_transfer *xfer);
255 u16 mode_bits; 248 u16 mode_bits;
249 u16 flags;
250 u16 fifo_size;
256}; 251};
257 252
258/* 253/*
@@ -266,7 +261,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
266 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 261 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
267 262
268 /* Sets transfer bit rate */ 263 /* Sets transfer bit rate */
269 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 264 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
265 2 * rspi->max_speed_hz) - 1;
270 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 266 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
271 267
272 /* Disable dummy transmission, set 16-bit word access, 1 frame */ 268 /* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -302,7 +298,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
302 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 298 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
303 299
304 /* Sets transfer bit rate */ 300 /* Sets transfer bit rate */
305 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1; 301 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
302 2 * rspi->max_speed_hz) - 1;
306 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 303 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
307 304
308 /* Disable dummy transmission, set byte access */ 305 /* Disable dummy transmission, set byte access */
@@ -335,7 +332,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
335 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR); 332 rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
336 333
337 /* Sets transfer bit rate */ 334 /* Sets transfer bit rate */
338 spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz); 335 spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
339 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR); 336 rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
340 337
341 /* Disable dummy transmission, set byte access */ 338 /* Disable dummy transmission, set byte access */
@@ -403,11 +400,22 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
403 return 0; 400 return 0;
404} 401}
405 402
403static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
404{
405 return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
406}
407
408static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
409{
410 return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
411}
412
406static int rspi_data_out(struct rspi_data *rspi, u8 data) 413static int rspi_data_out(struct rspi_data *rspi, u8 data)
407{ 414{
408 if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) { 415 int error = rspi_wait_for_tx_empty(rspi);
416 if (error < 0) {
409 dev_err(&rspi->master->dev, "transmit timeout\n"); 417 dev_err(&rspi->master->dev, "transmit timeout\n");
410 return -ETIMEDOUT; 418 return error;
411 } 419 }
412 rspi_write_data(rspi, data); 420 rspi_write_data(rspi, data);
413 return 0; 421 return 0;
@@ -415,25 +423,36 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
415 423
416static int rspi_data_in(struct rspi_data *rspi) 424static int rspi_data_in(struct rspi_data *rspi)
417{ 425{
426 int error;
418 u8 data; 427 u8 data;
419 428
420 if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) { 429 error = rspi_wait_for_rx_full(rspi);
430 if (error < 0) {
421 dev_err(&rspi->master->dev, "receive timeout\n"); 431 dev_err(&rspi->master->dev, "receive timeout\n");
422 return -ETIMEDOUT; 432 return error;
423 } 433 }
424 data = rspi_read_data(rspi); 434 data = rspi_read_data(rspi);
425 return data; 435 return data;
426} 436}
427 437
428static int rspi_data_out_in(struct rspi_data *rspi, u8 data) 438static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
439 unsigned int n)
429{ 440{
430 int ret; 441 while (n-- > 0) {
431 442 if (tx) {
432 ret = rspi_data_out(rspi, data); 443 int ret = rspi_data_out(rspi, *tx++);
433 if (ret < 0) 444 if (ret < 0)
434 return ret; 445 return ret;
446 }
447 if (rx) {
448 int ret = rspi_data_in(rspi);
449 if (ret < 0)
450 return ret;
451 *rx++ = ret;
452 }
453 }
435 454
436 return rspi_data_in(rspi); 455 return 0;
437} 456}
438 457
439static void rspi_dma_complete(void *arg) 458static void rspi_dma_complete(void *arg)
@@ -444,97 +463,67 @@ static void rspi_dma_complete(void *arg)
444 wake_up_interruptible(&rspi->wait); 463 wake_up_interruptible(&rspi->wait);
445} 464}
446 465
447static int rspi_dma_map_sg(struct scatterlist *sg, const void *buf, 466static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
448 unsigned len, struct dma_chan *chan, 467 struct sg_table *rx)
449 enum dma_transfer_direction dir)
450{
451 sg_init_table(sg, 1);
452 sg_set_buf(sg, buf, len);
453 sg_dma_len(sg) = len;
454 return dma_map_sg(chan->device->dev, sg, 1, dir);
455}
456
457static void rspi_dma_unmap_sg(struct scatterlist *sg, struct dma_chan *chan,
458 enum dma_transfer_direction dir)
459{ 468{
460 dma_unmap_sg(chan->device->dev, sg, 1, dir); 469 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
461} 470 u8 irq_mask = 0;
462 471 unsigned int other_irq = 0;
463static void rspi_memory_to_8bit(void *buf, const void *data, unsigned len) 472 dma_cookie_t cookie;
464{ 473 int ret;
465 u16 *dst = buf;
466 const u8 *src = data;
467
468 while (len) {
469 *dst++ = (u16)(*src++);
470 len--;
471 }
472}
473
474static void rspi_memory_from_8bit(void *buf, const void *data, unsigned len)
475{
476 u8 *dst = buf;
477 const u16 *src = data;
478
479 while (len) {
480 *dst++ = (u8)*src++;
481 len--;
482 }
483}
484 474
485static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t) 475 if (tx) {
486{ 476 desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
487 struct scatterlist sg; 477 tx->sgl, tx->nents, DMA_TO_DEVICE,
488 const void *buf = NULL; 478 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
489 struct dma_async_tx_descriptor *desc; 479 if (!desc_tx)
490 unsigned int len; 480 return -EIO;
491 int ret = 0;
492
493 if (rspi->dma_width_16bit) {
494 void *tmp;
495 /*
496 * If DMAC bus width is 16-bit, the driver allocates a dummy
497 * buffer. And, the driver converts original data into the
498 * DMAC data as the following format:
499 * original data: 1st byte, 2nd byte ...
500 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
501 */
502 len = t->len * 2;
503 tmp = kmalloc(len, GFP_KERNEL);
504 if (!tmp)
505 return -ENOMEM;
506 rspi_memory_to_8bit(tmp, t->tx_buf, t->len);
507 buf = tmp;
508 } else {
509 len = t->len;
510 buf = t->tx_buf;
511 }
512 481
513 if (!rspi_dma_map_sg(&sg, buf, len, rspi->chan_tx, DMA_TO_DEVICE)) { 482 irq_mask |= SPCR_SPTIE;
514 ret = -EFAULT;
515 goto end_nomap;
516 } 483 }
517 desc = dmaengine_prep_slave_sg(rspi->chan_tx, &sg, 1, DMA_TO_DEVICE, 484 if (rx) {
518 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 485 desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
519 if (!desc) { 486 rx->sgl, rx->nents, DMA_FROM_DEVICE,
520 ret = -EIO; 487 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
521 goto end; 488 if (!desc_rx)
489 return -EIO;
490
491 irq_mask |= SPCR_SPRIE;
522 } 492 }
523 493
524 /* 494 /*
525 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be 495 * DMAC needs SPxIE, but if SPxIE is set, the IRQ routine will be
526 * called. So, this driver disables the IRQ while DMA transfer. 496 * called. So, this driver disables the IRQ while DMA transfer.
527 */ 497 */
528 disable_irq(rspi->tx_irq); 498 if (tx)
499 disable_irq(other_irq = rspi->tx_irq);
500 if (rx && rspi->rx_irq != other_irq)
501 disable_irq(rspi->rx_irq);
529 502
530 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR); 503 rspi_enable_irq(rspi, irq_mask);
531 rspi_enable_irq(rspi, SPCR_SPTIE);
532 rspi->dma_callbacked = 0; 504 rspi->dma_callbacked = 0;
533 505
534 desc->callback = rspi_dma_complete; 506 if (rx) {
535 desc->callback_param = rspi; 507 desc_rx->callback = rspi_dma_complete;
536 dmaengine_submit(desc); 508 desc_rx->callback_param = rspi;
537 dma_async_issue_pending(rspi->chan_tx); 509 cookie = dmaengine_submit(desc_rx);
510 if (dma_submit_error(cookie))
511 return cookie;
512 dma_async_issue_pending(rspi->master->dma_rx);
513 }
514 if (tx) {
515 if (rx) {
516 /* No callback */
517 desc_tx->callback = NULL;
518 } else {
519 desc_tx->callback = rspi_dma_complete;
520 desc_tx->callback_param = rspi;
521 }
522 cookie = dmaengine_submit(desc_tx);
523 if (dma_submit_error(cookie))
524 return cookie;
525 dma_async_issue_pending(rspi->master->dma_tx);
526 }
538 527
539 ret = wait_event_interruptible_timeout(rspi->wait, 528 ret = wait_event_interruptible_timeout(rspi->wait,
540 rspi->dma_callbacked, HZ); 529 rspi->dma_callbacked, HZ);
@@ -542,15 +531,13 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
542 ret = 0; 531 ret = 0;
543 else if (!ret) 532 else if (!ret)
544 ret = -ETIMEDOUT; 533 ret = -ETIMEDOUT;
545 rspi_disable_irq(rspi, SPCR_SPTIE);
546 534
547 enable_irq(rspi->tx_irq); 535 rspi_disable_irq(rspi, irq_mask);
548 536
549end: 537 if (tx)
550 rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE); 538 enable_irq(rspi->tx_irq);
551end_nomap: 539 if (rx && rspi->rx_irq != other_irq)
552 if (rspi->dma_width_16bit) 540 enable_irq(rspi->rx_irq);
553 kfree(buf);
554 541
555 return ret; 542 return ret;
556} 543}
@@ -585,157 +572,37 @@ static void qspi_receive_init(const struct rspi_data *rspi)
585 rspi_write8(rspi, 0, QSPI_SPBFCR); 572 rspi_write8(rspi, 0, QSPI_SPBFCR);
586} 573}
587 574
588static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t) 575static bool __rspi_can_dma(const struct rspi_data *rspi,
576 const struct spi_transfer *xfer)
589{ 577{
590 struct scatterlist sg, sg_dummy; 578 return xfer->len > rspi->ops->fifo_size;
591 void *dummy = NULL, *rx_buf = NULL;
592 struct dma_async_tx_descriptor *desc, *desc_dummy;
593 unsigned int len;
594 int ret = 0;
595
596 if (rspi->dma_width_16bit) {
597 /*
598 * If DMAC bus width is 16-bit, the driver allocates a dummy
599 * buffer. And, finally the driver converts the DMAC data into
600 * actual data as the following format:
601 * DMAC data: 1st byte, dummy, 2nd byte, dummy ...
602 * actual data: 1st byte, 2nd byte ...
603 */
604 len = t->len * 2;
605 rx_buf = kmalloc(len, GFP_KERNEL);
606 if (!rx_buf)
607 return -ENOMEM;
608 } else {
609 len = t->len;
610 rx_buf = t->rx_buf;
611 }
612
613 /* prepare dummy transfer to generate SPI clocks */
614 dummy = kzalloc(len, GFP_KERNEL);
615 if (!dummy) {
616 ret = -ENOMEM;
617 goto end_nomap;
618 }
619 if (!rspi_dma_map_sg(&sg_dummy, dummy, len, rspi->chan_tx,
620 DMA_TO_DEVICE)) {
621 ret = -EFAULT;
622 goto end_nomap;
623 }
624 desc_dummy = dmaengine_prep_slave_sg(rspi->chan_tx, &sg_dummy, 1,
625 DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
626 if (!desc_dummy) {
627 ret = -EIO;
628 goto end_dummy_mapped;
629 }
630
631 /* prepare receive transfer */
632 if (!rspi_dma_map_sg(&sg, rx_buf, len, rspi->chan_rx,
633 DMA_FROM_DEVICE)) {
634 ret = -EFAULT;
635 goto end_dummy_mapped;
636
637 }
638 desc = dmaengine_prep_slave_sg(rspi->chan_rx, &sg, 1, DMA_FROM_DEVICE,
639 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
640 if (!desc) {
641 ret = -EIO;
642 goto end;
643 }
644
645 rspi_receive_init(rspi);
646
647 /*
648 * DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
649 * called. So, this driver disables the IRQ while DMA transfer.
650 */
651 disable_irq(rspi->tx_irq);
652 if (rspi->rx_irq != rspi->tx_irq)
653 disable_irq(rspi->rx_irq);
654
655 rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
656 rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
657 rspi->dma_callbacked = 0;
658
659 desc->callback = rspi_dma_complete;
660 desc->callback_param = rspi;
661 dmaengine_submit(desc);
662 dma_async_issue_pending(rspi->chan_rx);
663
664 desc_dummy->callback = NULL; /* No callback */
665 dmaengine_submit(desc_dummy);
666 dma_async_issue_pending(rspi->chan_tx);
667
668 ret = wait_event_interruptible_timeout(rspi->wait,
669 rspi->dma_callbacked, HZ);
670 if (ret > 0 && rspi->dma_callbacked)
671 ret = 0;
672 else if (!ret)
673 ret = -ETIMEDOUT;
674 rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
675
676 enable_irq(rspi->tx_irq);
677 if (rspi->rx_irq != rspi->tx_irq)
678 enable_irq(rspi->rx_irq);
679
680end:
681 rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
682end_dummy_mapped:
683 rspi_dma_unmap_sg(&sg_dummy, rspi->chan_tx, DMA_TO_DEVICE);
684end_nomap:
685 if (rspi->dma_width_16bit) {
686 if (!ret)
687 rspi_memory_from_8bit(t->rx_buf, rx_buf, t->len);
688 kfree(rx_buf);
689 }
690 kfree(dummy);
691
692 return ret;
693} 579}
694 580
695static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t) 581static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
582 struct spi_transfer *xfer)
696{ 583{
697 if (t->tx_buf && rspi->chan_tx) 584 struct rspi_data *rspi = spi_master_get_devdata(master);
698 return 1;
699 /* If the module receives data by DMAC, it also needs TX DMAC */
700 if (t->rx_buf && rspi->chan_tx && rspi->chan_rx)
701 return 1;
702 585
703 return 0; 586 return __rspi_can_dma(rspi, xfer);
704} 587}
705 588
706static int rspi_transfer_out_in(struct rspi_data *rspi, 589static int rspi_common_transfer(struct rspi_data *rspi,
707 struct spi_transfer *xfer) 590 struct spi_transfer *xfer)
708{ 591{
709 int remain = xfer->len, ret; 592 int ret;
710 const u8 *tx_buf = xfer->tx_buf;
711 u8 *rx_buf = xfer->rx_buf;
712 u8 spcr, data;
713
714 rspi_receive_init(rspi);
715
716 spcr = rspi_read8(rspi, RSPI_SPCR);
717 if (rx_buf)
718 spcr &= ~SPCR_TXMD;
719 else
720 spcr |= SPCR_TXMD;
721 rspi_write8(rspi, spcr, RSPI_SPCR);
722 593
723 while (remain > 0) { 594 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
724 data = tx_buf ? *tx_buf++ : DUMMY_DATA; 595 /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
725 ret = rspi_data_out(rspi, data); 596 return rspi_dma_transfer(rspi, &xfer->tx_sg,
726 if (ret < 0) 597 xfer->rx_buf ? &xfer->rx_sg : NULL);
727 return ret;
728 if (rx_buf) {
729 ret = rspi_data_in(rspi);
730 if (ret < 0)
731 return ret;
732 *rx_buf++ = ret;
733 }
734 remain--;
735 } 598 }
736 599
600 ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
601 if (ret < 0)
602 return ret;
603
737 /* Wait for the last transmission */ 604 /* Wait for the last transmission */
738 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); 605 rspi_wait_for_tx_empty(rspi);
739 606
740 return 0; 607 return 0;
741} 608}
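Throughout the rspi rework above, buffer mapping is deferred to the SPI core: rspi_can_dma() is installed as master->can_dma, and the core is expected to populate xfer->tx_sg/rx_sg only for transfers where that hook returned true, which is why rspi_common_transfer() re-checks __rspi_can_dma() before taking the DMA path. A toy, userspace-only model of that dispatch (not the kernel structures), using the fifo_size threshold from the ops tables further down:

#include <stdbool.h>
#include <stdio.h>

/* toy stand-ins for the driver state, not the kernel types */
struct xfer { unsigned int len; };

static const unsigned int fifo_size = 32;	/* qspi_ops value shown below */

static bool can_dma(const struct xfer *x)
{
	/* mirrors __rspi_can_dma(): DMA only pays off beyond the FIFO size */
	return x->len > fifo_size;
}

static void do_transfer(const struct xfer *x, bool have_dma_channels)
{
	if (have_dma_channels && can_dma(x))
		printf("len=%u: DMA path\n", x->len);
	else
		printf("len=%u: PIO path\n", x->len);
}

int main(void)
{
	struct xfer small = { .len = 16 };
	struct xfer large = { .len = 4096 };

	do_transfer(&small, true);
	do_transfer(&large, true);
	do_transfer(&large, false);	/* e.g. DMA channel request failed */
	return 0;
}

The same idea explains the rx_buf test in rspi_common_transfer(): a TX-only transfer on RSPI-on-SH has no rx_sg to hand to rspi_dma_transfer(), so NULL is passed instead.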
@@ -744,46 +611,18 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
744 struct spi_transfer *xfer) 611 struct spi_transfer *xfer)
745{ 612{
746 struct rspi_data *rspi = spi_master_get_devdata(master); 613 struct rspi_data *rspi = spi_master_get_devdata(master);
747 int ret; 614 u8 spcr;
748 615
749 if (!rspi_is_dma(rspi, xfer)) 616 spcr = rspi_read8(rspi, RSPI_SPCR);
750 return rspi_transfer_out_in(rspi, xfer); 617 if (xfer->rx_buf) {
751 618 rspi_receive_init(rspi);
752 if (xfer->tx_buf) { 619 spcr &= ~SPCR_TXMD;
753 ret = rspi_send_dma(rspi, xfer); 620 } else {
754 if (ret < 0) 621 spcr |= SPCR_TXMD;
755 return ret;
756 }
757 if (xfer->rx_buf)
758 return rspi_receive_dma(rspi, xfer);
759
760 return 0;
761}
762
763static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
764 struct spi_transfer *xfer)
765{
766 int remain = xfer->len, ret;
767 const u8 *tx_buf = xfer->tx_buf;
768 u8 *rx_buf = xfer->rx_buf;
769 u8 data;
770
771 rspi_rz_receive_init(rspi);
772
773 while (remain > 0) {
774 data = tx_buf ? *tx_buf++ : DUMMY_DATA;
775 ret = rspi_data_out_in(rspi, data);
776 if (ret < 0)
777 return ret;
778 if (rx_buf)
779 *rx_buf++ = ret;
780 remain--;
781 } 622 }
623 rspi_write8(rspi, spcr, RSPI_SPCR);
782 624
783 /* Wait for the last transmission */ 625 return rspi_common_transfer(rspi, xfer);
784 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
785
786 return 0;
787} 626}
788 627
789static int rspi_rz_transfer_one(struct spi_master *master, 628static int rspi_rz_transfer_one(struct spi_master *master,
@@ -791,68 +630,44 @@ static int rspi_rz_transfer_one(struct spi_master *master,
791 struct spi_transfer *xfer) 630 struct spi_transfer *xfer)
792{ 631{
793 struct rspi_data *rspi = spi_master_get_devdata(master); 632 struct rspi_data *rspi = spi_master_get_devdata(master);
633 int ret;
794 634
795 return rspi_rz_transfer_out_in(rspi, xfer); 635 rspi_rz_receive_init(rspi);
636
637 return rspi_common_transfer(rspi, xfer);
796} 638}
797 639
798static int qspi_transfer_out_in(struct rspi_data *rspi, 640static int qspi_transfer_out_in(struct rspi_data *rspi,
799 struct spi_transfer *xfer) 641 struct spi_transfer *xfer)
800{ 642{
801 int remain = xfer->len, ret;
802 const u8 *tx_buf = xfer->tx_buf;
803 u8 *rx_buf = xfer->rx_buf;
804 u8 data;
805
806 qspi_receive_init(rspi); 643 qspi_receive_init(rspi);
807 644
808 while (remain > 0) { 645 return rspi_common_transfer(rspi, xfer);
809 data = tx_buf ? *tx_buf++ : DUMMY_DATA;
810 ret = rspi_data_out_in(rspi, data);
811 if (ret < 0)
812 return ret;
813 if (rx_buf)
814 *rx_buf++ = ret;
815 remain--;
816 }
817
818 /* Wait for the last transmission */
819 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
820
821 return 0;
822} 646}
823 647
824static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer) 648static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
825{ 649{
826 const u8 *buf = xfer->tx_buf;
827 unsigned int i;
828 int ret; 650 int ret;
829 651
830 for (i = 0; i < xfer->len; i++) { 652 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
831 ret = rspi_data_out(rspi, *buf++); 653 return rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
832 if (ret < 0) 654
833 return ret; 655 ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
834 } 656 if (ret < 0)
657 return ret;
835 658
836 /* Wait for the last transmission */ 659 /* Wait for the last transmission */
837 rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE); 660 rspi_wait_for_tx_empty(rspi);
838 661
839 return 0; 662 return 0;
840} 663}
841 664
842static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer) 665static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
843{ 666{
844 u8 *buf = xfer->rx_buf; 667 if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer))
845 unsigned int i; 668 return rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
846 int ret;
847
848 for (i = 0; i < xfer->len; i++) {
849 ret = rspi_data_in(rspi);
850 if (ret < 0)
851 return ret;
852 *buf++ = ret;
853 }
854 669
855 return 0; 670 return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
856} 671}
857 672
858static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi, 673static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
@@ -862,10 +677,10 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
862 677
863 if (spi->mode & SPI_LOOP) { 678 if (spi->mode & SPI_LOOP) {
864 return qspi_transfer_out_in(rspi, xfer); 679 return qspi_transfer_out_in(rspi, xfer);
865 } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) { 680 } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
866 /* Quad or Dual SPI Write */ 681 /* Quad or Dual SPI Write */
867 return qspi_transfer_out(rspi, xfer); 682 return qspi_transfer_out(rspi, xfer);
868 } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) { 683 } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
869 /* Quad or Dual SPI Read */ 684 /* Quad or Dual SPI Read */
870 return qspi_transfer_in(rspi, xfer); 685 return qspi_transfer_in(rspi, xfer);
871 } else { 686 } else {
@@ -1046,65 +861,78 @@ static irqreturn_t rspi_irq_tx(int irq, void *_sr)
1046 return 0; 861 return 0;
1047} 862}
1048 863
1049static int rspi_request_dma(struct rspi_data *rspi, 864static struct dma_chan *rspi_request_dma_chan(struct device *dev,
1050 struct platform_device *pdev) 865 enum dma_transfer_direction dir,
866 unsigned int id,
867 dma_addr_t port_addr)
1051{ 868{
1052 const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
1053 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1054 dma_cap_mask_t mask; 869 dma_cap_mask_t mask;
870 struct dma_chan *chan;
1055 struct dma_slave_config cfg; 871 struct dma_slave_config cfg;
1056 int ret; 872 int ret;
1057 873
1058 if (!res || !rspi_pd) 874 dma_cap_zero(mask);
1059 return 0; /* The driver assumes no error. */ 875 dma_cap_set(DMA_SLAVE, mask);
1060 876
1061 rspi->dma_width_16bit = rspi_pd->dma_width_16bit; 877 chan = dma_request_channel(mask, shdma_chan_filter,
1062 878 (void *)(unsigned long)id);
1063 /* If the module receives data by DMAC, it also needs TX DMAC */ 879 if (!chan) {
1064 if (rspi_pd->dma_rx_id && rspi_pd->dma_tx_id) { 880 dev_warn(dev, "dma_request_channel failed\n");
1065 dma_cap_zero(mask); 881 return NULL;
1066 dma_cap_set(DMA_SLAVE, mask);
1067 rspi->chan_rx = dma_request_channel(mask, shdma_chan_filter,
1068 (void *)rspi_pd->dma_rx_id);
1069 if (rspi->chan_rx) {
1070 cfg.slave_id = rspi_pd->dma_rx_id;
1071 cfg.direction = DMA_DEV_TO_MEM;
1072 cfg.dst_addr = 0;
1073 cfg.src_addr = res->start + RSPI_SPDR;
1074 ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
1075 if (!ret)
1076 dev_info(&pdev->dev, "Use DMA when rx.\n");
1077 else
1078 return ret;
1079 }
1080 } 882 }
1081 if (rspi_pd->dma_tx_id) { 883
1082 dma_cap_zero(mask); 884 memset(&cfg, 0, sizeof(cfg));
1083 dma_cap_set(DMA_SLAVE, mask); 885 cfg.slave_id = id;
1084 rspi->chan_tx = dma_request_channel(mask, shdma_chan_filter, 886 cfg.direction = dir;
1085 (void *)rspi_pd->dma_tx_id); 887 if (dir == DMA_MEM_TO_DEV)
1086 if (rspi->chan_tx) { 888 cfg.dst_addr = port_addr;
1087 cfg.slave_id = rspi_pd->dma_tx_id; 889 else
1088 cfg.direction = DMA_MEM_TO_DEV; 890 cfg.src_addr = port_addr;
1089 cfg.dst_addr = res->start + RSPI_SPDR; 891
1090 cfg.src_addr = 0; 892 ret = dmaengine_slave_config(chan, &cfg);
1091 ret = dmaengine_slave_config(rspi->chan_tx, &cfg); 893 if (ret) {
1092 if (!ret) 894 dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
1093 dev_info(&pdev->dev, "Use DMA when tx\n"); 895 dma_release_channel(chan);
1094 else 896 return NULL;
1095 return ret;
1096 }
1097 } 897 }
1098 898
899 return chan;
900}
901
902static int rspi_request_dma(struct device *dev, struct spi_master *master,
903 const struct resource *res)
904{
905 const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
906
907 if (!rspi_pd || !rspi_pd->dma_rx_id || !rspi_pd->dma_tx_id)
908 return 0; /* The driver assumes no error. */
909
910 master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM,
911 rspi_pd->dma_rx_id,
912 res->start + RSPI_SPDR);
913 if (!master->dma_rx)
914 return -ENODEV;
915
916 master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV,
917 rspi_pd->dma_tx_id,
918 res->start + RSPI_SPDR);
919 if (!master->dma_tx) {
920 dma_release_channel(master->dma_rx);
921 master->dma_rx = NULL;
922 return -ENODEV;
923 }
924
925 master->can_dma = rspi_can_dma;
926 dev_info(dev, "DMA available");
1099 return 0; 927 return 0;
1100} 928}
1101 929
1102static void rspi_release_dma(struct rspi_data *rspi) 930static void rspi_release_dma(struct rspi_data *rspi)
1103{ 931{
1104 if (rspi->chan_tx) 932 if (rspi->master->dma_tx)
1105 dma_release_channel(rspi->chan_tx); 933 dma_release_channel(rspi->master->dma_tx);
1106 if (rspi->chan_rx) 934 if (rspi->master->dma_rx)
1107 dma_release_channel(rspi->chan_rx); 935 dma_release_channel(rspi->master->dma_rx);
1108} 936}
1109 937
1110static int rspi_remove(struct platform_device *pdev) 938static int rspi_remove(struct platform_device *pdev)
@@ -1118,23 +946,29 @@ static int rspi_remove(struct platform_device *pdev)
1118} 946}
1119 947
1120static const struct spi_ops rspi_ops = { 948static const struct spi_ops rspi_ops = {
1121 .set_config_register = rspi_set_config_register, 949 .set_config_register = rspi_set_config_register,
1122 .transfer_one = rspi_transfer_one, 950 .transfer_one = rspi_transfer_one,
1123 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 951 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
952 .flags = SPI_MASTER_MUST_TX,
953 .fifo_size = 8,
1124}; 954};
1125 955
1126static const struct spi_ops rspi_rz_ops = { 956static const struct spi_ops rspi_rz_ops = {
1127 .set_config_register = rspi_rz_set_config_register, 957 .set_config_register = rspi_rz_set_config_register,
1128 .transfer_one = rspi_rz_transfer_one, 958 .transfer_one = rspi_rz_transfer_one,
1129 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, 959 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
960 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
961 .fifo_size = 8, /* 8 for TX, 32 for RX */
1130}; 962};
1131 963
1132static const struct spi_ops qspi_ops = { 964static const struct spi_ops qspi_ops = {
1133 .set_config_register = qspi_set_config_register, 965 .set_config_register = qspi_set_config_register,
1134 .transfer_one = qspi_transfer_one, 966 .transfer_one = qspi_transfer_one,
1135 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP | 967 .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
1136 SPI_TX_DUAL | SPI_TX_QUAD | 968 SPI_TX_DUAL | SPI_TX_QUAD |
1137 SPI_RX_DUAL | SPI_RX_QUAD, 969 SPI_RX_DUAL | SPI_RX_QUAD,
970 .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
971 .fifo_size = 32,
1138}; 972};
1139 973
1140#ifdef CONFIG_OF 974#ifdef CONFIG_OF
@@ -1254,6 +1088,7 @@ static int rspi_probe(struct platform_device *pdev)
1254 master->prepare_message = rspi_prepare_message; 1088 master->prepare_message = rspi_prepare_message;
1255 master->unprepare_message = rspi_unprepare_message; 1089 master->unprepare_message = rspi_unprepare_message;
1256 master->mode_bits = ops->mode_bits; 1090 master->mode_bits = ops->mode_bits;
1091 master->flags = ops->flags;
1257 master->dev.of_node = pdev->dev.of_node; 1092 master->dev.of_node = pdev->dev.of_node;
1258 1093
1259 ret = platform_get_irq_byname(pdev, "rx"); 1094 ret = platform_get_irq_byname(pdev, "rx");
@@ -1291,11 +1126,9 @@ static int rspi_probe(struct platform_device *pdev)
1291 goto error2; 1126 goto error2;
1292 } 1127 }
1293 1128
1294 ret = rspi_request_dma(rspi, pdev); 1129 ret = rspi_request_dma(&pdev->dev, master, res);
1295 if (ret < 0) { 1130 if (ret < 0)
1296 dev_err(&pdev->dev, "rspi_request_dma failed.\n"); 1131 dev_warn(&pdev->dev, "DMA not available, using PIO\n");
1297 goto error3;
1298 }
1299 1132
1300 ret = devm_spi_register_master(&pdev->dev, master); 1133 ret = devm_spi_register_master(&pdev->dev, master);
1301 if (ret < 0) { 1134 if (ret < 0) {
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index bed23384dfab..e713737d784f 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -10,7 +10,6 @@
10*/ 10*/
11 11
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/workqueue.h>
14#include <linux/interrupt.h> 13#include <linux/interrupt.h>
15#include <linux/delay.h> 14#include <linux/delay.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
@@ -183,11 +182,11 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
183 182
184 /* allocate settings on the first call */ 183 /* allocate settings on the first call */
185 if (!cs) { 184 if (!cs) {
186 cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); 185 cs = devm_kzalloc(&spi->dev,
187 if (!cs) { 186 sizeof(struct s3c24xx_spi_devstate),
188 dev_err(&spi->dev, "no memory for controller state\n"); 187 GFP_KERNEL);
188 if (!cs)
189 return -ENOMEM; 189 return -ENOMEM;
190 }
191 190
192 cs->spcon = SPCON_DEFAULT; 191 cs->spcon = SPCON_DEFAULT;
193 cs->hz = -1; 192 cs->hz = -1;
@@ -209,11 +208,6 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
209 return 0; 208 return 0;
210} 209}
211 210
212static void s3c24xx_spi_cleanup(struct spi_device *spi)
213{
214 kfree(spi->controller_state);
215}
216
217static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) 211static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
218{ 212{
219 return hw->tx ? hw->tx[count] : 0; 213 return hw->tx ? hw->tx[count] : 0;
@@ -543,7 +537,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
543 hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; 537 hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
544 538
545 hw->master->setup = s3c24xx_spi_setup; 539 hw->master->setup = s3c24xx_spi_setup;
546 hw->master->cleanup = s3c24xx_spi_cleanup;
547 540
548 dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); 541 dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
549 542
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index f19cd97855e8..75a56968b14c 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -19,7 +19,6 @@
19 19
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/workqueue.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
25#include <linux/clk.h> 24#include <linux/clk.h>
@@ -773,7 +772,6 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
773 772
774 cs = kzalloc(sizeof(*cs), GFP_KERNEL); 773 cs = kzalloc(sizeof(*cs), GFP_KERNEL);
775 if (!cs) { 774 if (!cs) {
776 dev_err(&spi->dev, "could not allocate memory for controller data\n");
777 of_node_put(data_np); 775 of_node_put(data_np);
778 return ERR_PTR(-ENOMEM); 776 return ERR_PTR(-ENOMEM);
779 } 777 }
@@ -987,10 +985,8 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
987 u32 temp; 985 u32 temp;
988 986
989 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL); 987 sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
990 if (!sci) { 988 if (!sci)
991 dev_err(dev, "memory allocation for spi_info failed\n");
992 return ERR_PTR(-ENOMEM); 989 return ERR_PTR(-ENOMEM);
993 }
994 990
995 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) { 991 if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
996 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n"); 992 dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index e850d03e7190..45b09142afe2 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -642,10 +642,8 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
642 u32 num_cs = 1; 642 u32 num_cs = 1;
643 643
644 info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL); 644 info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
645 if (!info) { 645 if (!info)
646 dev_err(dev, "failed to allocate setup data\n");
647 return NULL; 646 return NULL;
648 }
649 647
650 /* Parse the MSIOF properties */ 648 /* Parse the MSIOF properties */
651 of_property_read_u32(np, "num-cs", &num_cs); 649 of_property_read_u32(np, "num-cs", &num_cs);
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 8b44b71f5024..1f56ef651d1a 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -16,7 +16,6 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/spinlock.h> 18#include <linux/spinlock.h>
19#include <linux/workqueue.h>
20#include <linux/platform_device.h> 19#include <linux/platform_device.h>
21 20
22#include <linux/spi/spi.h> 21#include <linux/spi/spi.h>
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 67d8909dcf39..95ac276eaafe 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/completion.h>
13#include <linux/interrupt.h> 14#include <linux/interrupt.h>
14#include <linux/io.h> 15#include <linux/io.h>
15#include <linux/of.h> 16#include <linux/of.h>
@@ -85,6 +86,7 @@
85#define SIRFSOC_SPI_TX_DONE BIT(1) 86#define SIRFSOC_SPI_TX_DONE BIT(1)
86#define SIRFSOC_SPI_RX_OFLOW BIT(2) 87#define SIRFSOC_SPI_RX_OFLOW BIT(2)
87#define SIRFSOC_SPI_TX_UFLOW BIT(3) 88#define SIRFSOC_SPI_TX_UFLOW BIT(3)
89#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
88#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6) 90#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
89#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7) 91#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
90#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8) 92#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
@@ -264,41 +266,34 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
264{ 266{
265 struct sirfsoc_spi *sspi = dev_id; 267 struct sirfsoc_spi *sspi = dev_id;
266 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS); 268 u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
267
268 writel(spi_stat, sspi->base + SIRFSOC_SPI_INT_STATUS);
269
270 if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) { 269 if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
271 complete(&sspi->tx_done); 270 complete(&sspi->tx_done);
272 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 271 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
272 writel(SIRFSOC_SPI_INT_MASK_ALL,
273 sspi->base + SIRFSOC_SPI_INT_STATUS);
273 return IRQ_HANDLED; 274 return IRQ_HANDLED;
274 } 275 }
275 276
276 /* Error Conditions */ 277 /* Error Conditions */
277 if (spi_stat & SIRFSOC_SPI_RX_OFLOW || 278 if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
278 spi_stat & SIRFSOC_SPI_TX_UFLOW) { 279 spi_stat & SIRFSOC_SPI_TX_UFLOW) {
280 complete(&sspi->tx_done);
279 complete(&sspi->rx_done); 281 complete(&sspi->rx_done);
280 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN); 282 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
283 writel(SIRFSOC_SPI_INT_MASK_ALL,
284 sspi->base + SIRFSOC_SPI_INT_STATUS);
285 return IRQ_HANDLED;
281 } 286 }
287 if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
288 complete(&sspi->tx_done);
289 while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
290 SIRFSOC_SPI_RX_IO_DMA))
291 cpu_relax();
292 complete(&sspi->rx_done);
293 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
294 writel(SIRFSOC_SPI_INT_MASK_ALL,
295 sspi->base + SIRFSOC_SPI_INT_STATUS);
282 296
283 if (spi_stat & (SIRFSOC_SPI_FRM_END
284 | SIRFSOC_SPI_RXFIFO_THD_REACH))
285 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
286 & SIRFSOC_SPI_FIFO_EMPTY)) &&
287 sspi->left_rx_word)
288 sspi->rx_word(sspi);
289
290 if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY |
291 SIRFSOC_SPI_TXFIFO_THD_REACH))
292 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
293 & SIRFSOC_SPI_FIFO_FULL)) &&
294 sspi->left_tx_word)
295 sspi->tx_word(sspi);
296
297 /* Received all words */
298 if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) {
299 complete(&sspi->rx_done);
300 writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
301 }
302 return IRQ_HANDLED; 297 return IRQ_HANDLED;
303} 298}
304 299
@@ -309,59 +304,51 @@ static void spi_sirfsoc_dma_fini_callback(void *data)
309 complete(dma_complete); 304 complete(dma_complete);
310} 305}
311 306
312static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t) 307static int spi_sirfsoc_cmd_transfer(struct spi_device *spi,
308 struct spi_transfer *t)
313{ 309{
314 struct sirfsoc_spi *sspi; 310 struct sirfsoc_spi *sspi;
315 int timeout = t->len * 10; 311 int timeout = t->len * 10;
316 sspi = spi_master_get_devdata(spi->master); 312 u32 cmd;
317 313
318 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; 314 sspi = spi_master_get_devdata(spi->master);
319 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage; 315 memcpy(&cmd, sspi->tx, t->len);
320 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width; 316 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
321 reinit_completion(&sspi->rx_done); 317 cmd = cpu_to_be32(cmd) >>
322 reinit_completion(&sspi->tx_done); 318 ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
323 319 if (sspi->word_width == 2 && t->len == 4 &&
324 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS); 320 (!(spi->mode & SPI_LSB_FIRST)))
325 321 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
326 /* 322 writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
327 * fill tx_buf into command register and wait for its completion 323 writel(SIRFSOC_SPI_FRM_END_INT_EN,
328 */ 324 sspi->base + SIRFSOC_SPI_INT_EN);
329 if (sspi->tx_by_cmd) { 325 writel(SIRFSOC_SPI_CMD_TX_EN,
330 u32 cmd; 326 sspi->base + SIRFSOC_SPI_TX_RX_EN);
331 memcpy(&cmd, sspi->tx, t->len); 327 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
332 328 dev_err(&spi->dev, "cmd transfer timeout\n");
333 if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST)) 329 return 0;
334 cmd = cpu_to_be32(cmd) >> 330 }
335 ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
336 if (sspi->word_width == 2 && t->len == 4 &&
337 (!(spi->mode & SPI_LSB_FIRST)))
338 cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
339
340 writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
341 writel(SIRFSOC_SPI_FRM_END_INT_EN,
342 sspi->base + SIRFSOC_SPI_INT_EN);
343 writel(SIRFSOC_SPI_CMD_TX_EN,
344 sspi->base + SIRFSOC_SPI_TX_RX_EN);
345 331
346 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 332 return t->len;
347 dev_err(&spi->dev, "transfer timeout\n"); 333}
348 return 0;
349 }
350 334
351 return t->len; 335static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
352 } 336 struct spi_transfer *t)
337{
338 struct sirfsoc_spi *sspi;
339 struct dma_async_tx_descriptor *rx_desc, *tx_desc;
340 int timeout = t->len * 10;
353 341
354 if (sspi->left_tx_word == 1) { 342 sspi = spi_master_get_devdata(spi->master);
355 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 343 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
356 SIRFSOC_SPI_ENA_AUTO_CLR, 344 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
357 sspi->base + SIRFSOC_SPI_CTRL); 345 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
358 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 346 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
359 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 347 writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
360 } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word < 348 writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
361 SIRFSOC_SPI_DAT_FRM_LEN_MAX)) { 349 if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
362 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) | 350 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
363 SIRFSOC_SPI_MUL_DAT_MODE | 351 SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
364 SIRFSOC_SPI_ENA_AUTO_CLR,
365 sspi->base + SIRFSOC_SPI_CTRL); 352 sspi->base + SIRFSOC_SPI_CTRL);
366 writel(sspi->left_tx_word - 1, 353 writel(sspi->left_tx_word - 1,
367 sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 354 sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
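For short command-mode writes, spi_sirfsoc_cmd_transfer() above packs the first t->len tx bytes into the 32-bit SIRFSOC_SPI_CMD register, byte-swapping so the first byte lands in the most significant used lane when the device is not in LSB-first mode. A rough userspace sketch of that packing, assuming SIRFSOC_MAX_CMD_BYTES is 4 and a little-endian host (so __builtin_bswap32 stands in for cpu_to_be32):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_CMD_BYTES	4	/* assumed value of SIRFSOC_MAX_CMD_BYTES */

int main(void)
{
	const uint8_t tx[] = { 0xab, 0xcd };	/* hypothetical 2-byte command */
	size_t len = sizeof(tx);
	uint32_t cmd = 0;

	memcpy(&cmd, tx, len);
	printf("raw memcpy:  0x%08x\n", (unsigned)cmd);	/* 0x0000cdab on little-endian */

	/* cpu_to_be32(cmd) >> ((MAX_CMD_BYTES - len) * 8), as in the hunk above */
	cmd = __builtin_bswap32(cmd) >> ((MAX_CMD_BYTES - len) * 8);
	printf("packed word: 0x%08x\n", (unsigned)cmd);	/* 0x0000abcd: tx[0] now in the high byte */
	return 0;
}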
@@ -373,76 +360,122 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
373 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN); 360 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
374 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN); 361 writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
375 } 362 }
376 363 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
377 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 364 (t->tx_buf != t->rx_buf) ?
378 writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 365 DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
379 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 366 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
380 writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 367 sspi->dst_start, t->len, DMA_DEV_TO_MEM,
381 368 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
382 if (IS_DMA_VALID(t)) { 369 rx_desc->callback = spi_sirfsoc_dma_fini_callback;
383 struct dma_async_tx_descriptor *rx_desc, *tx_desc; 370 rx_desc->callback_param = &sspi->rx_done;
384 371
385 sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE); 372 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
386 rx_desc = dmaengine_prep_slave_single(sspi->rx_chan, 373 (t->tx_buf != t->rx_buf) ?
387 sspi->dst_start, t->len, DMA_DEV_TO_MEM, 374 DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
388 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 375 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
389 rx_desc->callback = spi_sirfsoc_dma_fini_callback; 376 sspi->src_start, t->len, DMA_MEM_TO_DEV,
390 rx_desc->callback_param = &sspi->rx_done; 377 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
391 378 tx_desc->callback = spi_sirfsoc_dma_fini_callback;
392 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE); 379 tx_desc->callback_param = &sspi->tx_done;
393 tx_desc = dmaengine_prep_slave_single(sspi->tx_chan, 380
394 sspi->src_start, t->len, DMA_MEM_TO_DEV, 381 dmaengine_submit(tx_desc);
395 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 382 dmaengine_submit(rx_desc);
396 tx_desc->callback = spi_sirfsoc_dma_fini_callback; 383 dma_async_issue_pending(sspi->tx_chan);
397 tx_desc->callback_param = &sspi->tx_done; 384 dma_async_issue_pending(sspi->rx_chan);
398 385 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
399 dmaengine_submit(tx_desc); 386 sspi->base + SIRFSOC_SPI_TX_RX_EN);
400 dmaengine_submit(rx_desc); 387 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
401 dma_async_issue_pending(sspi->tx_chan);
402 dma_async_issue_pending(sspi->rx_chan);
403 } else {
404 /* Send the first word to trigger the whole tx/rx process */
405 sspi->tx_word(sspi);
406
407 writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
408 SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
409 SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
410 SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
411 }
412
413 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
414
415 if (!IS_DMA_VALID(t)) { /* for PIO */
416 if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
417 dev_err(&spi->dev, "transfer timeout\n");
418 } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
419 dev_err(&spi->dev, "transfer timeout\n"); 388 dev_err(&spi->dev, "transfer timeout\n");
420 dmaengine_terminate_all(sspi->rx_chan); 389 dmaengine_terminate_all(sspi->rx_chan);
421 } else 390 } else
422 sspi->left_rx_word = 0; 391 sspi->left_rx_word = 0;
423
424 /* 392 /*
425 * we only wait tx-done event if transferring by DMA. for PIO, 393 * we only wait tx-done event if transferring by DMA. for PIO,
426 * we get rx data by writing tx data, so if rx is done, tx has 394 * we get rx data by writing tx data, so if rx is done, tx has
427 * done earlier 395 * done earlier
428 */ 396 */
429 if (IS_DMA_VALID(t)) { 397 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
430 if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) { 398 dev_err(&spi->dev, "transfer timeout\n");
431 dev_err(&spi->dev, "transfer timeout\n"); 399 dmaengine_terminate_all(sspi->tx_chan);
432 dmaengine_terminate_all(sspi->tx_chan);
433 }
434 }
435
436 if (IS_DMA_VALID(t)) {
437 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
438 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
439 } 400 }
440 401 dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
402 dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
441 /* TX, RX FIFO stop */ 403 /* TX, RX FIFO stop */
442 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP); 404 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
443 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP); 405 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
444 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN); 406 if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
445 writel(0, sspi->base + SIRFSOC_SPI_INT_EN); 407 writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
408}
409
410static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
411 struct spi_transfer *t)
412{
413 struct sirfsoc_spi *sspi;
414 int timeout = t->len * 10;
415
416 sspi = spi_master_get_devdata(spi->master);
417 do {
418 writel(SIRFSOC_SPI_FIFO_RESET,
419 sspi->base + SIRFSOC_SPI_RXFIFO_OP);
420 writel(SIRFSOC_SPI_FIFO_RESET,
421 sspi->base + SIRFSOC_SPI_TXFIFO_OP);
422 writel(SIRFSOC_SPI_FIFO_START,
423 sspi->base + SIRFSOC_SPI_RXFIFO_OP);
424 writel(SIRFSOC_SPI_FIFO_START,
425 sspi->base + SIRFSOC_SPI_TXFIFO_OP);
426 writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
427 writel(SIRFSOC_SPI_INT_MASK_ALL,
428 sspi->base + SIRFSOC_SPI_INT_STATUS);
429 writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
430 SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
431 sspi->base + SIRFSOC_SPI_CTRL);
432 writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
433 - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
434 writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
435 - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
436 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
437 & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
438 sspi->tx_word(sspi);
439 writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
440 SIRFSOC_SPI_TX_UFLOW_INT_EN |
441 SIRFSOC_SPI_RX_OFLOW_INT_EN,
442 sspi->base + SIRFSOC_SPI_INT_EN);
443 writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
444 sspi->base + SIRFSOC_SPI_TX_RX_EN);
445 if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
446 !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
447 dev_err(&spi->dev, "transfer timeout\n");
448 break;
449 }
450 while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
451 & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
452 sspi->rx_word(sspi);
453 writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
454 writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
455 } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
456}
457
458static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
459{
460 struct sirfsoc_spi *sspi;
461 sspi = spi_master_get_devdata(spi->master);
462
463 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
464 sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
465 sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
466 reinit_completion(&sspi->rx_done);
467 reinit_completion(&sspi->tx_done);
468 /*
469 * in the transfer, if transfer data using command register with rx_buf
470 * null, just fill command data into command register and wait for its
471 * completion.
472 */
473 if (sspi->tx_by_cmd)
474 spi_sirfsoc_cmd_transfer(spi, t);
475 else if (IS_DMA_VALID(t))
476 spi_sirfsoc_dma_transfer(spi, t);
477 else
478 spi_sirfsoc_pio_transfer(spi, t);
446 479
447 return t->len - sspi->left_rx_word * sspi->word_width; 480 return t->len - sspi->left_rx_word * sspi->word_width;
448} 481}
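The new PIO path above no longer drives the FIFOs from the interrupt handler; instead it programs one bounded chunk at a time, capped at 256 / word_width words per pass, and loops until both word counters reach zero. A toy model of that chunking, assuming (as the do/while condition implies) that tx_word()/rx_word() decrement left_tx_word/left_rx_word as they fill and drain the FIFOs:

#include <stdio.h>

int main(void)
{
	unsigned int word_width = 2;			/* e.g. 12- or 16-bit words */
	unsigned int left = 700;			/* hypothetical words remaining */
	unsigned int cap = 256 / word_width;		/* per-pass limit from the hunk above */

	while (left) {
		unsigned int chunk = left < cap ? left : cap;

		printf("pass: %u words\n", chunk);	/* 128, 128, ..., then the 60-word tail */
		left -= chunk;
	}
	return 0;
}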
@@ -512,7 +545,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
512 break; 545 break;
513 case 12: 546 case 12:
514 case 16: 547 case 16:
515 regval |= (bits_per_word == 12) ? SIRFSOC_SPI_TRAN_DAT_FORMAT_12 : 548 regval |= (bits_per_word == 12) ?
549 SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
516 SIRFSOC_SPI_TRAN_DAT_FORMAT_16; 550 SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
517 sspi->rx_word = spi_sirfsoc_rx_word_u16; 551 sspi->rx_word = spi_sirfsoc_rx_word_u16;
518 sspi->tx_word = spi_sirfsoc_tx_word_u16; 552 sspi->tx_word = spi_sirfsoc_tx_word_u16;
@@ -540,8 +574,8 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
540 regval |= SIRFSOC_SPI_CLK_IDLE_STAT; 574 regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
541 575
542 /* 576 /*
543 * Data should be driven at least 1/2 cycle before the fetch edge to make 577 * Data should be driven at least 1/2 cycle before the fetch edge
544 * sure that data gets stable at the fetch edge. 578 * to make sure that data gets stable at the fetch edge.
545 */ 579 */
546 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) || 580 if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
547 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA))) 581 (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
@@ -578,11 +612,14 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
578 if (IS_DMA_VALID(t)) { 612 if (IS_DMA_VALID(t)) {
579 /* Enable DMA mode for RX, TX */ 613 /* Enable DMA mode for RX, TX */
580 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 614 writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
581 writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 615 writel(SIRFSOC_SPI_RX_DMA_FLUSH,
616 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
582 } else { 617 } else {
583 /* Enable IO mode for RX, TX */ 618 /* Enable IO mode for RX, TX */
584 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL); 619 writel(SIRFSOC_SPI_IO_MODE_SEL,
585 writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL); 620 sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
621 writel(SIRFSOC_SPI_IO_MODE_SEL,
622 sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
586 } 623 }
587 624
588 return 0; 625 return 0;
@@ -612,7 +649,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
612 goto err_cs; 649 goto err_cs;
613 } 650 }
614 651
615 master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs); 652 master = spi_alloc_master(&pdev->dev,
653 sizeof(*sspi) + sizeof(int) * num_cs);
616 if (!master) { 654 if (!master) {
617 dev_err(&pdev->dev, "Unable to allocate SPI master\n"); 655 dev_err(&pdev->dev, "Unable to allocate SPI master\n");
618 return -ENOMEM; 656 return -ENOMEM;
@@ -808,8 +846,7 @@ static struct platform_driver spi_sirfsoc_driver = {
808 .remove = spi_sirfsoc_remove, 846 .remove = spi_sirfsoc_remove,
809}; 847};
810module_platform_driver(spi_sirfsoc_driver); 848module_platform_driver(spi_sirfsoc_driver);
811
812MODULE_DESCRIPTION("SiRF SoC SPI master driver"); 849MODULE_DESCRIPTION("SiRF SoC SPI master driver");
813MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, " 850MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
814 "Barry Song <Baohua.Song@csr.com>"); 851MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
815MODULE_LICENSE("GPL v2"); 852MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
index d266a8702067..85204c93f3d3 100644
--- a/drivers/spi/spi-sun4i.c
+++ b/drivers/spi/spi-sun4i.c
@@ -19,7 +19,6 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/workqueue.h>
23 22
24#include <linux/spi/spi.h> 23#include <linux/spi/spi.h>
25 24
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index b3e3498a7e6f..bd24093f4038 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -20,7 +20,6 @@
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/pm_runtime.h> 21#include <linux/pm_runtime.h>
22#include <linux/reset.h> 22#include <linux/reset.h>
23#include <linux/workqueue.h>
24 23
25#include <linux/spi/spi.h> 24#include <linux/spi/spi.h>
26 25
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 400649595505..e4a85ada861d 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1012,7 +1012,7 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1012 return IRQ_WAKE_THREAD; 1012 return IRQ_WAKE_THREAD;
1013} 1013}
1014 1014
1015static struct of_device_id tegra_spi_of_match[] = { 1015static const struct of_device_id tegra_spi_of_match[] = {
1016 { .compatible = "nvidia,tegra114-spi", }, 1016 { .compatible = "nvidia,tegra114-spi", },
1017 {} 1017 {}
1018}; 1018};
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 47869ea636e1..3548ce25c08f 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -419,7 +419,7 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
419 return handle_cpu_based_xfer(tsd); 419 return handle_cpu_based_xfer(tsd);
420} 420}
421 421
422static struct of_device_id tegra_sflash_of_match[] = { 422static const struct of_device_id tegra_sflash_of_match[] = {
423 { .compatible = "nvidia,tegra20-sflash", }, 423 { .compatible = "nvidia,tegra20-sflash", },
424 {} 424 {}
425}; 425};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e3c1b93e45d1..0b9e32e9f493 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1001,7 +1001,7 @@ static const struct tegra_slink_chip_data tegra20_spi_cdata = {
1001 .cs_hold_time = false, 1001 .cs_hold_time = false,
1002}; 1002};
1003 1003
1004static struct of_device_id tegra_slink_of_match[] = { 1004static const struct of_device_id tegra_slink_of_match[] = {
1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, }, 1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, }, 1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
1007 {} 1007 {}
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 2d4010d80824..daf5aa1c24c3 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -253,10 +253,8 @@ static int tle62x0_probe(struct spi_device *spi)
253 } 253 }
254 254
255 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL); 255 st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
256 if (st == NULL) { 256 if (st == NULL)
257 dev_err(&spi->dev, "no memory for device state\n");
258 return -ENOMEM; 257 return -ENOMEM;
259 }
260 258
261 st->us = spi; 259 st->us = spi;
262 st->nr_gpio = pdata->gpio_count; 260 st->nr_gpio = pdata->gpio_count;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index f406b30af961..f05abf89c067 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1578,14 +1578,11 @@ static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1578 struct pch_pd_dev_save *pd_dev_save; 1578 struct pch_pd_dev_save *pd_dev_save;
1579 1579
1580 pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL); 1580 pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
1581 if (!pd_dev_save) { 1581 if (!pd_dev_save)
1582 dev_err(&pdev->dev, "%s Can't allocate pd_dev_sav\n", __func__);
1583 return -ENOMEM; 1582 return -ENOMEM;
1584 }
1585 1583
1586 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL); 1584 board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
1587 if (!board_dat) { 1585 if (!board_dat) {
1588 dev_err(&pdev->dev, "%s Can't allocate board_dat\n", __func__);
1589 retval = -ENOMEM; 1586 retval = -ENOMEM;
1590 goto err_no_mem; 1587 goto err_no_mem;
1591 } 1588 }
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 939edf473235..d4f9670b51bc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -796,7 +796,7 @@ static int spi_transfer_one_message(struct spi_master *master,
796 if (ret > 0) { 796 if (ret > 0) {
797 ret = 0; 797 ret = 0;
798 ms = xfer->len * 8 * 1000 / xfer->speed_hz; 798 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
799 ms += 10; /* some tolerance */ 799 ms += ms + 100; /* some tolerance */
800 800
801 ms = wait_for_completion_timeout(&master->xfer_completion, 801 ms = wait_for_completion_timeout(&master->xfer_completion,
802 msecs_to_jiffies(ms)); 802 msecs_to_jiffies(ms));
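The spi.c hunk above widens the per-transfer timeout: instead of the nominal transfer time plus a flat 10 ms, the core now allows twice the nominal time plus 100 ms before declaring a timeout. A quick worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
	unsigned long len = 4096;		/* hypothetical transfer length (bytes) */
	unsigned long speed_hz = 1000000;	/* hypothetical transfer speed (Hz) */
	unsigned long ms = len * 8 * 1000 / speed_hz;	/* nominal time, ~32 ms */

	printf("old budget: %lu ms\n", ms + 10);	/* 42 ms */
	printf("new budget: %lu ms\n", ms + ms + 100);	/* 164 ms */
	return 0;
}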
@@ -1255,6 +1255,8 @@ static void of_register_spi_devices(struct spi_master *master)
1255 spi->mode |= SPI_CS_HIGH; 1255 spi->mode |= SPI_CS_HIGH;
1256 if (of_find_property(nc, "spi-3wire", NULL)) 1256 if (of_find_property(nc, "spi-3wire", NULL))
1257 spi->mode |= SPI_3WIRE; 1257 spi->mode |= SPI_3WIRE;
1258 if (of_find_property(nc, "spi-lsb-first", NULL))
1259 spi->mode |= SPI_LSB_FIRST;
1258 1260
1259 /* Device DUAL/QUAD mode */ 1261 /* Device DUAL/QUAD mode */
1260 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1262 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
@@ -1268,11 +1270,10 @@ static void of_register_spi_devices(struct spi_master *master)
1268 spi->mode |= SPI_TX_QUAD; 1270 spi->mode |= SPI_TX_QUAD;
1269 break; 1271 break;
1270 default: 1272 default:
1271 dev_err(&master->dev, 1273 dev_warn(&master->dev,
1272 "spi-tx-bus-width %d not supported\n", 1274 "spi-tx-bus-width %d not supported\n",
1273 value); 1275 value);
1274 spi_dev_put(spi); 1276 break;
1275 continue;
1276 } 1277 }
1277 } 1278 }
1278 1279
@@ -1287,11 +1288,10 @@ static void of_register_spi_devices(struct spi_master *master)
1287 spi->mode |= SPI_RX_QUAD; 1288 spi->mode |= SPI_RX_QUAD;
1288 break; 1289 break;
1289 default: 1290 default:
1290 dev_err(&master->dev, 1291 dev_warn(&master->dev,
1291 "spi-rx-bus-width %d not supported\n", 1292 "spi-rx-bus-width %d not supported\n",
1292 value); 1293 value);
1293 spi_dev_put(spi); 1294 break;
1294 continue;
1295 } 1295 }
1296 } 1296 }
1297 1297