Diffstat (limited to 'drivers/spi')
 drivers/spi/Kconfig                                               |  13
 drivers/spi/Makefile                                              |   3
 drivers/spi/spi-adi-v3.c (renamed from drivers/spi/spi-bfin-v3.c) | 433
 drivers/spi/spi-atmel.c                                           |  12
 drivers/spi/spi-bfin5xx.c                                         |   1
 drivers/spi/spi-cadence.c                                         | 673
 drivers/spi/spi-dw-mmio.c                                         |  22
 drivers/spi/spi-dw.c                                              | 197
 drivers/spi/spi-dw.h                                              |  24
 drivers/spi/spi-fsl-dspi.c                                        |   2
 drivers/spi/spi-fsl-espi.c                                        |  38
 drivers/spi/spi-gpio.c                                            |   2
 drivers/spi/spi-pxa2xx-dma.c                                      |  16
 drivers/spi/spi-qup.c                                             |   8
 drivers/spi/spi-sh-hspi.c                                         |   4
 drivers/spi/spi-sirf.c                                            |  20
 drivers/spi/spi-tegra114.c                                        |   2
 drivers/spi/spi-tegra20-sflash.c                                  |   2
 drivers/spi/spi-tegra20-slink.c                                   |   2
 drivers/spi/spi.c                                                 | 146
 20 files changed, 1125 insertions(+), 495 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 60f2b41c7310..213b5cbb9dcc 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -91,8 +91,8 @@ config SPI_BFIN5XX
 	help
 	  This is the SPI controller master driver for Blackfin 5xx processor.
 
-config SPI_BFIN_V3
-	tristate "SPI controller v3 for Blackfin"
+config SPI_ADI_V3
+	tristate "SPI controller v3 for ADI"
 	depends on BF60x
 	help
 	  This is the SPI controller v3 master driver
@@ -148,6 +148,13 @@ config SPI_BUTTERFLY
 	  inexpensive battery powered microcontroller evaluation board.
 	  This same cable can be used to flash new firmware.
 
+config SPI_CADENCE
+	tristate "Cadence SPI controller"
+	depends on ARM
+	help
+	  This selects the Cadence SPI controller master driver
+	  used by Xilinx Zynq.
+
 config SPI_CLPS711X
 	tristate "CLPS711X host SPI controller"
 	depends on ARCH_CLPS711X || COMPILE_TEST
@@ -505,7 +512,7 @@ config SPI_TEGRA20_SLINK
 
 config SPI_TOPCLIFF_PCH
 	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
-	depends on PCI
+	depends on PCI && (X86_32 || COMPILE_TEST)
 	help
 	  SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
 	  used in some x86 embedded processors.
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index bd792669e563..929c9f5eac01 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -18,10 +18,11 @@ obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
 obj-$(CONFIG_SPI_BCM63XX)		+= spi-bcm63xx.o
 obj-$(CONFIG_SPI_BCM63XX_HSSPI)	+= spi-bcm63xx-hsspi.o
 obj-$(CONFIG_SPI_BFIN5XX)		+= spi-bfin5xx.o
-obj-$(CONFIG_SPI_BFIN_V3)		+= spi-bfin-v3.o
+obj-$(CONFIG_SPI_ADI_V3)		+= spi-adi-v3.o
 obj-$(CONFIG_SPI_BFIN_SPORT)		+= spi-bfin-sport.o
 obj-$(CONFIG_SPI_BITBANG)		+= spi-bitbang.o
 obj-$(CONFIG_SPI_BUTTERFLY)		+= spi-butterfly.o
+obj-$(CONFIG_SPI_CADENCE)		+= spi-cadence.o
 obj-$(CONFIG_SPI_CLPS711X)		+= spi-clps711x.o
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)	+= spi-coldfire-qspi.o
 obj-$(CONFIG_SPI_DAVINCI)		+= spi-davinci.o
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-adi-v3.c
index 4089d0e0d84e..dcb2287c7f8a 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-adi-v3.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Analog Devices SPI3 controller driver 2 * Analog Devices SPI3 controller driver
3 * 3 *
4 * Copyright (c) 2013 Analog Devices Inc. 4 * Copyright (c) 2014 Analog Devices Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
@@ -13,6 +13,7 @@
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 */ 14 */
15 15
16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/device.h> 18#include <linux/device.h>
18#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
@@ -26,35 +27,34 @@
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/spi/adi_spi3.h>
29#include <linux/types.h> 31#include <linux/types.h>
30 32
31#include <asm/bfin_spi3.h>
32#include <asm/cacheflush.h>
33#include <asm/dma.h> 33#include <asm/dma.h>
34#include <asm/portmux.h> 34#include <asm/portmux.h>
35 35
36enum bfin_spi_state { 36enum adi_spi_state {
37 START_STATE, 37 START_STATE,
38 RUNNING_STATE, 38 RUNNING_STATE,
39 DONE_STATE, 39 DONE_STATE,
40 ERROR_STATE 40 ERROR_STATE
41}; 41};
42 42
43struct bfin_spi_master; 43struct adi_spi_master;
44 44
45struct bfin_spi_transfer_ops { 45struct adi_spi_transfer_ops {
46 void (*write) (struct bfin_spi_master *); 46 void (*write) (struct adi_spi_master *);
47 void (*read) (struct bfin_spi_master *); 47 void (*read) (struct adi_spi_master *);
48 void (*duplex) (struct bfin_spi_master *); 48 void (*duplex) (struct adi_spi_master *);
49}; 49};
50 50
51/* runtime info for spi master */ 51/* runtime info for spi master */
52struct bfin_spi_master { 52struct adi_spi_master {
53 /* SPI framework hookup */ 53 /* SPI framework hookup */
54 struct spi_master *master; 54 struct spi_master *master;
55 55
56 /* Regs base of SPI controller */ 56 /* Regs base of SPI controller */
57 struct bfin_spi_regs __iomem *regs; 57 struct adi_spi_regs __iomem *regs;
58 58
59 /* Pin request list */ 59 /* Pin request list */
60 u16 *pin_req; 60 u16 *pin_req;
@@ -65,7 +65,7 @@ struct bfin_spi_master {
65 /* Current message transfer state info */ 65 /* Current message transfer state info */
66 struct spi_message *cur_msg; 66 struct spi_message *cur_msg;
67 struct spi_transfer *cur_transfer; 67 struct spi_transfer *cur_transfer;
68 struct bfin_spi_device *cur_chip; 68 struct adi_spi_device *cur_chip;
69 unsigned transfer_len; 69 unsigned transfer_len;
70 70
71 /* transfer buffer */ 71 /* transfer buffer */
@@ -90,12 +90,12 @@ struct bfin_spi_master {
90 u32 ssel; 90 u32 ssel;
91 91
92 unsigned long sclk; 92 unsigned long sclk;
93 enum bfin_spi_state state; 93 enum adi_spi_state state;
94 94
95 const struct bfin_spi_transfer_ops *ops; 95 const struct adi_spi_transfer_ops *ops;
96}; 96};
97 97
98struct bfin_spi_device { 98struct adi_spi_device {
99 u32 control; 99 u32 control;
100 u32 clock; 100 u32 clock;
101 u32 ssel; 101 u32 ssel;
@@ -105,17 +105,25 @@ struct bfin_spi_device {
105 u32 cs_gpio; 105 u32 cs_gpio;
106 u32 tx_dummy_val; /* tx value for rx only transfer */ 106 u32 tx_dummy_val; /* tx value for rx only transfer */
107 bool enable_dma; 107 bool enable_dma;
108 const struct bfin_spi_transfer_ops *ops; 108 const struct adi_spi_transfer_ops *ops;
109}; 109};
110 110
111static void bfin_spi_enable(struct bfin_spi_master *drv_data) 111static void adi_spi_enable(struct adi_spi_master *drv_data)
112{ 112{
113 bfin_write_or(&drv_data->regs->control, SPI_CTL_EN); 113 u32 ctl;
114
115 ctl = ioread32(&drv_data->regs->control);
116 ctl |= SPI_CTL_EN;
117 iowrite32(ctl, &drv_data->regs->control);
114} 118}
115 119
116static void bfin_spi_disable(struct bfin_spi_master *drv_data) 120static void adi_spi_disable(struct adi_spi_master *drv_data)
117{ 121{
118 bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN); 122 u32 ctl;
123
124 ctl = ioread32(&drv_data->regs->control);
125 ctl &= ~SPI_CTL_EN;
126 iowrite32(ctl, &drv_data->regs->control);
119} 127}
120 128
121/* Caculate the SPI_CLOCK register value based on input HZ */ 129/* Caculate the SPI_CLOCK register value based on input HZ */
@@ -128,35 +136,43 @@ static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
128 return spi_clock; 136 return spi_clock;
129} 137}
130 138
131static int bfin_spi_flush(struct bfin_spi_master *drv_data) 139static int adi_spi_flush(struct adi_spi_master *drv_data)
132{ 140{
133 unsigned long limit = loops_per_jiffy << 1; 141 unsigned long limit = loops_per_jiffy << 1;
134 142
135 /* wait for stop and clear stat */ 143 /* wait for stop and clear stat */
136 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit) 144 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
137 cpu_relax(); 145 cpu_relax();
138 146
139 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 147 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
140 148
141 return limit; 149 return limit;
142} 150}
143 151
144/* Chip select operation functions for cs_change flag */ 152/* Chip select operation functions for cs_change flag */
145static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip) 153static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip)
146{ 154{
147 if (likely(chip->cs < MAX_CTRL_CS)) 155 if (likely(chip->cs < MAX_CTRL_CS)) {
148 bfin_write_and(&drv_data->regs->ssel, ~chip->ssel); 156 u32 reg;
149 else 157 reg = ioread32(&drv_data->regs->ssel);
158 reg &= ~chip->ssel;
159 iowrite32(reg, &drv_data->regs->ssel);
160 } else {
150 gpio_set_value(chip->cs_gpio, 0); 161 gpio_set_value(chip->cs_gpio, 0);
162 }
151} 163}
152 164
153static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data, 165static void adi_spi_cs_deactive(struct adi_spi_master *drv_data,
154 struct bfin_spi_device *chip) 166 struct adi_spi_device *chip)
155{ 167{
156 if (likely(chip->cs < MAX_CTRL_CS)) 168 if (likely(chip->cs < MAX_CTRL_CS)) {
157 bfin_write_or(&drv_data->regs->ssel, chip->ssel); 169 u32 reg;
158 else 170 reg = ioread32(&drv_data->regs->ssel);
171 reg |= chip->ssel;
172 iowrite32(reg, &drv_data->regs->ssel);
173 } else {
159 gpio_set_value(chip->cs_gpio, 1); 174 gpio_set_value(chip->cs_gpio, 1);
175 }
160 176
161 /* Move delay here for consistency */ 177 /* Move delay here for consistency */
162 if (chip->cs_chg_udelay) 178 if (chip->cs_chg_udelay)
@@ -164,187 +180,192 @@ static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data,
164} 180}
165 181
166/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */ 182/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
167static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data, 183static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data,
168 struct bfin_spi_device *chip) 184 struct adi_spi_device *chip)
169{ 185{
170 if (chip->cs < MAX_CTRL_CS) 186 if (chip->cs < MAX_CTRL_CS) {
171 bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8); 187 u32 reg;
188 reg = ioread32(&drv_data->regs->ssel);
189 reg |= chip->ssel >> 8;
190 iowrite32(reg, &drv_data->regs->ssel);
191 }
172} 192}
173 193
174static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data, 194static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data,
175 struct bfin_spi_device *chip) 195 struct adi_spi_device *chip)
176{ 196{
177 if (chip->cs < MAX_CTRL_CS) 197 if (chip->cs < MAX_CTRL_CS) {
178 bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8)); 198 u32 reg;
199 reg = ioread32(&drv_data->regs->ssel);
200 reg &= ~(chip->ssel >> 8);
201 iowrite32(reg, &drv_data->regs->ssel);
202 }
179} 203}
180 204
181/* stop controller and re-config current chip*/ 205/* stop controller and re-config current chip*/
182static void bfin_spi_restore_state(struct bfin_spi_master *drv_data) 206static void adi_spi_restore_state(struct adi_spi_master *drv_data)
183{ 207{
184 struct bfin_spi_device *chip = drv_data->cur_chip; 208 struct adi_spi_device *chip = drv_data->cur_chip;
185 209
186 /* Clear status and disable clock */ 210 /* Clear status and disable clock */
187 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 211 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
188 bfin_write(&drv_data->regs->rx_control, 0x0); 212 iowrite32(0x0, &drv_data->regs->rx_control);
189 bfin_write(&drv_data->regs->tx_control, 0x0); 213 iowrite32(0x0, &drv_data->regs->tx_control);
190 bfin_spi_disable(drv_data); 214 adi_spi_disable(drv_data);
191
192 SSYNC();
193 215
194 /* Load the registers */ 216 /* Load the registers */
195 bfin_write(&drv_data->regs->control, chip->control); 217 iowrite32(chip->control, &drv_data->regs->control);
196 bfin_write(&drv_data->regs->clock, chip->clock); 218 iowrite32(chip->clock, &drv_data->regs->clock);
197 219
198 bfin_spi_enable(drv_data); 220 adi_spi_enable(drv_data);
199 drv_data->tx_num = drv_data->rx_num = 0; 221 drv_data->tx_num = drv_data->rx_num = 0;
200 /* we always choose tx transfer initiate */ 222 /* we always choose tx transfer initiate */
201 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN); 223 iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control);
202 bfin_write(&drv_data->regs->tx_control, 224 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control);
203 SPI_TXCTL_TEN | SPI_TXCTL_TTI); 225 adi_spi_cs_active(drv_data, chip);
204 bfin_spi_cs_active(drv_data, chip);
205} 226}
206 227
207/* discard invalid rx data and empty rfifo */ 228/* discard invalid rx data and empty rfifo */
208static inline void dummy_read(struct bfin_spi_master *drv_data) 229static inline void dummy_read(struct adi_spi_master *drv_data)
209{ 230{
210 while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)) 231 while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE))
211 bfin_read(&drv_data->regs->rfifo); 232 ioread32(&drv_data->regs->rfifo);
212} 233}
213 234
214static void bfin_spi_u8_write(struct bfin_spi_master *drv_data) 235static void adi_spi_u8_write(struct adi_spi_master *drv_data)
215{ 236{
216 dummy_read(drv_data); 237 dummy_read(drv_data);
217 while (drv_data->tx < drv_data->tx_end) { 238 while (drv_data->tx < drv_data->tx_end) {
218 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 239 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
219 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 240 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
220 cpu_relax(); 241 cpu_relax();
221 bfin_read(&drv_data->regs->rfifo); 242 ioread32(&drv_data->regs->rfifo);
222 } 243 }
223} 244}
224 245
225static void bfin_spi_u8_read(struct bfin_spi_master *drv_data) 246static void adi_spi_u8_read(struct adi_spi_master *drv_data)
226{ 247{
227 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 248 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
228 249
229 dummy_read(drv_data); 250 dummy_read(drv_data);
230 while (drv_data->rx < drv_data->rx_end) { 251 while (drv_data->rx < drv_data->rx_end) {
231 bfin_write(&drv_data->regs->tfifo, tx_val); 252 iowrite32(tx_val, &drv_data->regs->tfifo);
232 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 253 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
233 cpu_relax(); 254 cpu_relax();
234 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 255 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
235 } 256 }
236} 257}
237 258
238static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data) 259static void adi_spi_u8_duplex(struct adi_spi_master *drv_data)
239{ 260{
240 dummy_read(drv_data); 261 dummy_read(drv_data);
241 while (drv_data->rx < drv_data->rx_end) { 262 while (drv_data->rx < drv_data->rx_end) {
242 bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++))); 263 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
243 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 264 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
244 cpu_relax(); 265 cpu_relax();
245 *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo); 266 *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
246 } 267 }
247} 268}
248 269
249static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = { 270static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = {
250 .write = bfin_spi_u8_write, 271 .write = adi_spi_u8_write,
251 .read = bfin_spi_u8_read, 272 .read = adi_spi_u8_read,
252 .duplex = bfin_spi_u8_duplex, 273 .duplex = adi_spi_u8_duplex,
253}; 274};
254 275
255static void bfin_spi_u16_write(struct bfin_spi_master *drv_data) 276static void adi_spi_u16_write(struct adi_spi_master *drv_data)
256{ 277{
257 dummy_read(drv_data); 278 dummy_read(drv_data);
258 while (drv_data->tx < drv_data->tx_end) { 279 while (drv_data->tx < drv_data->tx_end) {
259 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 280 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
260 drv_data->tx += 2; 281 drv_data->tx += 2;
261 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 282 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
262 cpu_relax(); 283 cpu_relax();
263 bfin_read(&drv_data->regs->rfifo); 284 ioread32(&drv_data->regs->rfifo);
264 } 285 }
265} 286}
266 287
267static void bfin_spi_u16_read(struct bfin_spi_master *drv_data) 288static void adi_spi_u16_read(struct adi_spi_master *drv_data)
268{ 289{
269 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 290 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
270 291
271 dummy_read(drv_data); 292 dummy_read(drv_data);
272 while (drv_data->rx < drv_data->rx_end) { 293 while (drv_data->rx < drv_data->rx_end) {
273 bfin_write(&drv_data->regs->tfifo, tx_val); 294 iowrite32(tx_val, &drv_data->regs->tfifo);
274 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 295 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
275 cpu_relax(); 296 cpu_relax();
276 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 297 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
277 drv_data->rx += 2; 298 drv_data->rx += 2;
278 } 299 }
279} 300}
280 301
281static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data) 302static void adi_spi_u16_duplex(struct adi_spi_master *drv_data)
282{ 303{
283 dummy_read(drv_data); 304 dummy_read(drv_data);
284 while (drv_data->rx < drv_data->rx_end) { 305 while (drv_data->rx < drv_data->rx_end) {
285 bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx)); 306 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
286 drv_data->tx += 2; 307 drv_data->tx += 2;
287 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 308 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
288 cpu_relax(); 309 cpu_relax();
289 *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 310 *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
290 drv_data->rx += 2; 311 drv_data->rx += 2;
291 } 312 }
292} 313}
293 314
294static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = { 315static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = {
295 .write = bfin_spi_u16_write, 316 .write = adi_spi_u16_write,
296 .read = bfin_spi_u16_read, 317 .read = adi_spi_u16_read,
297 .duplex = bfin_spi_u16_duplex, 318 .duplex = adi_spi_u16_duplex,
298}; 319};
299 320
300static void bfin_spi_u32_write(struct bfin_spi_master *drv_data) 321static void adi_spi_u32_write(struct adi_spi_master *drv_data)
301{ 322{
302 dummy_read(drv_data); 323 dummy_read(drv_data);
303 while (drv_data->tx < drv_data->tx_end) { 324 while (drv_data->tx < drv_data->tx_end) {
304 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 325 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
305 drv_data->tx += 4; 326 drv_data->tx += 4;
306 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 327 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
307 cpu_relax(); 328 cpu_relax();
308 bfin_read(&drv_data->regs->rfifo); 329 ioread32(&drv_data->regs->rfifo);
309 } 330 }
310} 331}
311 332
312static void bfin_spi_u32_read(struct bfin_spi_master *drv_data) 333static void adi_spi_u32_read(struct adi_spi_master *drv_data)
313{ 334{
314 u32 tx_val = drv_data->cur_chip->tx_dummy_val; 335 u32 tx_val = drv_data->cur_chip->tx_dummy_val;
315 336
316 dummy_read(drv_data); 337 dummy_read(drv_data);
317 while (drv_data->rx < drv_data->rx_end) { 338 while (drv_data->rx < drv_data->rx_end) {
318 bfin_write(&drv_data->regs->tfifo, tx_val); 339 iowrite32(tx_val, &drv_data->regs->tfifo);
319 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 340 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
320 cpu_relax(); 341 cpu_relax();
321 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 342 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
322 drv_data->rx += 4; 343 drv_data->rx += 4;
323 } 344 }
324} 345}
325 346
326static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data) 347static void adi_spi_u32_duplex(struct adi_spi_master *drv_data)
327{ 348{
328 dummy_read(drv_data); 349 dummy_read(drv_data);
329 while (drv_data->rx < drv_data->rx_end) { 350 while (drv_data->rx < drv_data->rx_end) {
330 bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx)); 351 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
331 drv_data->tx += 4; 352 drv_data->tx += 4;
332 while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE) 353 while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
333 cpu_relax(); 354 cpu_relax();
334 *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo); 355 *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
335 drv_data->rx += 4; 356 drv_data->rx += 4;
336 } 357 }
337} 358}
338 359
339static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = { 360static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = {
340 .write = bfin_spi_u32_write, 361 .write = adi_spi_u32_write,
341 .read = bfin_spi_u32_read, 362 .read = adi_spi_u32_read,
342 .duplex = bfin_spi_u32_duplex, 363 .duplex = adi_spi_u32_duplex,
343}; 364};
344 365
345 366
346/* test if there is more transfer to be done */ 367/* test if there is more transfer to be done */
347static void bfin_spi_next_transfer(struct bfin_spi_master *drv) 368static void adi_spi_next_transfer(struct adi_spi_master *drv)
348{ 369{
349 struct spi_message *msg = drv->cur_msg; 370 struct spi_message *msg = drv->cur_msg;
350 struct spi_transfer *t = drv->cur_transfer; 371 struct spi_transfer *t = drv->cur_transfer;
@@ -360,15 +381,15 @@ static void bfin_spi_next_transfer(struct bfin_spi_master *drv)
360 } 381 }
361} 382}
362 383
363static void bfin_spi_giveback(struct bfin_spi_master *drv_data) 384static void adi_spi_giveback(struct adi_spi_master *drv_data)
364{ 385{
365 struct bfin_spi_device *chip = drv_data->cur_chip; 386 struct adi_spi_device *chip = drv_data->cur_chip;
366 387
367 bfin_spi_cs_deactive(drv_data, chip); 388 adi_spi_cs_deactive(drv_data, chip);
368 spi_finalize_current_message(drv_data->master); 389 spi_finalize_current_message(drv_data->master);
369} 390}
370 391
371static int bfin_spi_setup_transfer(struct bfin_spi_master *drv) 392static int adi_spi_setup_transfer(struct adi_spi_master *drv)
372{ 393{
373 struct spi_transfer *t = drv->cur_transfer; 394 struct spi_transfer *t = drv->cur_transfer;
374 u32 cr, cr_width; 395 u32 cr, cr_width;
@@ -393,34 +414,33 @@ static int bfin_spi_setup_transfer(struct bfin_spi_master *drv)
393 switch (t->bits_per_word) { 414 switch (t->bits_per_word) {
394 case 8: 415 case 8:
395 cr_width = SPI_CTL_SIZE08; 416 cr_width = SPI_CTL_SIZE08;
396 drv->ops = &bfin_bfin_spi_transfer_ops_u8; 417 drv->ops = &adi_spi_transfer_ops_u8;
397 break; 418 break;
398 case 16: 419 case 16:
399 cr_width = SPI_CTL_SIZE16; 420 cr_width = SPI_CTL_SIZE16;
400 drv->ops = &bfin_bfin_spi_transfer_ops_u16; 421 drv->ops = &adi_spi_transfer_ops_u16;
401 break; 422 break;
402 case 32: 423 case 32:
403 cr_width = SPI_CTL_SIZE32; 424 cr_width = SPI_CTL_SIZE32;
404 drv->ops = &bfin_bfin_spi_transfer_ops_u32; 425 drv->ops = &adi_spi_transfer_ops_u32;
405 break; 426 break;
406 default: 427 default:
407 return -EINVAL; 428 return -EINVAL;
408 } 429 }
409 cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE; 430 cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE;
410 cr |= cr_width; 431 cr |= cr_width;
411 bfin_write(&drv->regs->control, cr); 432 iowrite32(cr, &drv->regs->control);
412 433
413 /* speed setup */ 434 /* speed setup */
414 bfin_write(&drv->regs->clock, 435 iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock);
415 hz_to_spi_clock(drv->sclk, t->speed_hz));
416 return 0; 436 return 0;
417} 437}
418 438
419static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data) 439static int adi_spi_dma_xfer(struct adi_spi_master *drv_data)
420{ 440{
421 struct spi_transfer *t = drv_data->cur_transfer; 441 struct spi_transfer *t = drv_data->cur_transfer;
422 struct spi_message *msg = drv_data->cur_msg; 442 struct spi_message *msg = drv_data->cur_msg;
423 struct bfin_spi_device *chip = drv_data->cur_chip; 443 struct adi_spi_device *chip = drv_data->cur_chip;
424 u32 dma_config; 444 u32 dma_config;
425 unsigned long word_count, word_size; 445 unsigned long word_count, word_size;
426 void *tx_buf, *rx_buf; 446 void *tx_buf, *rx_buf;
@@ -498,17 +518,16 @@ static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data)
498 set_dma_config(drv_data->rx_dma, dma_config | WNR); 518 set_dma_config(drv_data->rx_dma, dma_config | WNR);
499 enable_dma(drv_data->tx_dma); 519 enable_dma(drv_data->tx_dma);
500 enable_dma(drv_data->rx_dma); 520 enable_dma(drv_data->rx_dma);
501 SSYNC();
502 521
503 bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE); 522 iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE,
504 SSYNC(); 523 &drv_data->regs->rx_control);
505 bfin_write(&drv_data->regs->tx_control, 524 iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF,
506 SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF); 525 &drv_data->regs->tx_control);
507 526
508 return 0; 527 return 0;
509} 528}
510 529
511static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data) 530static int adi_spi_pio_xfer(struct adi_spi_master *drv_data)
512{ 531{
513 struct spi_message *msg = drv_data->cur_msg; 532 struct spi_message *msg = drv_data->cur_msg;
514 533
@@ -529,19 +548,19 @@ static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data)
529 return -EIO; 548 return -EIO;
530 } 549 }
531 550
532 if (!bfin_spi_flush(drv_data)) 551 if (!adi_spi_flush(drv_data))
533 return -EIO; 552 return -EIO;
534 msg->actual_length += drv_data->transfer_len; 553 msg->actual_length += drv_data->transfer_len;
535 tasklet_schedule(&drv_data->pump_transfers); 554 tasklet_schedule(&drv_data->pump_transfers);
536 return 0; 555 return 0;
537} 556}
538 557
539static void bfin_spi_pump_transfers(unsigned long data) 558static void adi_spi_pump_transfers(unsigned long data)
540{ 559{
541 struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data; 560 struct adi_spi_master *drv_data = (struct adi_spi_master *)data;
542 struct spi_message *msg = NULL; 561 struct spi_message *msg = NULL;
543 struct spi_transfer *t = NULL; 562 struct spi_transfer *t = NULL;
544 struct bfin_spi_device *chip = NULL; 563 struct adi_spi_device *chip = NULL;
545 int ret; 564 int ret;
546 565
547 /* Get current state information */ 566 /* Get current state information */
@@ -552,7 +571,7 @@ static void bfin_spi_pump_transfers(unsigned long data)
552 /* Handle for abort */ 571 /* Handle for abort */
553 if (drv_data->state == ERROR_STATE) { 572 if (drv_data->state == ERROR_STATE) {
554 msg->status = -EIO; 573 msg->status = -EIO;
555 bfin_spi_giveback(drv_data); 574 adi_spi_giveback(drv_data);
556 return; 575 return;
557 } 576 }
558 577
@@ -560,14 +579,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
560 if (t->delay_usecs) 579 if (t->delay_usecs)
561 udelay(t->delay_usecs); 580 udelay(t->delay_usecs);
562 if (t->cs_change) 581 if (t->cs_change)
563 bfin_spi_cs_deactive(drv_data, chip); 582 adi_spi_cs_deactive(drv_data, chip);
564 bfin_spi_next_transfer(drv_data); 583 adi_spi_next_transfer(drv_data);
565 t = drv_data->cur_transfer; 584 t = drv_data->cur_transfer;
566 } 585 }
567 /* Handle end of message */ 586 /* Handle end of message */
568 if (drv_data->state == DONE_STATE) { 587 if (drv_data->state == DONE_STATE) {
569 msg->status = 0; 588 msg->status = 0;
570 bfin_spi_giveback(drv_data); 589 adi_spi_giveback(drv_data);
571 return; 590 return;
572 } 591 }
573 592
@@ -577,34 +596,34 @@ static void bfin_spi_pump_transfers(unsigned long data)
577 return; 596 return;
578 } 597 }
579 598
580 ret = bfin_spi_setup_transfer(drv_data); 599 ret = adi_spi_setup_transfer(drv_data);
581 if (ret) { 600 if (ret) {
582 msg->status = ret; 601 msg->status = ret;
583 bfin_spi_giveback(drv_data); 602 adi_spi_giveback(drv_data);
584 } 603 }
585 604
586 bfin_write(&drv_data->regs->status, 0xFFFFFFFF); 605 iowrite32(0xFFFFFFFF, &drv_data->regs->status);
587 bfin_spi_cs_active(drv_data, chip); 606 adi_spi_cs_active(drv_data, chip);
588 drv_data->state = RUNNING_STATE; 607 drv_data->state = RUNNING_STATE;
589 608
590 if (chip->enable_dma) 609 if (chip->enable_dma)
591 ret = bfin_spi_dma_xfer(drv_data); 610 ret = adi_spi_dma_xfer(drv_data);
592 else 611 else
593 ret = bfin_spi_pio_xfer(drv_data); 612 ret = adi_spi_pio_xfer(drv_data);
594 if (ret) { 613 if (ret) {
595 msg->status = ret; 614 msg->status = ret;
596 bfin_spi_giveback(drv_data); 615 adi_spi_giveback(drv_data);
597 } 616 }
598} 617}
599 618
600static int bfin_spi_transfer_one_message(struct spi_master *master, 619static int adi_spi_transfer_one_message(struct spi_master *master,
601 struct spi_message *m) 620 struct spi_message *m)
602{ 621{
603 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 622 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
604 623
605 drv_data->cur_msg = m; 624 drv_data->cur_msg = m;
606 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); 625 drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
607 bfin_spi_restore_state(drv_data); 626 adi_spi_restore_state(drv_data);
608 627
609 drv_data->state = START_STATE; 628 drv_data->state = START_STATE;
610 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, 629 drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
@@ -630,15 +649,15 @@ static const u16 ssel[][MAX_SPI_SSEL] = {
630 P_SPI2_SSEL6, P_SPI2_SSEL7}, 649 P_SPI2_SSEL6, P_SPI2_SSEL7},
631}; 650};
632 651
633static int bfin_spi_setup(struct spi_device *spi) 652static int adi_spi_setup(struct spi_device *spi)
634{ 653{
635 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 654 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
636 struct bfin_spi_device *chip = spi_get_ctldata(spi); 655 struct adi_spi_device *chip = spi_get_ctldata(spi);
637 u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE; 656 u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
638 int ret = -EINVAL; 657 int ret = -EINVAL;
639 658
640 if (!chip) { 659 if (!chip) {
641 struct bfin_spi3_chip *chip_info = spi->controller_data; 660 struct adi_spi3_chip *chip_info = spi->controller_data;
642 661
643 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 662 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
644 if (!chip) { 663 if (!chip) {
@@ -646,7 +665,7 @@ static int bfin_spi_setup(struct spi_device *spi)
646 return -ENOMEM; 665 return -ENOMEM;
647 } 666 }
648 if (chip_info) { 667 if (chip_info) {
649 if (chip_info->control & ~bfin_ctl_reg) { 668 if (chip_info->control & ~ctl_reg) {
650 dev_err(&spi->dev, 669 dev_err(&spi->dev,
651 "do not set bits that the SPI framework manages\n"); 670 "do not set bits that the SPI framework manages\n");
652 goto error; 671 goto error;
@@ -657,6 +676,7 @@ static int bfin_spi_setup(struct spi_device *spi)
657 chip->enable_dma = chip_info->enable_dma; 676 chip->enable_dma = chip_info->enable_dma;
658 } 677 }
659 chip->cs = spi->chip_select; 678 chip->cs = spi->chip_select;
679
660 if (chip->cs < MAX_CTRL_CS) { 680 if (chip->cs < MAX_CTRL_CS) {
661 chip->ssel = (1 << chip->cs) << 8; 681 chip->ssel = (1 << chip->cs) << 8;
662 ret = peripheral_request(ssel[spi->master->bus_num] 682 ret = peripheral_request(ssel[spi->master->bus_num]
@@ -678,7 +698,7 @@ static int bfin_spi_setup(struct spi_device *spi)
678 } 698 }
679 699
680 /* force a default base state */ 700 /* force a default base state */
681 chip->control &= bfin_ctl_reg; 701 chip->control &= ctl_reg;
682 702
683 if (spi->mode & SPI_CPOL) 703 if (spi->mode & SPI_CPOL)
684 chip->control |= SPI_CTL_CPOL; 704 chip->control |= SPI_CTL_CPOL;
@@ -692,8 +712,8 @@ static int bfin_spi_setup(struct spi_device *spi)
692 712
693 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz); 713 chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
694 714
695 bfin_spi_cs_enable(drv_data, chip); 715 adi_spi_cs_enable(drv_data, chip);
696 bfin_spi_cs_deactive(drv_data, chip); 716 adi_spi_cs_deactive(drv_data, chip);
697 717
698 return 0; 718 return 0;
699error: 719error:
@@ -705,10 +725,10 @@ error:
705 return ret; 725 return ret;
706} 726}
707 727
708static void bfin_spi_cleanup(struct spi_device *spi) 728static void adi_spi_cleanup(struct spi_device *spi)
709{ 729{
710 struct bfin_spi_device *chip = spi_get_ctldata(spi); 730 struct adi_spi_device *chip = spi_get_ctldata(spi);
711 struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master); 731 struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
712 732
713 if (!chip) 733 if (!chip)
714 return; 734 return;
@@ -716,7 +736,7 @@ static void bfin_spi_cleanup(struct spi_device *spi)
716 if (chip->cs < MAX_CTRL_CS) { 736 if (chip->cs < MAX_CTRL_CS) {
717 peripheral_free(ssel[spi->master->bus_num] 737 peripheral_free(ssel[spi->master->bus_num]
718 [chip->cs-1]); 738 [chip->cs-1]);
719 bfin_spi_cs_disable(drv_data, chip); 739 adi_spi_cs_disable(drv_data, chip);
720 } else { 740 } else {
721 gpio_free(chip->cs_gpio); 741 gpio_free(chip->cs_gpio);
722 } 742 }
@@ -725,10 +745,11 @@ static void bfin_spi_cleanup(struct spi_device *spi)
725 spi_set_ctldata(spi, NULL); 745 spi_set_ctldata(spi, NULL);
726} 746}
727 747
728static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id) 748static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id)
729{ 749{
730 struct bfin_spi_master *drv_data = dev_id; 750 struct adi_spi_master *drv_data = dev_id;
731 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma); 751 u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
752 u32 tx_ctl;
732 753
733 clear_dma_irqstat(drv_data->tx_dma); 754 clear_dma_irqstat(drv_data->tx_dma);
734 if (dma_stat & DMA_DONE) { 755 if (dma_stat & DMA_DONE) {
@@ -739,13 +760,15 @@ static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id)
739 if (drv_data->tx) 760 if (drv_data->tx)
740 drv_data->state = ERROR_STATE; 761 drv_data->state = ERROR_STATE;
741 } 762 }
742 bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF); 763 tx_ctl = ioread32(&drv_data->regs->tx_control);
764 tx_ctl &= ~SPI_TXCTL_TDR_NF;
765 iowrite32(tx_ctl, &drv_data->regs->tx_control);
743 return IRQ_HANDLED; 766 return IRQ_HANDLED;
744} 767}
745 768
746static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id) 769static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id)
747{ 770{
748 struct bfin_spi_master *drv_data = dev_id; 771 struct adi_spi_master *drv_data = dev_id;
749 struct spi_message *msg = drv_data->cur_msg; 772 struct spi_message *msg = drv_data->cur_msg;
750 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma); 773 u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
751 774
@@ -760,8 +783,8 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
760 dev_err(&drv_data->master->dev, 783 dev_err(&drv_data->master->dev,
761 "spi rx dma error: %d\n", dma_stat); 784 "spi rx dma error: %d\n", dma_stat);
762 } 785 }
763 bfin_write(&drv_data->regs->tx_control, 0); 786 iowrite32(0, &drv_data->regs->tx_control);
764 bfin_write(&drv_data->regs->rx_control, 0); 787 iowrite32(0, &drv_data->regs->rx_control);
765 if (drv_data->rx_num != drv_data->tx_num) 788 if (drv_data->rx_num != drv_data->tx_num)
766 dev_dbg(&drv_data->master->dev, 789 dev_dbg(&drv_data->master->dev,
767 "dma interrupt missing: tx=%d,rx=%d\n", 790 "dma interrupt missing: tx=%d,rx=%d\n",
@@ -770,15 +793,15 @@ static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
770 return IRQ_HANDLED; 793 return IRQ_HANDLED;
771} 794}
772 795
773static int bfin_spi_probe(struct platform_device *pdev) 796static int adi_spi_probe(struct platform_device *pdev)
774{ 797{
775 struct device *dev = &pdev->dev; 798 struct device *dev = &pdev->dev;
776 struct bfin_spi3_master *info = dev_get_platdata(dev); 799 struct adi_spi3_master *info = dev_get_platdata(dev);
777 struct spi_master *master; 800 struct spi_master *master;
778 struct bfin_spi_master *drv_data; 801 struct adi_spi_master *drv_data;
779 struct resource *mem, *res; 802 struct resource *mem, *res;
780 unsigned int tx_dma, rx_dma; 803 unsigned int tx_dma, rx_dma;
781 unsigned long sclk; 804 struct clk *sclk;
782 int ret; 805 int ret;
783 806
784 if (!info) { 807 if (!info) {
@@ -786,10 +809,10 @@ static int bfin_spi_probe(struct platform_device *pdev)
786 return -ENODEV; 809 return -ENODEV;
787 } 810 }
788 811
789 sclk = get_sclk1(); 812 sclk = devm_clk_get(dev, "spi");
790 if (!sclk) { 813 if (IS_ERR(sclk)) {
791 dev_err(dev, "can not get sclk1\n"); 814 dev_err(dev, "can not get spi clock\n");
792 return -ENXIO; 815 return PTR_ERR(sclk);
793 } 816 }
794 817
795 res = platform_get_resource(pdev, IORESOURCE_DMA, 0); 818 res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
@@ -819,9 +842,9 @@ static int bfin_spi_probe(struct platform_device *pdev)
819 842
820 master->bus_num = pdev->id; 843 master->bus_num = pdev->id;
821 master->num_chipselect = info->num_chipselect; 844 master->num_chipselect = info->num_chipselect;
822 master->cleanup = bfin_spi_cleanup; 845 master->cleanup = adi_spi_cleanup;
823 master->setup = bfin_spi_setup; 846 master->setup = adi_spi_setup;
824 master->transfer_one_message = bfin_spi_transfer_one_message; 847 master->transfer_one_message = adi_spi_transfer_one_message;
825 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | 848 master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
826 SPI_BPW_MASK(8); 849 SPI_BPW_MASK(8);
827 850
@@ -830,7 +853,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
830 drv_data->tx_dma = tx_dma; 853 drv_data->tx_dma = tx_dma;
831 drv_data->rx_dma = rx_dma; 854 drv_data->rx_dma = rx_dma;
832 drv_data->pin_req = info->pin_req; 855 drv_data->pin_req = info->pin_req;
833 drv_data->sclk = sclk; 856 drv_data->sclk = clk_get_rate(sclk);
834 857
835 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 858 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
836 drv_data->regs = devm_ioremap_resource(dev, mem); 859 drv_data->regs = devm_ioremap_resource(dev, mem);
@@ -845,28 +868,28 @@ static int bfin_spi_probe(struct platform_device *pdev)
845 dev_err(dev, "can not request SPI TX DMA channel\n"); 868 dev_err(dev, "can not request SPI TX DMA channel\n");
846 goto err_put_master; 869 goto err_put_master;
847 } 870 }
848 set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data); 871 set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data);
849 872
850 ret = request_dma(rx_dma, "SPI_RX_DMA"); 873 ret = request_dma(rx_dma, "SPI_RX_DMA");
851 if (ret) { 874 if (ret) {
852 dev_err(dev, "can not request SPI RX DMA channel\n"); 875 dev_err(dev, "can not request SPI RX DMA channel\n");
853 goto err_free_tx_dma; 876 goto err_free_tx_dma;
854 } 877 }
855 set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data); 878 set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data);
856 879
857 /* request CLK, MOSI and MISO */ 880 /* request CLK, MOSI and MISO */
858 ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3"); 881 ret = peripheral_request_list(drv_data->pin_req, "adi-spi3");
859 if (ret < 0) { 882 if (ret < 0) {
860 dev_err(dev, "can not request spi pins\n"); 883 dev_err(dev, "can not request spi pins\n");
861 goto err_free_rx_dma; 884 goto err_free_rx_dma;
862 } 885 }
863 886
864 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 887 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
865 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 888 iowrite32(0x0000FE00, &drv_data->regs->ssel);
866 bfin_write(&drv_data->regs->delay, 0x0); 889 iowrite32(0x0, &drv_data->regs->delay);
867 890
868 tasklet_init(&drv_data->pump_transfers, 891 tasklet_init(&drv_data->pump_transfers,
869 bfin_spi_pump_transfers, (unsigned long)drv_data); 892 adi_spi_pump_transfers, (unsigned long)drv_data);
870 /* register with the SPI framework */ 893 /* register with the SPI framework */
871 ret = devm_spi_register_master(dev, master); 894 ret = devm_spi_register_master(dev, master);
872 if (ret) { 895 if (ret) {
@@ -888,43 +911,41 @@ err_put_master:
888 return ret; 911 return ret;
889} 912}
890 913
891static int bfin_spi_remove(struct platform_device *pdev) 914static int adi_spi_remove(struct platform_device *pdev)
892{ 915{
893 struct spi_master *master = platform_get_drvdata(pdev); 916 struct spi_master *master = platform_get_drvdata(pdev);
894 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 917 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
895
896 bfin_spi_disable(drv_data);
897 918
919 adi_spi_disable(drv_data);
898 peripheral_free_list(drv_data->pin_req); 920 peripheral_free_list(drv_data->pin_req);
899 free_dma(drv_data->rx_dma); 921 free_dma(drv_data->rx_dma);
900 free_dma(drv_data->tx_dma); 922 free_dma(drv_data->tx_dma);
901
902 return 0; 923 return 0;
903} 924}
904 925
905#ifdef CONFIG_PM 926#ifdef CONFIG_PM
906static int bfin_spi_suspend(struct device *dev) 927static int adi_spi_suspend(struct device *dev)
907{ 928{
908 struct spi_master *master = dev_get_drvdata(dev); 929 struct spi_master *master = dev_get_drvdata(dev);
909 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 930 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
910 931
911 spi_master_suspend(master); 932 spi_master_suspend(master);
912 933
913 drv_data->control = bfin_read(&drv_data->regs->control); 934 drv_data->control = ioread32(&drv_data->regs->control);
914 drv_data->ssel = bfin_read(&drv_data->regs->ssel); 935 drv_data->ssel = ioread32(&drv_data->regs->ssel);
915 936
916 bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA); 937 iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
917 bfin_write(&drv_data->regs->ssel, 0x0000FE00); 938 iowrite32(0x0000FE00, &drv_data->regs->ssel);
918 dma_disable_irq(drv_data->rx_dma); 939 dma_disable_irq(drv_data->rx_dma);
919 dma_disable_irq(drv_data->tx_dma); 940 dma_disable_irq(drv_data->tx_dma);
920 941
921 return 0; 942 return 0;
922} 943}
923 944
924static int bfin_spi_resume(struct device *dev) 945static int adi_spi_resume(struct device *dev)
925{ 946{
926 struct spi_master *master = dev_get_drvdata(dev); 947 struct spi_master *master = dev_get_drvdata(dev);
927 struct bfin_spi_master *drv_data = spi_master_get_devdata(master); 948 struct adi_spi_master *drv_data = spi_master_get_devdata(master);
928 int ret = 0; 949 int ret = 0;
929 950
930 /* bootrom may modify spi and dma status when resume in spi boot mode */ 951 /* bootrom may modify spi and dma status when resume in spi boot mode */
@@ -932,8 +953,8 @@ static int bfin_spi_resume(struct device *dev)
932 953
933 dma_enable_irq(drv_data->rx_dma); 954 dma_enable_irq(drv_data->rx_dma);
934 dma_enable_irq(drv_data->tx_dma); 955 dma_enable_irq(drv_data->tx_dma);
935 bfin_write(&drv_data->regs->control, drv_data->control); 956 iowrite32(drv_data->control, &drv_data->regs->control);
936 bfin_write(&drv_data->regs->ssel, drv_data->ssel); 957 iowrite32(drv_data->ssel, &drv_data->regs->ssel);
937 958
938 ret = spi_master_resume(master); 959 ret = spi_master_resume(master);
939 if (ret) { 960 if (ret) {
@@ -944,21 +965,21 @@ static int bfin_spi_resume(struct device *dev)
944 return ret; 965 return ret;
945} 966}
946#endif 967#endif
947static const struct dev_pm_ops bfin_spi_pm_ops = { 968static const struct dev_pm_ops adi_spi_pm_ops = {
948 SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume) 969 SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume)
949}; 970};
950 971
951MODULE_ALIAS("platform:bfin-spi3"); 972MODULE_ALIAS("platform:adi-spi3");
952static struct platform_driver bfin_spi_driver = { 973static struct platform_driver adi_spi_driver = {
953 .driver = { 974 .driver = {
954 .name = "bfin-spi3", 975 .name = "adi-spi3",
955 .owner = THIS_MODULE, 976 .owner = THIS_MODULE,
956 .pm = &bfin_spi_pm_ops, 977 .pm = &adi_spi_pm_ops,
957 }, 978 },
958 .remove = bfin_spi_remove, 979 .remove = adi_spi_remove,
959}; 980};
960 981
961module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe); 982module_platform_driver_probe(adi_spi_driver, adi_spi_probe);
962 983
963MODULE_DESCRIPTION("Analog Devices SPI3 controller driver"); 984MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
964MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>"); 985MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
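
Note on the rename above: most of the churn in spi-adi-v3.c replaces the Blackfin-only bfin_read()/bfin_write_or()/bfin_write_and() accessors with portable MMIO accessors. As a minimal sketch of the read-modify-write pattern the driver now open-codes at each call site (the helper names below are illustrative only and do not appear in the patch):

#include <linux/io.h>
#include <linux/types.h>

/* Illustration only: the patch open-codes this pattern at every call
 * site instead of relying on the Blackfin-specific set/clear accessors.
 */
static inline void reg_set_bits(void __iomem *reg, u32 bits)
{
	u32 val;

	val = ioread32(reg);	/* read the current register value */
	val |= bits;		/* set the requested bits */
	iowrite32(val, reg);	/* write the result back */
}

static inline void reg_clear_bits(void __iomem *reg, u32 bits)
{
	u32 val;

	val = ioread32(reg);	/* read the current register value */
	val &= ~bits;		/* clear the requested bits */
	iowrite32(val, reg);	/* write the result back */
}
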
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 8005f9869481..92a6f0d93233 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -224,7 +224,7 @@ struct atmel_spi {
 	struct platform_device	*pdev;
 
 	struct spi_transfer	*current_transfer;
-	unsigned long		current_remaining_bytes;
+	int			current_remaining_bytes;
 	int			done_status;
 
 	struct completion	xfer_completion;
@@ -874,8 +874,9 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 		spi_readl(as, RDR);
 	}
 	if (xfer->bits_per_word > 8) {
-		as->current_remaining_bytes -= 2;
-		if (as->current_remaining_bytes < 0)
+		if (as->current_remaining_bytes > 2)
+			as->current_remaining_bytes -= 2;
+		else
 			as->current_remaining_bytes = 0;
 	} else {
 		as->current_remaining_bytes--;
@@ -1110,13 +1111,18 @@ static int atmel_spi_one_transfer(struct spi_master *master,
 			atmel_spi_next_xfer_pio(master, xfer);
 		} else {
 			as->current_remaining_bytes -= len;
+			if (as->current_remaining_bytes < 0)
+				as->current_remaining_bytes = 0;
 		}
 	} else {
 		atmel_spi_next_xfer_pio(master, xfer);
 	}
 
+	/* interrupts are disabled, so free the lock for schedule */
+	atmel_spi_unlock(as);
 	ret = wait_for_completion_timeout(&as->xfer_completion,
 					  SPI_DMA_TIMEOUT);
+	atmel_spi_lock(as);
 	if (WARN_ON(ret == 0)) {
 		dev_err(&spi->dev,
 			"spi trasfer timeout, err %d\n", ret);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 55e57c3eb9bd..ebf720b88a2a 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/gpio.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/ioport.h>
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 000000000000..bb758978465d
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,673 @@
1/*
2 * Cadence SPI controller driver (master mode only)
3 *
4 * Copyright (C) 2008 - 2014 Xilinx, Inc.
5 *
6 * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
7 *
8 * This program is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU General Public License version 2 as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_irq.h>
20#include <linux/of_address.h>
21#include <linux/platform_device.h>
22#include <linux/spi/spi.h>
23
24/* Name of this driver */
25#define CDNS_SPI_NAME "cdns-spi"
26
27/* Register offset definitions */
28#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
29#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
30#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
31#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
32#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
33#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
34#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
35#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
36#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
37#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
38#define CDNS_SPI_THLD_OFFSET 0x28 /* Transmit FIFO Watermark Register,RW */
39
40/*
41 * SPI Configuration Register bit Masks
42 *
43 * This register contains various control bits that affect the operation
44 * of the SPI controller
45 */
46#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
47#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
48#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
49#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
50#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
51#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
52#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
53#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
54#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
55#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
56 CDNS_SPI_CR_SSCTRL_MASK | \
57 CDNS_SPI_CR_SSFORCE_MASK | \
58 CDNS_SPI_CR_BAUD_DIV_4_MASK)
59
60/*
61 * SPI Configuration Register - Baud rate and slave select
62 *
63 * These are the values used in the calculation of baud rate divisor and
64 * setting the slave select.
65 */
66
67#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
68#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
69#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
70#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
71#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
72
73/*
74 * SPI Interrupt Registers bit Masks
75 *
76 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
77 * bit definitions.
78 */
79#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
80#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
81#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
82#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
83 CDNS_SPI_IXR_MODF_MASK)
84#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
85#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
86
87/*
88 * SPI Enable Register bit Masks
89 *
90 * This register is used to enable or disable the SPI controller
91 */
92#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
93#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
94
95/* SPI FIFO depth in bytes */
96#define CDNS_SPI_FIFO_DEPTH 128
97
98/* Default number of chip select lines */
99#define CDNS_SPI_DEFAULT_NUM_CS 4
100
101/**
102 * struct cdns_spi - This definition defines spi driver instance
103 * @regs: Virtual address of the SPI controller registers
104 * @ref_clk: Pointer to the peripheral clock
105 * @pclk: Pointer to the APB clock
106 * @speed_hz: Current SPI bus clock speed in Hz
107 * @txbuf: Pointer to the TX buffer
108 * @rxbuf: Pointer to the RX buffer
109 * @tx_bytes: Number of bytes left to transfer
110 * @rx_bytes: Number of bytes requested
111 * @dev_busy: Device busy flag
112 * @is_decoded_cs: Flag for decoder property set or not
113 */
114struct cdns_spi {
115 void __iomem *regs;
116 struct clk *ref_clk;
117 struct clk *pclk;
118 u32 speed_hz;
119 const u8 *txbuf;
120 u8 *rxbuf;
121 int tx_bytes;
122 int rx_bytes;
123 u8 dev_busy;
124 u32 is_decoded_cs;
125};
126
127/* Macros for the SPI controller read/write */
128static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
129{
130 return readl_relaxed(xspi->regs + offset);
131}
132
133static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
134{
135 writel_relaxed(val, xspi->regs + offset);
136}
137
138/**
139 * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
140 * @xspi: Pointer to the cdns_spi structure
141 *
142 * On reset the SPI controller is configured to be in master mode, baud rate
143 * divisor is set to 4, threshold value for TX FIFO not full interrupt is set
144 * to 1 and size of the word to be transferred as 8 bit.
145 * This function initializes the SPI controller to disable and clear all the
146 * interrupts, enable manual slave select and manual start, deselect all the
147 * chip select lines, and enable the SPI controller.
148 */
149static void cdns_spi_init_hw(struct cdns_spi *xspi)
150{
151 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
152 CDNS_SPI_ER_DISABLE_MASK);
153 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
154 CDNS_SPI_IXR_ALL_MASK);
155
156 /* Clear the RX FIFO */
157 while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
158 CDNS_SPI_IXR_RXNEMTY_MASK)
159 cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
160
161 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
162 CDNS_SPI_IXR_ALL_MASK);
163 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET,
164 CDNS_SPI_CR_DEFAULT_MASK);
165 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
166 CDNS_SPI_ER_ENABLE_MASK);
167}
168
169/**
170 * cdns_spi_chipselect - Select or deselect the chip select line
171 * @spi: Pointer to the spi_device structure
172 * @is_on: Select(0) or deselect (1) the chip select line
173 */
174static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
175{
176 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
177 u32 ctrl_reg;
178
179 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
180
181 if (is_high) {
182 /* Deselect the slave */
183 ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
184 } else {
185 /* Select the slave */
186 ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
187 if (!(xspi->is_decoded_cs))
188 ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
189 CDNS_SPI_SS_SHIFT) &
190 CDNS_SPI_CR_SSCTRL_MASK;
191 else
192 ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
193 CDNS_SPI_CR_SSCTRL_MASK;
194 }
195
196 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
197}
198
199/**
200 * cdns_spi_config_clock_mode - Sets clock polarity and phase
201 * @spi: Pointer to the spi_device structure
202 *
203 * Sets the requested clock polarity and phase.
204 */
205static void cdns_spi_config_clock_mode(struct spi_device *spi)
206{
207 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
208 u32 ctrl_reg;
209
210 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
211
212 /* Set the SPI clock phase and clock polarity */
213 ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
214 if (spi->mode & SPI_CPHA)
215 ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
216 if (spi->mode & SPI_CPOL)
217 ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
218
219 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
220}
221
222/**
223 * cdns_spi_config_clock_freq - Sets clock frequency
224 * @spi: Pointer to the spi_device structure
225 * @transfer: Pointer to the spi_transfer structure which provides
226 * information about next transfer setup parameters
227 *
228 * Sets the requested clock frequency.
229 * Note: If the requested frequency cannot be matched exactly using the
230 * available prescaler values, the driver sets the closest clock frequency
231 * that does not exceed the requested frequency for the transfer. If the
232 * requested frequency is above or below the range supported by the SPI
233 * controller, the driver sets the highest or lowest frequency supported by
234 * the controller, respectively.
235 */
236static void cdns_spi_config_clock_freq(struct spi_device *spi,
237 struct spi_transfer *transfer)
238{
239 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
240 u32 ctrl_reg, baud_rate_val;
241 unsigned long frequency;
242
243 frequency = clk_get_rate(xspi->ref_clk);
244
245 ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
246
247 /* Set the clock frequency */
248 if (xspi->speed_hz != transfer->speed_hz) {
249 /* first valid value is 1 */
250 baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
251 while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
252 (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
253 baud_rate_val++;
254
255 ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
256 ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
257
258 xspi->speed_hz = frequency / (2 << baud_rate_val);
259 }
260 cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
261}
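
The loop above walks the power-of-two prescalers, dividing the reference clock by 2 << n and stopping at the first rate that does not exceed the request. A self-contained worked example follows; the divisor limits of 1 and 7 and the 166.67 MHz reference clock are assumptions chosen purely for illustration (the real limits are the CDNS_SPI_BAUD_DIV_MIN/MAX defines earlier in the file).

#include <stdio.h>

#define BAUD_DIV_MIN	1	/* assumed lower limit */
#define BAUD_DIV_MAX	7	/* assumed upper limit */

/* Pick the divisor code n so that ref_clk / (2 << n) does not exceed the
 * requested rate, clamped to the controller's range. */
static unsigned int pick_baud_div(unsigned long ref_clk, unsigned long requested_hz)
{
	unsigned int n = BAUD_DIV_MIN;

	while (n < BAUD_DIV_MAX && (ref_clk / (2 << n)) > requested_hz)
		n++;
	return n;
}

int main(void)
{
	unsigned long ref_clk = 166666666;	/* illustrative reference clock */
	unsigned int n = pick_baud_div(ref_clk, 10000000);

	/* n = 4: 166.67 MHz / 32 = ~5.2 MHz, the highest rate not above 10 MHz */
	printf("div code %u -> %lu Hz\n", n, ref_clk / (2 << n));
	return 0;
}
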
262
263/**
264 * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
265 * @spi: Pointer to the spi_device structure
266 * @transfer: Pointer to the spi_transfer structure which provides
267 * information about next transfer setup parameters
268 *
269 * Sets the operational mode of SPI controller for the next SPI transfer and
270 * sets the requested clock frequency.
271 *
272 * Return: Always 0
273 */
274static int cdns_spi_setup_transfer(struct spi_device *spi,
275 struct spi_transfer *transfer)
276{
277 struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
278
279 cdns_spi_config_clock_freq(spi, transfer);
280
281 dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
282 __func__, spi->mode, spi->bits_per_word,
283 xspi->speed_hz);
284
285 return 0;
286}
287
288/**
289 * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
290 * @xspi: Pointer to the cdns_spi structure
291 */
292static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
293{
294 unsigned long trans_cnt = 0;
295
296 while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
297 (xspi->tx_bytes > 0)) {
298 if (xspi->txbuf)
299 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
300 *xspi->txbuf++);
301 else
302 cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
303
304 xspi->tx_bytes--;
305 trans_cnt++;
306 }
307}
308
309/**
310 * cdns_spi_irq - Interrupt service routine of the SPI controller
311 * @irq: IRQ number
312 * @dev_id: Pointer to the xspi structure
313 *
314 * This function handles TX empty and Mode Fault interrupts only.
315 * On TX empty interrupt this function reads the received data from RX FIFO and
316 * fills the TX FIFO if there is any data remaining to be transferred.
317 * On Mode Fault interrupt this function indicates that the transfer is
318 * completed; the SPI subsystem will identify the error because the count of
319 * bytes remaining to be transferred is non-zero.
320 *
321 * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
322 */
323static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
324{
325 struct spi_master *master = dev_id;
326 struct cdns_spi *xspi = spi_master_get_devdata(master);
327 u32 intr_status, status;
328
329 status = IRQ_NONE;
330 intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
331 cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
332
333 if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
334 /* Indicate that the transfer is completed; the SPI subsystem
335 * will identify the error because the count of bytes remaining
336 * to be transferred is non-zero
337 */
338 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
339 CDNS_SPI_IXR_DEFAULT_MASK);
340 spi_finalize_current_transfer(master);
341 status = IRQ_HANDLED;
342 } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
343 unsigned long trans_cnt;
344
345 trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
346
347 /* Read out the data from the RX FIFO */
348 while (trans_cnt) {
349 u8 data;
350
351 data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
352 if (xspi->rxbuf)
353 *xspi->rxbuf++ = data;
354
355 xspi->rx_bytes--;
356 trans_cnt--;
357 }
358
359 if (xspi->tx_bytes) {
360 /* There is more data to send */
361 cdns_spi_fill_tx_fifo(xspi);
362 } else {
363 /* Transfer is completed */
364 cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
365 CDNS_SPI_IXR_DEFAULT_MASK);
366 spi_finalize_current_transfer(master);
367 }
368 status = IRQ_HANDLED;
369 }
370
371 return status;
372}
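
The byte accounting in the handler is easiest to see with numbers: tx_bytes and rx_bytes both start at the transfer length, tx_bytes drops as the TX FIFO is filled, and rx_bytes drops as received words are drained, so rx_bytes - tx_bytes is exactly the number of words queued since the last refill. A small sketch of one interrupt cycle, assuming a 128-word FIFO purely for illustration (the real depth is CDNS_SPI_FIFO_DEPTH):

#include <stdio.h>

#define FIFO_DEPTH	128	/* assumed FIFO depth */

int main(void)
{
	int tx_bytes = 300, rx_bytes = 300;	/* e.g. a 300-byte transfer */

	/* cdns_transfer_one(): initial FIFO fill */
	int filled = tx_bytes < FIFO_DEPTH ? tx_bytes : FIFO_DEPTH;
	tx_bytes -= filled;			/* 172 bytes left to queue */

	/* first TX-empty interrupt: drain what was queued, then refill */
	int in_flight = rx_bytes - tx_bytes;	/* 300 - 172 = 128 words to read */
	rx_bytes -= in_flight;			/* 172 bytes still expected */

	printf("drained %d, tx left %d, rx left %d\n", in_flight, tx_bytes, rx_bytes);
	return 0;
}
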
373
374/**
375 * cdns_transfer_one - Initiates the SPI transfer
376 * @master: Pointer to spi_master structure
377 * @spi: Pointer to the spi_device structure
378 * @transfer: Pointer to the spi_transfer structure which provides
379 * information about next transfer parameters
380 *
381 * This function fills the TX FIFO, starts the SPI transfer and
382 * returns a positive transfer count so that the core will wait for completion.
383 *
384 * Return: The transfer length in bytes, as a positive in-progress indication
385 */
386static int cdns_transfer_one(struct spi_master *master,
387 struct spi_device *spi,
388 struct spi_transfer *transfer)
389{
390 struct cdns_spi *xspi = spi_master_get_devdata(master);
391
392 xspi->txbuf = transfer->tx_buf;
393 xspi->rxbuf = transfer->rx_buf;
394 xspi->tx_bytes = transfer->len;
395 xspi->rx_bytes = transfer->len;
396
397 cdns_spi_setup_transfer(spi, transfer);
398
399 cdns_spi_fill_tx_fifo(xspi);
400
401 cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
402 CDNS_SPI_IXR_DEFAULT_MASK);
403 return transfer->len;
404}
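
For context, this transfer_one() callback is driven by the SPI core's message queue; a client driver never calls it directly. The hypothetical snippet below sketches the standard spi_sync() path that eventually lands here; the helper name, command byte and buffer sizes are made up for illustration.

#include <linux/spi/spi.h>

/* Hypothetical helper: send one command byte, read back two bytes.
 * Stack buffers are acceptable for a PIO controller like this one;
 * DMA-capable masters need DMA-safe (kmalloc'd) buffers instead. */
static int example_read_reg(struct spi_device *spi, u8 cmd, u8 *val)
{
	u8 rx[2];
	struct spi_transfer xfers[2] = {
		{ .tx_buf = &cmd, .len = 1 },
		{ .rx_buf = rx,   .len = 2 },
	};
	struct spi_message msg;
	int ret;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);

	/* Queued by the SPI core; each transfer ends up in cdns_transfer_one() */
	ret = spi_sync(spi, &msg);
	if (ret)
		return ret;

	val[0] = rx[0];
	val[1] = rx[1];
	return 0;
}
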
405
406/**
407 * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
408 * @master: Pointer to the spi_master structure which provides
409 * information about the controller.
410 *
411 * This function sets up the clock mode and enables the SPI master controller.
412 *
413 * Return: 0 always
414 */
415static int cdns_prepare_transfer_hardware(struct spi_master *master)
416{
417 struct cdns_spi *xspi = spi_master_get_devdata(master);
418
419 cdns_spi_config_clock_mode(master->cur_msg->spi);
420
421 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
422 CDNS_SPI_ER_ENABLE_MASK);
423
424 return 0;
425}
426
427/**
428 * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
429 * @master: Pointer to the spi_master structure which provides
430 * information about the controller.
431 *
432 * This function disables the SPI master controller.
433 *
434 * Return: 0 always
435 */
436static int cdns_unprepare_transfer_hardware(struct spi_master *master)
437{
438 struct cdns_spi *xspi = spi_master_get_devdata(master);
439
440 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
441 CDNS_SPI_ER_DISABLE_MASK);
442
443 return 0;
444}
445
446/**
447 * cdns_spi_probe - Probe method for the SPI driver
448 * @pdev: Pointer to the platform_device structure
449 *
450 * This function initializes the driver data structures and the hardware.
451 *
452 * Return: 0 on success and error value on error
453 */
454static int cdns_spi_probe(struct platform_device *pdev)
455{
456 int ret = 0, irq;
457 struct spi_master *master;
458 struct cdns_spi *xspi;
459 struct resource *res;
460 u32 num_cs;
461
462 master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
463 if (master == NULL)
464 return -ENOMEM;
465
466 xspi = spi_master_get_devdata(master);
467 master->dev.of_node = pdev->dev.of_node;
468 platform_set_drvdata(pdev, master);
469
470 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
471 xspi->regs = devm_ioremap_resource(&pdev->dev, res);
472 if (IS_ERR(xspi->regs)) {
473 ret = PTR_ERR(xspi->regs);
474 goto remove_master;
475 }
476
477 xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
478 if (IS_ERR(xspi->pclk)) {
479 dev_err(&pdev->dev, "pclk clock not found.\n");
480 ret = PTR_ERR(xspi->pclk);
481 goto remove_master;
482 }
483
484 xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
485 if (IS_ERR(xspi->ref_clk)) {
486 dev_err(&pdev->dev, "ref_clk clock not found.\n");
487 ret = PTR_ERR(xspi->ref_clk);
488 goto remove_master;
489 }
490
491 ret = clk_prepare_enable(xspi->pclk);
492 if (ret) {
493 dev_err(&pdev->dev, "Unable to enable APB clock.\n");
494 goto remove_master;
495 }
496
497 ret = clk_prepare_enable(xspi->ref_clk);
498 if (ret) {
499 dev_err(&pdev->dev, "Unable to enable device clock.\n");
500 goto clk_dis_apb;
501 }
502
503 /* SPI controller initializations */
504 cdns_spi_init_hw(xspi);
505
506 irq = platform_get_irq(pdev, 0);
507 if (irq <= 0) {
508 ret = -ENXIO;
509 dev_err(&pdev->dev, "irq number is invalid\n");
510 goto remove_master;
511 }
512
513 ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
514 0, pdev->name, master);
515 if (ret != 0) {
516 ret = -ENXIO;
517 dev_err(&pdev->dev, "request_irq failed\n");
518 goto remove_master;
519 }
520
521 ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
522
523 if (ret < 0)
524 master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
525 else
526 master->num_chipselect = num_cs;
527
528 ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
529 &xspi->is_decoded_cs);
530
531 if (ret < 0)
532 xspi->is_decoded_cs = 0;
533
534 master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
535 master->transfer_one = cdns_transfer_one;
536 master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
537 master->set_cs = cdns_spi_chipselect;
538 master->mode_bits = SPI_CPOL | SPI_CPHA;
539
540 /* Set to default valid value */
541 master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
542 xspi->speed_hz = master->max_speed_hz;
543
544 master->bits_per_word_mask = SPI_BPW_MASK(8);
545
546 ret = spi_register_master(master);
547 if (ret) {
548 dev_err(&pdev->dev, "spi_register_master failed\n");
549 goto clk_dis_all;
550 }
551
552 return ret;
553
554clk_dis_all:
555 clk_disable_unprepare(xspi->ref_clk);
556clk_dis_apb:
557 clk_disable_unprepare(xspi->pclk);
558remove_master:
559 spi_master_put(master);
560 return ret;
561}
562
563/**
564 * cdns_spi_remove - Remove method for the SPI driver
565 * @pdev: Pointer to the platform_device structure
566 *
567 * This function is called if a device is physically removed from the system or
568 * if the driver module is being unloaded. It frees all resources allocated to
569 * the device.
570 *
571 * Return: Always 0
572 */
573static int cdns_spi_remove(struct platform_device *pdev)
574{
575 struct spi_master *master = platform_get_drvdata(pdev);
576 struct cdns_spi *xspi = spi_master_get_devdata(master);
577
578 cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
579 CDNS_SPI_ER_DISABLE_MASK);
580
581 clk_disable_unprepare(xspi->ref_clk);
582 clk_disable_unprepare(xspi->pclk);
583
584 spi_unregister_master(master);
585
586 return 0;
587}
588
589/**
590 * cdns_spi_suspend - Suspend method for the SPI driver
591 * @dev: Address of the device structure
592 *
593 * This function disables the SPI controller and
594 * changes the driver state to "suspend"
595 *
596 * Return: Always 0
597 */
598static int __maybe_unused cdns_spi_suspend(struct device *dev)
599{
600 struct platform_device *pdev = container_of(dev,
601 struct platform_device, dev);
602 struct spi_master *master = platform_get_drvdata(pdev);
603 struct cdns_spi *xspi = spi_master_get_devdata(master);
604
605 spi_master_suspend(master);
606
607 clk_disable_unprepare(xspi->ref_clk);
608
609 clk_disable_unprepare(xspi->pclk);
610
611 return 0;
612}
613
614/**
615 * cdns_spi_resume - Resume method for the SPI driver
616 * @dev: Address of the device structure
617 *
618 * This function changes the driver state to "ready"
619 *
620 * Return: 0 on success and error value on error
621 */
622static int __maybe_unused cdns_spi_resume(struct device *dev)
623{
624 struct platform_device *pdev = container_of(dev,
625 struct platform_device, dev);
626 struct spi_master *master = platform_get_drvdata(pdev);
627 struct cdns_spi *xspi = spi_master_get_devdata(master);
628 int ret = 0;
629
630 ret = clk_prepare_enable(xspi->pclk);
631 if (ret) {
632 dev_err(dev, "Cannot enable APB clock.\n");
633 return ret;
634 }
635
636 ret = clk_prepare_enable(xspi->ref_clk);
637 if (ret) {
638 dev_err(dev, "Cannot enable device clock.\n");
639 clk_disable(xspi->pclk);
640 return ret;
641 }
642 spi_master_resume(master);
643
644 return 0;
645}
646
647static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
648 cdns_spi_resume);
649
650static struct of_device_id cdns_spi_of_match[] = {
651 { .compatible = "xlnx,zynq-spi-r1p6" },
652 { .compatible = "cdns,spi-r1p6" },
653 { /* end of table */ }
654};
655MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
656
657/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
658static struct platform_driver cdns_spi_driver = {
659 .probe = cdns_spi_probe,
660 .remove = cdns_spi_remove,
661 .driver = {
662 .name = CDNS_SPI_NAME,
663 .owner = THIS_MODULE,
664 .of_match_table = cdns_spi_of_match,
665 .pm = &cdns_spi_dev_pm_ops,
666 },
667};
668
669module_platform_driver(cdns_spi_driver);
670
671MODULE_AUTHOR("Xilinx, Inc.");
672MODULE_DESCRIPTION("Cadence SPI driver");
673MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 1492f5ee9aaa..a5cba14ac3d2 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -16,6 +16,7 @@
16#include <linux/spi/spi.h> 16#include <linux/spi/spi.h>
17#include <linux/scatterlist.h> 17#include <linux/scatterlist.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/of_gpio.h>
19 20
20#include "spi-dw.h" 21#include "spi-dw.h"
21 22
@@ -70,6 +71,27 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
70 dws->num_cs = 4; 71 dws->num_cs = 4;
71 dws->max_freq = clk_get_rate(dwsmmio->clk); 72 dws->max_freq = clk_get_rate(dwsmmio->clk);
72 73
74 if (pdev->dev.of_node) {
75 int i;
76
77 for (i = 0; i < dws->num_cs; i++) {
78 int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
79 "cs-gpios", i);
80
81 if (cs_gpio == -EPROBE_DEFER) {
82 ret = cs_gpio;
83 goto out;
84 }
85
86 if (gpio_is_valid(cs_gpio)) {
87 ret = devm_gpio_request(&pdev->dev, cs_gpio,
88 dev_name(&pdev->dev));
89 if (ret)
90 goto out;
91 }
92 }
93 }
94
73 ret = dw_spi_add_host(&pdev->dev, dws); 95 ret = dw_spi_add_host(&pdev->dev, dws);
74 if (ret) 96 if (ret)
75 goto out; 97 goto out;
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 712ac5629cd4..29f33143b795 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/spi/spi.h> 26#include <linux/spi/spi.h>
27#include <linux/gpio.h>
27 28
28#include "spi-dw.h" 29#include "spi-dw.h"
29 30
@@ -36,12 +37,6 @@
36#define DONE_STATE ((void *)2) 37#define DONE_STATE ((void *)2)
37#define ERROR_STATE ((void *)-1) 38#define ERROR_STATE ((void *)-1)
38 39
39#define QUEUE_RUNNING 0
40#define QUEUE_STOPPED 1
41
42#define MRST_SPI_DEASSERT 0
43#define MRST_SPI_ASSERT 1
44
45/* Slave spi_dev related */ 40/* Slave spi_dev related */
46struct chip_data { 41struct chip_data {
47 u16 cr0; 42 u16 cr0;
@@ -263,28 +258,22 @@ static int map_dma_buffers(struct dw_spi *dws)
263static void giveback(struct dw_spi *dws) 258static void giveback(struct dw_spi *dws)
264{ 259{
265 struct spi_transfer *last_transfer; 260 struct spi_transfer *last_transfer;
266 unsigned long flags;
267 struct spi_message *msg; 261 struct spi_message *msg;
268 262
269 spin_lock_irqsave(&dws->lock, flags);
270 msg = dws->cur_msg; 263 msg = dws->cur_msg;
271 dws->cur_msg = NULL; 264 dws->cur_msg = NULL;
272 dws->cur_transfer = NULL; 265 dws->cur_transfer = NULL;
273 dws->prev_chip = dws->cur_chip; 266 dws->prev_chip = dws->cur_chip;
274 dws->cur_chip = NULL; 267 dws->cur_chip = NULL;
275 dws->dma_mapped = 0; 268 dws->dma_mapped = 0;
276 queue_work(dws->workqueue, &dws->pump_messages);
277 spin_unlock_irqrestore(&dws->lock, flags);
278 269
279 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer, 270 last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
280 transfer_list); 271 transfer_list);
281 272
282 if (!last_transfer->cs_change && dws->cs_control) 273 if (!last_transfer->cs_change)
283 dws->cs_control(MRST_SPI_DEASSERT); 274 spi_chip_sel(dws, dws->cur_msg->spi, 0);
284 275
285 msg->state = NULL; 276 spi_finalize_current_message(dws->master);
286 if (msg->complete)
287 msg->complete(msg->context);
288} 277}
289 278
290static void int_error_stop(struct dw_spi *dws, const char *msg) 279static void int_error_stop(struct dw_spi *dws, const char *msg)
@@ -502,7 +491,7 @@ static void pump_transfers(unsigned long data)
502 dw_writew(dws, DW_SPI_CTRL0, cr0); 491 dw_writew(dws, DW_SPI_CTRL0, cr0);
503 492
504 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div); 493 spi_set_clk(dws, clk_div ? clk_div : chip->clk_div);
505 spi_chip_sel(dws, spi->chip_select); 494 spi_chip_sel(dws, spi, 1);
506 495
507 /* Set the interrupt mask, for poll mode just disable all int */ 496 /* Set the interrupt mask, for poll mode just disable all int */
508 spi_mask_intr(dws, 0xff); 497 spi_mask_intr(dws, 0xff);
@@ -529,30 +518,12 @@ early_exit:
529 return; 518 return;
530} 519}
531 520
532static void pump_messages(struct work_struct *work) 521static int dw_spi_transfer_one_message(struct spi_master *master,
522 struct spi_message *msg)
533{ 523{
534 struct dw_spi *dws = 524 struct dw_spi *dws = spi_master_get_devdata(master);
535 container_of(work, struct dw_spi, pump_messages);
536 unsigned long flags;
537
538 /* Lock queue and check for queue work */
539 spin_lock_irqsave(&dws->lock, flags);
540 if (list_empty(&dws->queue) || dws->run == QUEUE_STOPPED) {
541 dws->busy = 0;
542 spin_unlock_irqrestore(&dws->lock, flags);
543 return;
544 }
545
546 /* Make sure we are not already running a message */
547 if (dws->cur_msg) {
548 spin_unlock_irqrestore(&dws->lock, flags);
549 return;
550 }
551
552 /* Extract head of queue */
553 dws->cur_msg = list_entry(dws->queue.next, struct spi_message, queue);
554 list_del_init(&dws->cur_msg->queue);
555 525
526 dws->cur_msg = msg;
556 /* Initial message state*/ 527 /* Initial message state*/
557 dws->cur_msg->state = START_STATE; 528 dws->cur_msg->state = START_STATE;
558 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next, 529 dws->cur_transfer = list_entry(dws->cur_msg->transfers.next,
@@ -560,46 +531,9 @@ static void pump_messages(struct work_struct *work)
560 transfer_list); 531 transfer_list);
561 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi); 532 dws->cur_chip = spi_get_ctldata(dws->cur_msg->spi);
562 533
563 /* Mark as busy and launch transfers */ 534 /* Launch transfers */
564 tasklet_schedule(&dws->pump_transfers); 535 tasklet_schedule(&dws->pump_transfers);
565 536
566 dws->busy = 1;
567 spin_unlock_irqrestore(&dws->lock, flags);
568}
569
570/* spi_device use this to queue in their spi_msg */
571static int dw_spi_transfer(struct spi_device *spi, struct spi_message *msg)
572{
573 struct dw_spi *dws = spi_master_get_devdata(spi->master);
574 unsigned long flags;
575
576 spin_lock_irqsave(&dws->lock, flags);
577
578 if (dws->run == QUEUE_STOPPED) {
579 spin_unlock_irqrestore(&dws->lock, flags);
580 return -ESHUTDOWN;
581 }
582
583 msg->actual_length = 0;
584 msg->status = -EINPROGRESS;
585 msg->state = START_STATE;
586
587 list_add_tail(&msg->queue, &dws->queue);
588
589 if (dws->run == QUEUE_RUNNING && !dws->busy) {
590
591 if (dws->cur_transfer || dws->cur_msg)
592 queue_work(dws->workqueue,
593 &dws->pump_messages);
594 else {
595 /* If no other data transaction in air, just go */
596 spin_unlock_irqrestore(&dws->lock, flags);
597 pump_messages(&dws->pump_messages);
598 return 0;
599 }
600 }
601
602 spin_unlock_irqrestore(&dws->lock, flags);
603 return 0; 537 return 0;
604} 538}
605 539
@@ -608,6 +542,7 @@ static int dw_spi_setup(struct spi_device *spi)
608{ 542{
609 struct dw_spi_chip *chip_info = NULL; 543 struct dw_spi_chip *chip_info = NULL;
610 struct chip_data *chip; 544 struct chip_data *chip;
545 int ret;
611 546
612 /* Only alloc on first setup */ 547 /* Only alloc on first setup */
613 chip = spi_get_ctldata(spi); 548 chip = spi_get_ctldata(spi);
@@ -661,81 +596,13 @@ static int dw_spi_setup(struct spi_device *spi)
661 | (spi->mode << SPI_MODE_OFFSET) 596 | (spi->mode << SPI_MODE_OFFSET)
662 | (chip->tmode << SPI_TMOD_OFFSET); 597 | (chip->tmode << SPI_TMOD_OFFSET);
663 598
664 return 0; 599 if (gpio_is_valid(spi->cs_gpio)) {
665} 600 ret = gpio_direction_output(spi->cs_gpio,
666 601 !(spi->mode & SPI_CS_HIGH));
667static int init_queue(struct dw_spi *dws) 602 if (ret)
668{ 603 return ret;
669 INIT_LIST_HEAD(&dws->queue);
670 spin_lock_init(&dws->lock);
671
672 dws->run = QUEUE_STOPPED;
673 dws->busy = 0;
674
675 tasklet_init(&dws->pump_transfers,
676 pump_transfers, (unsigned long)dws);
677
678 INIT_WORK(&dws->pump_messages, pump_messages);
679 dws->workqueue = create_singlethread_workqueue(
680 dev_name(dws->master->dev.parent));
681 if (dws->workqueue == NULL)
682 return -EBUSY;
683
684 return 0;
685}
686
687static int start_queue(struct dw_spi *dws)
688{
689 unsigned long flags;
690
691 spin_lock_irqsave(&dws->lock, flags);
692
693 if (dws->run == QUEUE_RUNNING || dws->busy) {
694 spin_unlock_irqrestore(&dws->lock, flags);
695 return -EBUSY;
696 } 604 }
697 605
698 dws->run = QUEUE_RUNNING;
699 dws->cur_msg = NULL;
700 dws->cur_transfer = NULL;
701 dws->cur_chip = NULL;
702 dws->prev_chip = NULL;
703 spin_unlock_irqrestore(&dws->lock, flags);
704
705 queue_work(dws->workqueue, &dws->pump_messages);
706
707 return 0;
708}
709
710static int stop_queue(struct dw_spi *dws)
711{
712 unsigned long flags;
713 unsigned limit = 50;
714 int status = 0;
715
716 spin_lock_irqsave(&dws->lock, flags);
717 dws->run = QUEUE_STOPPED;
718 while ((!list_empty(&dws->queue) || dws->busy) && limit--) {
719 spin_unlock_irqrestore(&dws->lock, flags);
720 msleep(10);
721 spin_lock_irqsave(&dws->lock, flags);
722 }
723
724 if (!list_empty(&dws->queue) || dws->busy)
725 status = -EBUSY;
726 spin_unlock_irqrestore(&dws->lock, flags);
727
728 return status;
729}
730
731static int destroy_queue(struct dw_spi *dws)
732{
733 int status;
734
735 status = stop_queue(dws);
736 if (status != 0)
737 return status;
738 destroy_workqueue(dws->workqueue);
739 return 0; 606 return 0;
740} 607}
741 608
@@ -794,7 +661,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
794 master->bus_num = dws->bus_num; 661 master->bus_num = dws->bus_num;
795 master->num_chipselect = dws->num_cs; 662 master->num_chipselect = dws->num_cs;
796 master->setup = dw_spi_setup; 663 master->setup = dw_spi_setup;
797 master->transfer = dw_spi_transfer; 664 master->transfer_one_message = dw_spi_transfer_one_message;
798 master->max_speed_hz = dws->max_freq; 665 master->max_speed_hz = dws->max_freq;
799 666
800 /* Basic HW init */ 667 /* Basic HW init */
@@ -808,33 +675,21 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
808 } 675 }
809 } 676 }
810 677
811 /* Initial and start queue */ 678 tasklet_init(&dws->pump_transfers, pump_transfers, (unsigned long)dws);
812 ret = init_queue(dws);
813 if (ret) {
814 dev_err(&master->dev, "problem initializing queue\n");
815 goto err_diable_hw;
816 }
817 ret = start_queue(dws);
818 if (ret) {
819 dev_err(&master->dev, "problem starting queue\n");
820 goto err_diable_hw;
821 }
822 679
823 spi_master_set_devdata(master, dws); 680 spi_master_set_devdata(master, dws);
824 ret = devm_spi_register_master(dev, master); 681 ret = devm_spi_register_master(dev, master);
825 if (ret) { 682 if (ret) {
826 dev_err(&master->dev, "problem registering spi master\n"); 683 dev_err(&master->dev, "problem registering spi master\n");
827 goto err_queue_alloc; 684 goto err_dma_exit;
828 } 685 }
829 686
830 mrst_spi_debugfs_init(dws); 687 mrst_spi_debugfs_init(dws);
831 return 0; 688 return 0;
832 689
833err_queue_alloc: 690err_dma_exit:
834 destroy_queue(dws);
835 if (dws->dma_ops && dws->dma_ops->dma_exit) 691 if (dws->dma_ops && dws->dma_ops->dma_exit)
836 dws->dma_ops->dma_exit(dws); 692 dws->dma_ops->dma_exit(dws);
837err_diable_hw:
838 spi_enable_chip(dws, 0); 693 spi_enable_chip(dws, 0);
839err_free_master: 694err_free_master:
840 spi_master_put(master); 695 spi_master_put(master);
@@ -844,18 +699,10 @@ EXPORT_SYMBOL_GPL(dw_spi_add_host);
844 699
845void dw_spi_remove_host(struct dw_spi *dws) 700void dw_spi_remove_host(struct dw_spi *dws)
846{ 701{
847 int status = 0;
848
849 if (!dws) 702 if (!dws)
850 return; 703 return;
851 mrst_spi_debugfs_remove(dws); 704 mrst_spi_debugfs_remove(dws);
852 705
853 /* Remove the queue */
854 status = destroy_queue(dws);
855 if (status != 0)
856 dev_err(&dws->master->dev,
857 "dw_spi_remove: workqueue will not complete, message memory not freed\n");
858
859 if (dws->dma_ops && dws->dma_ops->dma_exit) 706 if (dws->dma_ops && dws->dma_ops->dma_exit)
860 dws->dma_ops->dma_exit(dws); 707 dws->dma_ops->dma_exit(dws);
861 spi_enable_chip(dws, 0); 708 spi_enable_chip(dws, 0);
@@ -868,7 +715,7 @@ int dw_spi_suspend_host(struct dw_spi *dws)
868{ 715{
869 int ret = 0; 716 int ret = 0;
870 717
871 ret = stop_queue(dws); 718 ret = spi_master_suspend(dws->master);
872 if (ret) 719 if (ret)
873 return ret; 720 return ret;
874 spi_enable_chip(dws, 0); 721 spi_enable_chip(dws, 0);
@@ -882,7 +729,7 @@ int dw_spi_resume_host(struct dw_spi *dws)
882 int ret; 729 int ret;
883 730
884 spi_hw_init(dws); 731 spi_hw_init(dws);
885 ret = start_queue(dws); 732 ret = spi_master_resume(dws->master);
886 if (ret) 733 if (ret)
887 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); 734 dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
888 return ret; 735 return ret;
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
index 587643dae11e..6d2acad34f64 100644
--- a/drivers/spi/spi-dw.h
+++ b/drivers/spi/spi-dw.h
@@ -3,6 +3,7 @@
3 3
4#include <linux/io.h> 4#include <linux/io.h>
5#include <linux/scatterlist.h> 5#include <linux/scatterlist.h>
6#include <linux/gpio.h>
6 7
7/* Register offsets */ 8/* Register offsets */
8#define DW_SPI_CTRL0 0x00 9#define DW_SPI_CTRL0 0x00
@@ -104,14 +105,6 @@ struct dw_spi {
104 u16 bus_num; 105 u16 bus_num;
105 u16 num_cs; /* supported slave numbers */ 106 u16 num_cs; /* supported slave numbers */
106 107
107 /* Driver message queue */
108 struct workqueue_struct *workqueue;
109 struct work_struct pump_messages;
110 spinlock_t lock;
111 struct list_head queue;
112 int busy;
113 int run;
114
115 /* Message Transfer pump */ 108 /* Message Transfer pump */
116 struct tasklet_struct pump_transfers; 109 struct tasklet_struct pump_transfers;
117 110
@@ -186,15 +179,20 @@ static inline void spi_set_clk(struct dw_spi *dws, u16 div)
186 dw_writel(dws, DW_SPI_BAUDR, div); 179 dw_writel(dws, DW_SPI_BAUDR, div);
187} 180}
188 181
189static inline void spi_chip_sel(struct dw_spi *dws, u16 cs) 182static inline void spi_chip_sel(struct dw_spi *dws, struct spi_device *spi,
183 int active)
190{ 184{
191 if (cs > dws->num_cs) 185 u16 cs = spi->chip_select;
192 return; 186 int gpio_val = active ? (spi->mode & SPI_CS_HIGH) :
187 !(spi->mode & SPI_CS_HIGH);
193 188
194 if (dws->cs_control) 189 if (dws->cs_control)
195 dws->cs_control(1); 190 dws->cs_control(active);
191 if (gpio_is_valid(spi->cs_gpio))
192 gpio_set_value(spi->cs_gpio, gpio_val);
196 193
197 dw_writel(dws, DW_SPI_SER, 1 << cs); 194 if (active)
195 dw_writel(dws, DW_SPI_SER, 1 << cs);
198} 196}
199 197
200/* Disable IRQ bits */ 198/* Disable IRQ bits */
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index d565eeee3bd8..5021ddf03f60 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -406,7 +406,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
406 return IRQ_HANDLED; 406 return IRQ_HANDLED;
407} 407}
408 408
409static struct of_device_id fsl_dspi_dt_ids[] = { 409static const struct of_device_id fsl_dspi_dt_ids[] = {
410 { .compatible = "fsl,vf610-dspi", .data = NULL, }, 410 { .compatible = "fsl,vf610-dspi", .data = NULL, },
411 { /* sentinel */ } 411 { /* sentinel */ }
412}; 412};
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 3cf7d65bc739..8ebd724e4c59 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -348,7 +348,7 @@ static void fsl_espi_cmd_trans(struct spi_message *m,
348 } 348 }
349 349
350 espi_trans->tx_buf = local_buf; 350 espi_trans->tx_buf = local_buf;
351 espi_trans->rx_buf = local_buf + espi_trans->n_tx; 351 espi_trans->rx_buf = local_buf;
352 fsl_espi_do_trans(m, espi_trans); 352 fsl_espi_do_trans(m, espi_trans);
353 353
354 espi_trans->actual_length = espi_trans->len; 354 espi_trans->actual_length = espi_trans->len;
@@ -397,7 +397,7 @@ static void fsl_espi_rw_trans(struct spi_message *m,
397 espi_trans->n_rx = trans_len; 397 espi_trans->n_rx = trans_len;
398 espi_trans->len = trans_len + n_tx; 398 espi_trans->len = trans_len + n_tx;
399 espi_trans->tx_buf = local_buf; 399 espi_trans->tx_buf = local_buf;
400 espi_trans->rx_buf = local_buf + n_tx; 400 espi_trans->rx_buf = local_buf;
401 fsl_espi_do_trans(m, espi_trans); 401 fsl_espi_do_trans(m, espi_trans);
402 402
403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); 403 memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
@@ -586,8 +586,10 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
586 struct spi_master *master; 586 struct spi_master *master;
587 struct mpc8xxx_spi *mpc8xxx_spi; 587 struct mpc8xxx_spi *mpc8xxx_spi;
588 struct fsl_espi_reg *reg_base; 588 struct fsl_espi_reg *reg_base;
589 u32 regval; 589 struct device_node *nc;
590 int i, ret = 0; 590 const __be32 *prop;
591 u32 regval, csmode;
592 int i, len, ret = 0;
591 593
592 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); 594 master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
593 if (!master) { 595 if (!master) {
@@ -634,8 +636,32 @@ static struct spi_master * fsl_espi_probe(struct device *dev,
634 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); 636 mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
635 637
636 /* Init eSPI CS mode register */ 638 /* Init eSPI CS mode register */
637 for (i = 0; i < pdata->max_chipselect; i++) 639 for_each_available_child_of_node(master->dev.of_node, nc) {
638 mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL); 640 /* get chip select */
641 prop = of_get_property(nc, "reg", &len);
642 if (!prop || len < sizeof(*prop))
643 continue;
644 i = be32_to_cpup(prop);
645 if (i < 0 || i >= pdata->max_chipselect)
646 continue;
647
648 csmode = CSMODE_INIT_VAL;
649 /* check if CSBEF is set in device tree */
650 prop = of_get_property(nc, "fsl,csbef", &len);
651 if (prop && len >= sizeof(*prop)) {
652 csmode &= ~(CSMODE_BEF(0xf));
653 csmode |= CSMODE_BEF(be32_to_cpup(prop));
654 }
655 /* check if CSAFT is set in device tree */
656 prop = of_get_property(nc, "fsl,csaft", &len);
657 if (prop && len >= sizeof(*prop)) {
658 csmode &= ~(CSMODE_AFT(0xf));
659 csmode |= CSMODE_AFT(be32_to_cpup(prop));
660 }
661 mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
662
663 dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
664 }
639 665
640 /* Enable SPI interface */ 666 /* Enable SPI interface */
641 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; 667 regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 09823076df88..9f595535cf27 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -340,7 +340,7 @@ done:
340} 340}
341 341
342#ifdef CONFIG_OF 342#ifdef CONFIG_OF
343static struct of_device_id spi_gpio_dt_ids[] = { 343static const struct of_device_id spi_gpio_dt_ids[] = {
344 { .compatible = "spi-gpio" }, 344 { .compatible = "spi-gpio" },
345 {} 345 {}
346}; 346};
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 713af4806f26..f6759dc0153b 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -29,18 +29,6 @@ static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
29 struct sg_table *sgt; 29 struct sg_table *sgt;
30 void *buf, *pbuf; 30 void *buf, *pbuf;
31 31
32 /*
33 * Some DMA controllers have problems transferring buffers that are
34 * not multiple of 4 bytes. So we truncate the transfer so that it
35 * is suitable for such controllers, and handle the trailing bytes
36 * manually after the DMA completes.
37 *
38 * REVISIT: It would be better if this information could be
39 * retrieved directly from the DMA device in a similar way than
40 * ->copy_align etc. is done.
41 */
42 len = ALIGN(drv_data->len, 4);
43
44 if (dir == DMA_TO_DEVICE) { 32 if (dir == DMA_TO_DEVICE) {
45 dmadev = drv_data->tx_chan->device->dev; 33 dmadev = drv_data->tx_chan->device->dev;
46 sgt = &drv_data->tx_sgt; 34 sgt = &drv_data->tx_sgt;
@@ -144,12 +132,8 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
144 if (!error) { 132 if (!error) {
145 pxa2xx_spi_unmap_dma_buffers(drv_data); 133 pxa2xx_spi_unmap_dma_buffers(drv_data);
146 134
147 /* Handle the last bytes of unaligned transfer */
148 drv_data->tx += drv_data->tx_map_len; 135 drv_data->tx += drv_data->tx_map_len;
149 drv_data->write(drv_data);
150
151 drv_data->rx += drv_data->rx_map_len; 136 drv_data->rx += drv_data->rx_map_len;
152 drv_data->read(drv_data);
153 137
154 msg->actual_length += drv_data->len; 138 msg->actual_length += drv_data->len;
155 msg->state = pxa2xx_spi_next_transfer(drv_data); 139 msg->state = pxa2xx_spi_next_transfer(drv_data);
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index b032e8885e24..fc1de86d3c8a 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -287,7 +287,7 @@ static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL); 287 writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
288 288
289 if (!xfer) { 289 if (!xfer) {
290 dev_err_ratelimited(controller->dev, "unexpected irq %x08 %x08 %x08\n", 290 dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
291 qup_err, spi_err, opflags); 291 qup_err, spi_err, opflags);
292 return IRQ_HANDLED; 292 return IRQ_HANDLED;
293 } 293 }
@@ -366,7 +366,7 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
366 n_words = xfer->len / w_size; 366 n_words = xfer->len / w_size;
367 controller->w_size = w_size; 367 controller->w_size = w_size;
368 368
369 if (n_words <= controller->in_fifo_sz) { 369 if (n_words <= (controller->in_fifo_sz / sizeof(u32))) {
370 mode = QUP_IO_M_MODE_FIFO; 370 mode = QUP_IO_M_MODE_FIFO;
371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT); 371 writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT); 372 writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
@@ -734,7 +734,7 @@ static int spi_qup_remove(struct platform_device *pdev)
734 int ret; 734 int ret;
735 735
736 ret = pm_runtime_get_sync(&pdev->dev); 736 ret = pm_runtime_get_sync(&pdev->dev);
737 if (ret) 737 if (ret < 0)
738 return ret; 738 return ret;
739 739
740 ret = spi_qup_set_state(controller, QUP_STATE_RESET); 740 ret = spi_qup_set_state(controller, QUP_STATE_RESET);
@@ -749,7 +749,7 @@ static int spi_qup_remove(struct platform_device *pdev)
749 return 0; 749 return 0;
750} 750}
751 751
752static struct of_device_id spi_qup_dt_match[] = { 752static const struct of_device_id spi_qup_dt_match[] = {
753 { .compatible = "qcom,spi-qup-v2.1.1", }, 753 { .compatible = "qcom,spi-qup-v2.1.1", },
754 { .compatible = "qcom,spi-qup-v2.2.1", }, 754 { .compatible = "qcom,spi-qup-v2.2.1", },
755 { } 755 { }
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 9009456bdf4d..c8e795ef2e13 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -244,9 +244,9 @@ static int hspi_probe(struct platform_device *pdev)
244 return -ENOMEM; 244 return -ENOMEM;
245 } 245 }
246 246
247 clk = clk_get(NULL, "shyway_clk"); 247 clk = clk_get(&pdev->dev, NULL);
248 if (IS_ERR(clk)) { 248 if (IS_ERR(clk)) {
249 dev_err(&pdev->dev, "shyway_clk is required\n"); 249 dev_err(&pdev->dev, "couldn't get clock\n");
250 ret = -EINVAL; 250 ret = -EINVAL;
251 goto error0; 251 goto error0;
252 } 252 }
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 1a77ad52812f..67d8909dcf39 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -287,8 +287,8 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
287 sspi->left_rx_word) 287 sspi->left_rx_word)
288 sspi->rx_word(sspi); 288 sspi->rx_word(sspi);
289 289
290 if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY 290 if (spi_stat & (SIRFSOC_SPI_TXFIFO_EMPTY |
291 | SIRFSOC_SPI_TXFIFO_THD_REACH)) 291 SIRFSOC_SPI_TXFIFO_THD_REACH))
292 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS) 292 while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
293 & SIRFSOC_SPI_FIFO_FULL)) && 293 & SIRFSOC_SPI_FIFO_FULL)) &&
294 sspi->left_tx_word) 294 sspi->left_tx_word)
@@ -470,7 +470,16 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
470 writel(regval, sspi->base + SIRFSOC_SPI_CTRL); 470 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
471 } else { 471 } else {
472 int gpio = sspi->chipselect[spi->chip_select]; 472 int gpio = sspi->chipselect[spi->chip_select];
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1); 473 switch (value) {
474 case BITBANG_CS_ACTIVE:
475 gpio_direction_output(gpio,
476 spi->mode & SPI_CS_HIGH ? 1 : 0);
477 break;
478 case BITBANG_CS_INACTIVE:
479 gpio_direction_output(gpio,
480 spi->mode & SPI_CS_HIGH ? 0 : 1);
481 break;
482 }
474 } 483 }
475} 484}
476 485
@@ -559,6 +568,11 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
559 regval &= ~SIRFSOC_SPI_CMD_MODE; 568 regval &= ~SIRFSOC_SPI_CMD_MODE;
560 sspi->tx_by_cmd = false; 569 sspi->tx_by_cmd = false;
561 } 570 }
571 /*
572 * set spi controller in RISC chipselect mode, we are controlling CS by
573 * software BITBANG_CS_ACTIVE and BITBANG_CS_INACTIVE.
574 */
575 regval |= SIRFSOC_SPI_CS_IO_MODE;
562 writel(regval, sspi->base + SIRFSOC_SPI_CTRL); 576 writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
563 577
564 if (IS_DMA_VALID(t)) { 578 if (IS_DMA_VALID(t)) {
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 400649595505..e4a85ada861d 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -1012,7 +1012,7 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
1012 return IRQ_WAKE_THREAD; 1012 return IRQ_WAKE_THREAD;
1013} 1013}
1014 1014
1015static struct of_device_id tegra_spi_of_match[] = { 1015static const struct of_device_id tegra_spi_of_match[] = {
1016 { .compatible = "nvidia,tegra114-spi", }, 1016 { .compatible = "nvidia,tegra114-spi", },
1017 {} 1017 {}
1018}; 1018};
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 47869ea636e1..3548ce25c08f 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -419,7 +419,7 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
419 return handle_cpu_based_xfer(tsd); 419 return handle_cpu_based_xfer(tsd);
420} 420}
421 421
422static struct of_device_id tegra_sflash_of_match[] = { 422static const struct of_device_id tegra_sflash_of_match[] = {
423 { .compatible = "nvidia,tegra20-sflash", }, 423 { .compatible = "nvidia,tegra20-sflash", },
424 {} 424 {}
425}; 425};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index e3c1b93e45d1..0b9e32e9f493 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1001,7 +1001,7 @@ static const struct tegra_slink_chip_data tegra20_spi_cdata = {
1001 .cs_hold_time = false, 1001 .cs_hold_time = false,
1002}; 1002};
1003 1003
1004static struct of_device_id tegra_slink_of_match[] = { 1004static const struct of_device_id tegra_slink_of_match[] = {
1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, }, 1005 { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, }, 1006 { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
1007 {} 1007 {}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 4eb9bf02996c..d4f9670b51bc 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -580,6 +580,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
580 spi->master->set_cs(spi, !enable); 580 spi->master->set_cs(spi, !enable);
581} 581}
582 582
583#ifdef CONFIG_HAS_DMA
583static int spi_map_buf(struct spi_master *master, struct device *dev, 584static int spi_map_buf(struct spi_master *master, struct device *dev,
584 struct sg_table *sgt, void *buf, size_t len, 585 struct sg_table *sgt, void *buf, size_t len,
585 enum dma_data_direction dir) 586 enum dma_data_direction dir)
@@ -637,55 +638,12 @@ static void spi_unmap_buf(struct spi_master *master, struct device *dev,
637 } 638 }
638} 639}
639 640
640static int spi_map_msg(struct spi_master *master, struct spi_message *msg) 641static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
641{ 642{
642 struct device *tx_dev, *rx_dev; 643 struct device *tx_dev, *rx_dev;
643 struct spi_transfer *xfer; 644 struct spi_transfer *xfer;
644 void *tmp;
645 unsigned int max_tx, max_rx;
646 int ret; 645 int ret;
647 646
648 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
649 max_tx = 0;
650 max_rx = 0;
651
652 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
653 if ((master->flags & SPI_MASTER_MUST_TX) &&
654 !xfer->tx_buf)
655 max_tx = max(xfer->len, max_tx);
656 if ((master->flags & SPI_MASTER_MUST_RX) &&
657 !xfer->rx_buf)
658 max_rx = max(xfer->len, max_rx);
659 }
660
661 if (max_tx) {
662 tmp = krealloc(master->dummy_tx, max_tx,
663 GFP_KERNEL | GFP_DMA);
664 if (!tmp)
665 return -ENOMEM;
666 master->dummy_tx = tmp;
667 memset(tmp, 0, max_tx);
668 }
669
670 if (max_rx) {
671 tmp = krealloc(master->dummy_rx, max_rx,
672 GFP_KERNEL | GFP_DMA);
673 if (!tmp)
674 return -ENOMEM;
675 master->dummy_rx = tmp;
676 }
677
678 if (max_tx || max_rx) {
679 list_for_each_entry(xfer, &msg->transfers,
680 transfer_list) {
681 if (!xfer->tx_buf)
682 xfer->tx_buf = master->dummy_tx;
683 if (!xfer->rx_buf)
684 xfer->rx_buf = master->dummy_rx;
685 }
686 }
687 }
688
689 if (!master->can_dma) 647 if (!master->can_dma)
690 return 0; 648 return 0;
691 649
@@ -742,6 +700,69 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
742 700
743 return 0; 701 return 0;
744} 702}
703#else /* !CONFIG_HAS_DMA */
704static inline int __spi_map_msg(struct spi_master *master,
705 struct spi_message *msg)
706{
707 return 0;
708}
709
710static inline int spi_unmap_msg(struct spi_master *master,
711 struct spi_message *msg)
712{
713 return 0;
714}
715#endif /* !CONFIG_HAS_DMA */
716
717static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
718{
719 struct spi_transfer *xfer;
720 void *tmp;
721 unsigned int max_tx, max_rx;
722
723 if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
724 max_tx = 0;
725 max_rx = 0;
726
727 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
728 if ((master->flags & SPI_MASTER_MUST_TX) &&
729 !xfer->tx_buf)
730 max_tx = max(xfer->len, max_tx);
731 if ((master->flags & SPI_MASTER_MUST_RX) &&
732 !xfer->rx_buf)
733 max_rx = max(xfer->len, max_rx);
734 }
735
736 if (max_tx) {
737 tmp = krealloc(master->dummy_tx, max_tx,
738 GFP_KERNEL | GFP_DMA);
739 if (!tmp)
740 return -ENOMEM;
741 master->dummy_tx = tmp;
742 memset(tmp, 0, max_tx);
743 }
744
745 if (max_rx) {
746 tmp = krealloc(master->dummy_rx, max_rx,
747 GFP_KERNEL | GFP_DMA);
748 if (!tmp)
749 return -ENOMEM;
750 master->dummy_rx = tmp;
751 }
752
753 if (max_tx || max_rx) {
754 list_for_each_entry(xfer, &msg->transfers,
755 transfer_list) {
756 if (!xfer->tx_buf)
757 xfer->tx_buf = master->dummy_tx;
758 if (!xfer->rx_buf)
759 xfer->rx_buf = master->dummy_rx;
760 }
761 }
762 }
763
764 return __spi_map_msg(master, msg);
765}
745 766
746/* 767/*
747 * spi_transfer_one_message - Default implementation of transfer_one_message() 768 * spi_transfer_one_message - Default implementation of transfer_one_message()
@@ -775,7 +796,7 @@ static int spi_transfer_one_message(struct spi_master *master,
775 if (ret > 0) { 796 if (ret > 0) {
776 ret = 0; 797 ret = 0;
777 ms = xfer->len * 8 * 1000 / xfer->speed_hz; 798 ms = xfer->len * 8 * 1000 / xfer->speed_hz;
778 ms += 10; /* some tolerance */ 799 ms += ms + 100; /* some tolerance */
779 800
780 ms = wait_for_completion_timeout(&master->xfer_completion, 801 ms = wait_for_completion_timeout(&master->xfer_completion,
781 msecs_to_jiffies(ms)); 802 msecs_to_jiffies(ms));
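
The tolerance change in this hunk roughly doubles the expected wire time and adds a flat 100 ms, replacing the old fixed 10 ms margin. A quick worked example of the new budget; the transfer size and clock rate are illustrative only.

#include <stdio.h>

int main(void)
{
	unsigned long len = 4096, speed_hz = 1000000;	/* 4 KiB at 1 MHz */

	unsigned long ms = len * 8 * 1000 / speed_hz;	/* 32 ms on the wire */
	ms += ms + 100;					/* doubled plus 100 ms -> 164 ms budget */

	printf("timeout budget: %lu ms\n", ms);
	return 0;
}
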
@@ -1151,7 +1172,6 @@ static int spi_master_initialize_queue(struct spi_master *master)
1151{ 1172{
1152 int ret; 1173 int ret;
1153 1174
1154 master->queued = true;
1155 master->transfer = spi_queued_transfer; 1175 master->transfer = spi_queued_transfer;
1156 if (!master->transfer_one_message) 1176 if (!master->transfer_one_message)
1157 master->transfer_one_message = spi_transfer_one_message; 1177 master->transfer_one_message = spi_transfer_one_message;
@@ -1162,6 +1182,7 @@ static int spi_master_initialize_queue(struct spi_master *master)
1162 dev_err(&master->dev, "problem initializing queue\n"); 1182 dev_err(&master->dev, "problem initializing queue\n");
1163 goto err_init_queue; 1183 goto err_init_queue;
1164 } 1184 }
1185 master->queued = true;
1165 ret = spi_start_queue(master); 1186 ret = spi_start_queue(master);
1166 if (ret) { 1187 if (ret) {
1167 dev_err(&master->dev, "problem starting queue\n"); 1188 dev_err(&master->dev, "problem starting queue\n");
@@ -1171,8 +1192,8 @@ static int spi_master_initialize_queue(struct spi_master *master)
1171 return 0; 1192 return 0;
1172 1193
1173err_start_queue: 1194err_start_queue:
1174err_init_queue:
1175 spi_destroy_queue(master); 1195 spi_destroy_queue(master);
1196err_init_queue:
1176 return ret; 1197 return ret;
1177} 1198}
1178 1199
@@ -1234,6 +1255,8 @@ static void of_register_spi_devices(struct spi_master *master)
1234 spi->mode |= SPI_CS_HIGH; 1255 spi->mode |= SPI_CS_HIGH;
1235 if (of_find_property(nc, "spi-3wire", NULL)) 1256 if (of_find_property(nc, "spi-3wire", NULL))
1236 spi->mode |= SPI_3WIRE; 1257 spi->mode |= SPI_3WIRE;
1258 if (of_find_property(nc, "spi-lsb-first", NULL))
1259 spi->mode |= SPI_LSB_FIRST;
1237 1260
1238 /* Device DUAL/QUAD mode */ 1261 /* Device DUAL/QUAD mode */
1239 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) { 1262 if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
@@ -1247,11 +1270,10 @@ static void of_register_spi_devices(struct spi_master *master)
1247 spi->mode |= SPI_TX_QUAD; 1270 spi->mode |= SPI_TX_QUAD;
1248 break; 1271 break;
1249 default: 1272 default:
1250 dev_err(&master->dev, 1273 dev_warn(&master->dev,
1251 "spi-tx-bus-width %d not supported\n", 1274 "spi-tx-bus-width %d not supported\n",
1252 value); 1275 value);
1253 spi_dev_put(spi); 1276 break;
1254 continue;
1255 } 1277 }
1256 } 1278 }
1257 1279
@@ -1266,11 +1288,10 @@ static void of_register_spi_devices(struct spi_master *master)
1266 spi->mode |= SPI_RX_QUAD; 1288 spi->mode |= SPI_RX_QUAD;
1267 break; 1289 break;
1268 default: 1290 default:
1269 dev_err(&master->dev, 1291 dev_warn(&master->dev,
1270 "spi-rx-bus-width %d not supported\n", 1292 "spi-rx-bus-width %d not supported\n",
1271 value); 1293 value);
1272 spi_dev_put(spi); 1294 break;
1273 continue;
1274 } 1295 }
1275 } 1296 }
1276 1297
@@ -1756,7 +1777,7 @@ EXPORT_SYMBOL_GPL(spi_busnum_to_master);
1756 */ 1777 */
1757int spi_setup(struct spi_device *spi) 1778int spi_setup(struct spi_device *spi)
1758{ 1779{
1759 unsigned bad_bits; 1780 unsigned bad_bits, ugly_bits;
1760 int status = 0; 1781 int status = 0;
1761 1782
1762 /* check mode to prevent that DUAL and QUAD set at the same time 1783 /* check mode to prevent that DUAL and QUAD set at the same time
@@ -1776,6 +1797,15 @@ int spi_setup(struct spi_device *spi)
1776 * that aren't supported with their current master 1797 * that aren't supported with their current master
1777 */ 1798 */
1778 bad_bits = spi->mode & ~spi->master->mode_bits; 1799 bad_bits = spi->mode & ~spi->master->mode_bits;
1800 ugly_bits = bad_bits &
1801 (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
1802 if (ugly_bits) {
1803 dev_warn(&spi->dev,
1804 "setup: ignoring unsupported mode bits %x\n",
1805 ugly_bits);
1806 spi->mode &= ~ugly_bits;
1807 bad_bits &= ~ugly_bits;
1808 }
1779 if (bad_bits) { 1809 if (bad_bits) {
1780 dev_err(&spi->dev, "setup: unsupported mode bits %x\n", 1810 dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
1781 bad_bits); 1811 bad_bits);