author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 19:38:41 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-29 19:38:41 -0400
commit | 61f3d0a9883d965b498edeb673235bddc92770fd (patch)
tree | fa1e394cd1d5332f4a205d12f0db88320bc83813 /drivers
parent | 8ded8d4e4facab78acf616bc34085ddd15c2c21c (diff)
parent | cd8d984f0def2a8c5733a9468634ec3e0feec03d (diff)
Merge tag 'spi-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi
Pull spi updates from Mark Brown:
"A fairly quiet release for SPI, mainly driver work. A few highlights:
- Support for bits per word compatibility checking in the core (a short sketch follows this list).
- Allow use of the IP used in Freescale SPI controllers outside
Freescale SoCs.
- DMA support for the Atmel SPI driver.
- New drivers for the BCM2835 and Tegra114"
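As a rough illustration of the bits-per-word highlight above, here is a minimal sketch of how a controller driver might advertise its supported word sizes so the SPI core can perform the compatibility check. The probe function, the device it binds to, and the 8/16-bit mask are illustrative assumptions; only the bits_per_word_mask field and the SPI_BPW_MASK() helper come from this release's core change.

```c
/*
 * Hypothetical probe snippet (not part of this merge): shows the new
 * core-level bits-per-word validation.  Driver and device names are
 * made up; bits_per_word_mask and SPI_BPW_MASK() are the new core pieces.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int example_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;

	master = spi_alloc_master(&pdev->dev, 0);
	if (!master)
		return -ENOMEM;

	/* Assume the hardware only handles 8- and 16-bit words; the core
	 * now rejects transfers requesting any other bits_per_word, so the
	 * driver no longer has to open-code that validation.
	 */
	master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);

	/* ... clock, register and IRQ setup would go here ... */

	return spi_register_master(master);
}
```

With the mask set, a transfer asking for, say, 12-bit words is refused by the core with -EINVAL before it ever reaches the driver.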
* tag 'spi-v3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/spi: (68 commits)
spi-topcliff-pch: fix to use list_for_each_entry_safe() when delete list items
spi-topcliff-pch: missing platform_driver_unregister() on error in pch_spi_init()
ARM: dts: add pinctrl property for spi node for atmel SoC
ARM: dts: add spi nodes for the atmel boards
ARM: dts: add spi nodes for atmel SoC
ARM: at91: add clocks for spi dt entries
spi/spi-atmel: add dmaengine support
spi/spi-atmel: add flag to controller data for lock operations
spi/spi-atmel: add physical base address
spi/sirf: fix MODULE_DEVICE_TABLE
MAINTAINERS: Add git repository and update my address
spi/s3c64xx: Check for errors in dmaengine prepare_transfer()
spi/s3c64xx: Fix non-dmaengine usage
spi: omap2-mcspi: fix error return code in omap2_mcspi_probe()
spi/s3c64xx: let device core setup the default pin configuration
MAINTAINERS: Update Grant's email address and maintainership
spi: omap2-mcspi: Fix transfers if DMADEVICES is not set
spi: s3c64xx: move to generic dmaengine API
spi-gpio: init CS before spi_bitbang_setup()
spi: spi-mpc512x-psc: let transmiter/receiver enabled when in xfer loop
...
Diffstat (limited to 'drivers')
27 files changed, 3372 insertions, 821 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 2be0de920d67..141d8c10b764 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -75,6 +75,17 @@ config SPI_ATMEL | |||
75 | This selects a driver for the Atmel SPI Controller, present on | 75 | This selects a driver for the Atmel SPI Controller, present on |
76 | many AT32 (AVR32) and AT91 (ARM) chips. | 76 | many AT32 (AVR32) and AT91 (ARM) chips. |
77 | 77 | ||
78 | config SPI_BCM2835 | ||
79 | tristate "BCM2835 SPI controller" | ||
80 | depends on ARCH_BCM2835 | ||
81 | help | ||
82 | This selects a driver for the Broadcom BCM2835 SPI master. | ||
83 | |||
84 | The BCM2835 contains two types of SPI master controller; the | ||
85 | "universal SPI master", and the regular SPI controller. This driver | ||
86 | is for the regular SPI controller. Slave mode operation is also | ||
87 | not supported. | ||
88 | |||
78 | config SPI_BFIN5XX | 89 | config SPI_BFIN5XX |
79 | tristate "SPI controller driver for ADI Blackfin5xx" | 90 | tristate "SPI controller driver for ADI Blackfin5xx" |
80 | depends on BLACKFIN | 91 | depends on BLACKFIN |
@@ -219,16 +230,23 @@ config SPI_MPC512x_PSC | |||
219 | 230 | ||
220 | config SPI_FSL_LIB | 231 | config SPI_FSL_LIB |
221 | tristate | 232 | tristate |
233 | depends on OF | ||
234 | |||
235 | config SPI_FSL_CPM | ||
236 | tristate | ||
222 | depends on FSL_SOC | 237 | depends on FSL_SOC |
223 | 238 | ||
224 | config SPI_FSL_SPI | 239 | config SPI_FSL_SPI |
225 | bool "Freescale SPI controller" | 240 | bool "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller" |
226 | depends on FSL_SOC | 241 | depends on OF |
227 | select SPI_FSL_LIB | 242 | select SPI_FSL_LIB |
243 | select SPI_FSL_CPM if FSL_SOC | ||
228 | help | 244 | help |
229 | This enables using the Freescale SPI controllers in master mode. | 245 | This enables using the Freescale SPI controllers in master mode. |
230 | MPC83xx platform uses the controller in cpu mode or CPM/QE mode. | 246 | MPC83xx platform uses the controller in cpu mode or CPM/QE mode. |
231 | MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. | 247 | MPC8569 uses the controller in QE mode, MPC8610 in cpu mode. |
248 | This also enables using the Aeroflex Gaisler GRLIB SPI controller in | ||
249 | master mode. | ||
232 | 250 | ||
233 | config SPI_FSL_ESPI | 251 | config SPI_FSL_ESPI |
234 | bool "Freescale eSPI controller" | 252 | bool "Freescale eSPI controller" |
@@ -398,6 +416,14 @@ config SPI_MXS | |||
398 | help | 416 | help |
399 | SPI driver for Freescale MXS devices. | 417 | SPI driver for Freescale MXS devices. |
400 | 418 | ||
419 | config SPI_TEGRA114 | ||
420 | tristate "NVIDIA Tegra114 SPI Controller" | ||
421 | depends on ARCH_TEGRA && TEGRA20_APB_DMA | ||
422 | help | ||
423 | SPI driver for the NVIDIA Tegra114 SPI controller. This controller | ||
424 | differs from the SPI controller in older Tegra SoCs, and its register | ||
425 | interface has also changed. | ||
426 | |||
401 | config SPI_TEGRA20_SFLASH | 427 | config SPI_TEGRA20_SFLASH |
402 | tristate "Nvidia Tegra20 Serial flash Controller" | 428 | tristate "Nvidia Tegra20 Serial flash Controller" |
403 | depends on ARCH_TEGRA | 429 | depends on ARCH_TEGRA |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index e53c30941340..33f9c09561e7 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -14,6 +14,7 @@ obj-$(CONFIG_SPI_ALTERA) += spi-altera.o | |||
14 | obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o | 14 | obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o |
15 | obj-$(CONFIG_SPI_ATH79) += spi-ath79.o | 15 | obj-$(CONFIG_SPI_ATH79) += spi-ath79.o |
16 | obj-$(CONFIG_SPI_AU1550) += spi-au1550.o | 16 | obj-$(CONFIG_SPI_AU1550) += spi-au1550.o |
17 | obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o | ||
17 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o | 18 | obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o |
18 | obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o | 19 | obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o |
19 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o | 20 | obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o |
@@ -28,6 +29,7 @@ obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o | |||
28 | spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o | 29 | spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o |
29 | obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o | 30 | obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o |
30 | obj-$(CONFIG_SPI_FALCON) += spi-falcon.o | 31 | obj-$(CONFIG_SPI_FALCON) += spi-falcon.o |
32 | obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o | ||
31 | obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o | 33 | obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o |
32 | obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o | 34 | obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o |
33 | obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o | 35 | obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o |
@@ -63,6 +65,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o | |||
63 | obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o | 65 | obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o |
64 | obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o | 66 | obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o |
65 | obj-$(CONFIG_SPI_SIRF) += spi-sirf.o | 67 | obj-$(CONFIG_SPI_SIRF) += spi-sirf.o |
68 | obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o | ||
66 | obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o | 69 | obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o |
67 | obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o | 70 | obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o |
68 | obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o | 71 | obj-$(CONFIG_SPI_TI_SSP) += spi-ti-ssp.o |
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 656d137db253..787bd2c22bca 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c | |||
@@ -15,16 +15,17 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/dma-mapping.h> | 17 | #include <linux/dma-mapping.h> |
18 | #include <linux/dmaengine.h> | ||
18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
19 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
20 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
21 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
22 | #include <linux/platform_data/atmel.h> | 23 | #include <linux/platform_data/atmel.h> |
24 | #include <linux/platform_data/dma-atmel.h> | ||
23 | #include <linux/of.h> | 25 | #include <linux/of.h> |
24 | 26 | ||
25 | #include <asm/io.h> | 27 | #include <linux/io.h> |
26 | #include <asm/gpio.h> | 28 | #include <linux/gpio.h> |
27 | #include <mach/cpu.h> | ||
28 | 29 | ||
29 | /* SPI register offsets */ | 30 | /* SPI register offsets */ |
30 | #define SPI_CR 0x0000 | 31 | #define SPI_CR 0x0000 |
@@ -39,6 +40,7 @@ | |||
39 | #define SPI_CSR1 0x0034 | 40 | #define SPI_CSR1 0x0034 |
40 | #define SPI_CSR2 0x0038 | 41 | #define SPI_CSR2 0x0038 |
41 | #define SPI_CSR3 0x003c | 42 | #define SPI_CSR3 0x003c |
43 | #define SPI_VERSION 0x00fc | ||
42 | #define SPI_RPR 0x0100 | 44 | #define SPI_RPR 0x0100 |
43 | #define SPI_RCR 0x0104 | 45 | #define SPI_RCR 0x0104 |
44 | #define SPI_TPR 0x0108 | 46 | #define SPI_TPR 0x0108 |
@@ -71,6 +73,8 @@ | |||
71 | #define SPI_FDIV_SIZE 1 | 73 | #define SPI_FDIV_SIZE 1 |
72 | #define SPI_MODFDIS_OFFSET 4 | 74 | #define SPI_MODFDIS_OFFSET 4 |
73 | #define SPI_MODFDIS_SIZE 1 | 75 | #define SPI_MODFDIS_SIZE 1 |
76 | #define SPI_WDRBT_OFFSET 5 | ||
77 | #define SPI_WDRBT_SIZE 1 | ||
74 | #define SPI_LLB_OFFSET 7 | 78 | #define SPI_LLB_OFFSET 7 |
75 | #define SPI_LLB_SIZE 1 | 79 | #define SPI_LLB_SIZE 1 |
76 | #define SPI_PCS_OFFSET 16 | 80 | #define SPI_PCS_OFFSET 16 |
@@ -180,6 +184,27 @@ | |||
180 | #define spi_writel(port,reg,value) \ | 184 | #define spi_writel(port,reg,value) \ |
181 | __raw_writel((value), (port)->regs + SPI_##reg) | 185 | __raw_writel((value), (port)->regs + SPI_##reg) |
182 | 186 | ||
187 | /* use PIO for small transfers, avoiding DMA setup/teardown overhead and | ||
188 | * cache operations; a better heuristic would also consider word size and bitrate. | ||
189 | */ | ||
190 | #define DMA_MIN_BYTES 16 | ||
191 | |||
192 | struct atmel_spi_dma { | ||
193 | struct dma_chan *chan_rx; | ||
194 | struct dma_chan *chan_tx; | ||
195 | struct scatterlist sgrx; | ||
196 | struct scatterlist sgtx; | ||
197 | struct dma_async_tx_descriptor *data_desc_rx; | ||
198 | struct dma_async_tx_descriptor *data_desc_tx; | ||
199 | |||
200 | struct at_dma_slave dma_slave; | ||
201 | }; | ||
202 | |||
203 | struct atmel_spi_caps { | ||
204 | bool is_spi2; | ||
205 | bool has_wdrbt; | ||
206 | bool has_dma_support; | ||
207 | }; | ||
183 | 208 | ||
184 | /* | 209 | /* |
185 | * The core SPI transfer engine just talks to a register bank to set up | 210 | * The core SPI transfer engine just talks to a register bank to set up |
@@ -188,7 +213,9 @@ | |||
188 | */ | 213 | */ |
189 | struct atmel_spi { | 214 | struct atmel_spi { |
190 | spinlock_t lock; | 215 | spinlock_t lock; |
216 | unsigned long flags; | ||
191 | 217 | ||
218 | phys_addr_t phybase; | ||
192 | void __iomem *regs; | 219 | void __iomem *regs; |
193 | int irq; | 220 | int irq; |
194 | struct clk *clk; | 221 | struct clk *clk; |
@@ -197,13 +224,23 @@ struct atmel_spi { | |||
197 | 224 | ||
198 | u8 stopping; | 225 | u8 stopping; |
199 | struct list_head queue; | 226 | struct list_head queue; |
227 | struct tasklet_struct tasklet; | ||
200 | struct spi_transfer *current_transfer; | 228 | struct spi_transfer *current_transfer; |
201 | unsigned long current_remaining_bytes; | 229 | unsigned long current_remaining_bytes; |
202 | struct spi_transfer *next_transfer; | 230 | struct spi_transfer *next_transfer; |
203 | unsigned long next_remaining_bytes; | 231 | unsigned long next_remaining_bytes; |
232 | int done_status; | ||
204 | 233 | ||
234 | /* scratch buffer */ | ||
205 | void *buffer; | 235 | void *buffer; |
206 | dma_addr_t buffer_dma; | 236 | dma_addr_t buffer_dma; |
237 | |||
238 | struct atmel_spi_caps caps; | ||
239 | |||
240 | bool use_dma; | ||
241 | bool use_pdc; | ||
242 | /* dmaengine data */ | ||
243 | struct atmel_spi_dma dma; | ||
207 | }; | 244 | }; |
208 | 245 | ||
209 | /* Controller-specific per-slave state */ | 246 | /* Controller-specific per-slave state */ |
@@ -222,14 +259,10 @@ struct atmel_spi_device { | |||
222 | * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs) | 259 | * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs) |
223 | * - SPI_CSRx.CSAAT | 260 | * - SPI_CSRx.CSAAT |
224 | * - SPI_CSRx.SBCR allows faster clocking | 261 | * - SPI_CSRx.SBCR allows faster clocking |
225 | * | ||
226 | * We can determine the controller version by reading the VERSION | ||
227 | * register, but I haven't checked that it exists on all chips, and | ||
228 | * this is cheaper anyway. | ||
229 | */ | 262 | */ |
230 | static bool atmel_spi_is_v2(void) | 263 | static bool atmel_spi_is_v2(struct atmel_spi *as) |
231 | { | 264 | { |
232 | return !cpu_is_at91rm9200(); | 265 | return as->caps.is_spi2; |
233 | } | 266 | } |
234 | 267 | ||
235 | /* | 268 | /* |
@@ -250,11 +283,6 @@ static bool atmel_spi_is_v2(void) | |||
250 | * Master on Chip Select 0.") No workaround exists for that ... so for | 283 | * Master on Chip Select 0.") No workaround exists for that ... so for |
251 | * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, | 284 | * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH, |
252 | * and (c) will trigger that first erratum in some cases. | 285 | * and (c) will trigger that first erratum in some cases. |
253 | * | ||
254 | * TODO: Test if the atmel_spi_is_v2() branch below works on | ||
255 | * AT91RM9200 if we use some other register than CSR0. However, don't | ||
256 | * do this unconditionally since AP7000 has an errata where the BITS | ||
257 | * field in CSR0 overrides all other CSRs. | ||
258 | */ | 286 | */ |
259 | 287 | ||
260 | static void cs_activate(struct atmel_spi *as, struct spi_device *spi) | 288 | static void cs_activate(struct atmel_spi *as, struct spi_device *spi) |
@@ -263,15 +291,25 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) | |||
263 | unsigned active = spi->mode & SPI_CS_HIGH; | 291 | unsigned active = spi->mode & SPI_CS_HIGH; |
264 | u32 mr; | 292 | u32 mr; |
265 | 293 | ||
266 | if (atmel_spi_is_v2()) { | 294 | if (atmel_spi_is_v2(as)) { |
267 | /* | 295 | spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr); |
268 | * Always use CSR0. This ensures that the clock | 296 | /* For lower SPI versions, there is an issue where a PDC transfer |
269 | * switches to the correct idle polarity before we | 297 | * on CS1,2,3 needs SPI_CSR0.BITS configured the same as SPI_CSR1,2,3.BITS |
270 | * toggle the CS. | ||
271 | */ | 298 | */ |
272 | spi_writel(as, CSR0, asd->csr); | 299 | spi_writel(as, CSR0, asd->csr); |
273 | spi_writel(as, MR, SPI_BF(PCS, 0x0e) | SPI_BIT(MODFDIS) | 300 | if (as->caps.has_wdrbt) { |
274 | | SPI_BIT(MSTR)); | 301 | spi_writel(as, MR, |
302 | SPI_BF(PCS, ~(0x01 << spi->chip_select)) | ||
303 | | SPI_BIT(WDRBT) | ||
304 | | SPI_BIT(MODFDIS) | ||
305 | | SPI_BIT(MSTR)); | ||
306 | } else { | ||
307 | spi_writel(as, MR, | ||
308 | SPI_BF(PCS, ~(0x01 << spi->chip_select)) | ||
309 | | SPI_BIT(MODFDIS) | ||
310 | | SPI_BIT(MSTR)); | ||
311 | } | ||
312 | |||
275 | mr = spi_readl(as, MR); | 313 | mr = spi_readl(as, MR); |
276 | gpio_set_value(asd->npcs_pin, active); | 314 | gpio_set_value(asd->npcs_pin, active); |
277 | } else { | 315 | } else { |
@@ -318,10 +356,26 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi) | |||
318 | asd->npcs_pin, active ? " (low)" : "", | 356 | asd->npcs_pin, active ? " (low)" : "", |
319 | mr); | 357 | mr); |
320 | 358 | ||
321 | if (atmel_spi_is_v2() || spi->chip_select != 0) | 359 | if (atmel_spi_is_v2(as) || spi->chip_select != 0) |
322 | gpio_set_value(asd->npcs_pin, !active); | 360 | gpio_set_value(asd->npcs_pin, !active); |
323 | } | 361 | } |
324 | 362 | ||
363 | static void atmel_spi_lock(struct atmel_spi *as) | ||
364 | { | ||
365 | spin_lock_irqsave(&as->lock, as->flags); | ||
366 | } | ||
367 | |||
368 | static void atmel_spi_unlock(struct atmel_spi *as) | ||
369 | { | ||
370 | spin_unlock_irqrestore(&as->lock, as->flags); | ||
371 | } | ||
372 | |||
373 | static inline bool atmel_spi_use_dma(struct atmel_spi *as, | ||
374 | struct spi_transfer *xfer) | ||
375 | { | ||
376 | return as->use_dma && xfer->len >= DMA_MIN_BYTES; | ||
377 | } | ||
378 | |||
325 | static inline int atmel_spi_xfer_is_last(struct spi_message *msg, | 379 | static inline int atmel_spi_xfer_is_last(struct spi_message *msg, |
326 | struct spi_transfer *xfer) | 380 | struct spi_transfer *xfer) |
327 | { | 381 | { |
@@ -333,6 +387,265 @@ static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer) | |||
333 | return xfer->delay_usecs == 0 && !xfer->cs_change; | 387 | return xfer->delay_usecs == 0 && !xfer->cs_change; |
334 | } | 388 | } |
335 | 389 | ||
390 | static int atmel_spi_dma_slave_config(struct atmel_spi *as, | ||
391 | struct dma_slave_config *slave_config, | ||
392 | u8 bits_per_word) | ||
393 | { | ||
394 | int err = 0; | ||
395 | |||
396 | if (bits_per_word > 8) { | ||
397 | slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
398 | slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
399 | } else { | ||
400 | slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
401 | slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
402 | } | ||
403 | |||
404 | slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR; | ||
405 | slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR; | ||
406 | slave_config->src_maxburst = 1; | ||
407 | slave_config->dst_maxburst = 1; | ||
408 | slave_config->device_fc = false; | ||
409 | |||
410 | slave_config->direction = DMA_MEM_TO_DEV; | ||
411 | if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) { | ||
412 | dev_err(&as->pdev->dev, | ||
413 | "failed to configure tx dma channel\n"); | ||
414 | err = -EINVAL; | ||
415 | } | ||
416 | |||
417 | slave_config->direction = DMA_DEV_TO_MEM; | ||
418 | if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) { | ||
419 | dev_err(&as->pdev->dev, | ||
420 | "failed to configure rx dma channel\n"); | ||
421 | err = -EINVAL; | ||
422 | } | ||
423 | |||
424 | return err; | ||
425 | } | ||
426 | |||
427 | static bool filter(struct dma_chan *chan, void *slave) | ||
428 | { | ||
429 | struct at_dma_slave *sl = slave; | ||
430 | |||
431 | if (sl->dma_dev == chan->device->dev) { | ||
432 | chan->private = sl; | ||
433 | return true; | ||
434 | } else { | ||
435 | return false; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | static int atmel_spi_configure_dma(struct atmel_spi *as) | ||
440 | { | ||
441 | struct at_dma_slave *sdata = &as->dma.dma_slave; | ||
442 | struct dma_slave_config slave_config; | ||
443 | int err; | ||
444 | |||
445 | if (sdata && sdata->dma_dev) { | ||
446 | dma_cap_mask_t mask; | ||
447 | |||
448 | /* Try to grab two DMA channels */ | ||
449 | dma_cap_zero(mask); | ||
450 | dma_cap_set(DMA_SLAVE, mask); | ||
451 | as->dma.chan_tx = dma_request_channel(mask, filter, sdata); | ||
452 | if (as->dma.chan_tx) | ||
453 | as->dma.chan_rx = | ||
454 | dma_request_channel(mask, filter, sdata); | ||
455 | } | ||
456 | if (!as->dma.chan_rx || !as->dma.chan_tx) { | ||
457 | dev_err(&as->pdev->dev, | ||
458 | "DMA channel not available, SPI unable to use DMA\n"); | ||
459 | err = -EBUSY; | ||
460 | goto error; | ||
461 | } | ||
462 | |||
463 | err = atmel_spi_dma_slave_config(as, &slave_config, 8); | ||
464 | if (err) | ||
465 | goto error; | ||
466 | |||
467 | dev_info(&as->pdev->dev, | ||
468 | "Using %s (tx) and %s (rx) for DMA transfers\n", | ||
469 | dma_chan_name(as->dma.chan_tx), | ||
470 | dma_chan_name(as->dma.chan_rx)); | ||
471 | return 0; | ||
472 | error: | ||
473 | if (as->dma.chan_rx) | ||
474 | dma_release_channel(as->dma.chan_rx); | ||
475 | if (as->dma.chan_tx) | ||
476 | dma_release_channel(as->dma.chan_tx); | ||
477 | return err; | ||
478 | } | ||
479 | |||
480 | static void atmel_spi_stop_dma(struct atmel_spi *as) | ||
481 | { | ||
482 | if (as->dma.chan_rx) | ||
483 | as->dma.chan_rx->device->device_control(as->dma.chan_rx, | ||
484 | DMA_TERMINATE_ALL, 0); | ||
485 | if (as->dma.chan_tx) | ||
486 | as->dma.chan_tx->device->device_control(as->dma.chan_tx, | ||
487 | DMA_TERMINATE_ALL, 0); | ||
488 | } | ||
489 | |||
490 | static void atmel_spi_release_dma(struct atmel_spi *as) | ||
491 | { | ||
492 | if (as->dma.chan_rx) | ||
493 | dma_release_channel(as->dma.chan_rx); | ||
494 | if (as->dma.chan_tx) | ||
495 | dma_release_channel(as->dma.chan_tx); | ||
496 | } | ||
497 | |||
498 | /* This function is called by the DMA driver from tasklet context */ | ||
499 | static void dma_callback(void *data) | ||
500 | { | ||
501 | struct spi_master *master = data; | ||
502 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
503 | |||
504 | /* trigger SPI tasklet */ | ||
505 | tasklet_schedule(&as->tasklet); | ||
506 | } | ||
507 | |||
508 | /* | ||
509 | * Next transfer using PIO. | ||
510 | * lock is held, spi tasklet is blocked | ||
511 | */ | ||
512 | static void atmel_spi_next_xfer_pio(struct spi_master *master, | ||
513 | struct spi_transfer *xfer) | ||
514 | { | ||
515 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
516 | |||
517 | dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n"); | ||
518 | |||
519 | as->current_remaining_bytes = xfer->len; | ||
520 | |||
521 | /* Make sure data is not remaining in RDR */ | ||
522 | spi_readl(as, RDR); | ||
523 | while (spi_readl(as, SR) & SPI_BIT(RDRF)) { | ||
524 | spi_readl(as, RDR); | ||
525 | cpu_relax(); | ||
526 | } | ||
527 | |||
528 | if (xfer->tx_buf) | ||
529 | spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); | ||
530 | else | ||
531 | spi_writel(as, TDR, 0); | ||
532 | |||
533 | dev_dbg(master->dev.parent, | ||
534 | " start pio xfer %p: len %u tx %p rx %p\n", | ||
535 | xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); | ||
536 | |||
537 | /* Enable relevant interrupts */ | ||
538 | spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * Submit next transfer for DMA. | ||
543 | * lock is held, spi tasklet is blocked | ||
544 | */ | ||
545 | static int atmel_spi_next_xfer_dma_submit(struct spi_master *master, | ||
546 | struct spi_transfer *xfer, | ||
547 | u32 *plen) | ||
548 | { | ||
549 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
550 | struct dma_chan *rxchan = as->dma.chan_rx; | ||
551 | struct dma_chan *txchan = as->dma.chan_tx; | ||
552 | struct dma_async_tx_descriptor *rxdesc; | ||
553 | struct dma_async_tx_descriptor *txdesc; | ||
554 | struct dma_slave_config slave_config; | ||
555 | dma_cookie_t cookie; | ||
556 | u32 len = *plen; | ||
557 | |||
558 | dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n"); | ||
559 | |||
560 | /* Check that the channels are available */ | ||
561 | if (!rxchan || !txchan) | ||
562 | return -ENODEV; | ||
563 | |||
564 | /* release lock for DMA operations */ | ||
565 | atmel_spi_unlock(as); | ||
566 | |||
567 | /* prepare the RX dma transfer */ | ||
568 | sg_init_table(&as->dma.sgrx, 1); | ||
569 | if (xfer->rx_buf) { | ||
570 | as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen; | ||
571 | } else { | ||
572 | as->dma.sgrx.dma_address = as->buffer_dma; | ||
573 | if (len > BUFFER_SIZE) | ||
574 | len = BUFFER_SIZE; | ||
575 | } | ||
576 | |||
577 | /* prepare the TX dma transfer */ | ||
578 | sg_init_table(&as->dma.sgtx, 1); | ||
579 | if (xfer->tx_buf) { | ||
580 | as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen; | ||
581 | } else { | ||
582 | as->dma.sgtx.dma_address = as->buffer_dma; | ||
583 | if (len > BUFFER_SIZE) | ||
584 | len = BUFFER_SIZE; | ||
585 | memset(as->buffer, 0, len); | ||
586 | } | ||
587 | |||
588 | sg_dma_len(&as->dma.sgtx) = len; | ||
589 | sg_dma_len(&as->dma.sgrx) = len; | ||
590 | |||
591 | *plen = len; | ||
592 | |||
593 | if (atmel_spi_dma_slave_config(as, &slave_config, 8)) | ||
594 | goto err_exit; | ||
595 | |||
596 | /* Send both scatterlists */ | ||
597 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
598 | &as->dma.sgrx, | ||
599 | 1, | ||
600 | DMA_FROM_DEVICE, | ||
601 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK, | ||
602 | NULL); | ||
603 | if (!rxdesc) | ||
604 | goto err_dma; | ||
605 | |||
606 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
607 | &as->dma.sgtx, | ||
608 | 1, | ||
609 | DMA_TO_DEVICE, | ||
610 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK, | ||
611 | NULL); | ||
612 | if (!txdesc) | ||
613 | goto err_dma; | ||
614 | |||
615 | dev_dbg(master->dev.parent, | ||
616 | " start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n", | ||
617 | xfer, xfer->len, xfer->tx_buf, xfer->tx_dma, | ||
618 | xfer->rx_buf, xfer->rx_dma); | ||
619 | |||
620 | /* Enable relevant interrupts */ | ||
621 | spi_writel(as, IER, SPI_BIT(OVRES)); | ||
622 | |||
623 | /* Put the callback on the RX transfer only, that should finish last */ | ||
624 | rxdesc->callback = dma_callback; | ||
625 | rxdesc->callback_param = master; | ||
626 | |||
627 | /* Submit and fire RX and TX with TX last so we're ready to read! */ | ||
628 | cookie = rxdesc->tx_submit(rxdesc); | ||
629 | if (dma_submit_error(cookie)) | ||
630 | goto err_dma; | ||
631 | cookie = txdesc->tx_submit(txdesc); | ||
632 | if (dma_submit_error(cookie)) | ||
633 | goto err_dma; | ||
634 | rxchan->device->device_issue_pending(rxchan); | ||
635 | txchan->device->device_issue_pending(txchan); | ||
636 | |||
637 | /* take back lock */ | ||
638 | atmel_spi_lock(as); | ||
639 | return 0; | ||
640 | |||
641 | err_dma: | ||
642 | spi_writel(as, IDR, SPI_BIT(OVRES)); | ||
643 | atmel_spi_stop_dma(as); | ||
644 | err_exit: | ||
645 | atmel_spi_lock(as); | ||
646 | return -ENOMEM; | ||
647 | } | ||
648 | |||
336 | static void atmel_spi_next_xfer_data(struct spi_master *master, | 649 | static void atmel_spi_next_xfer_data(struct spi_master *master, |
337 | struct spi_transfer *xfer, | 650 | struct spi_transfer *xfer, |
338 | dma_addr_t *tx_dma, | 651 | dma_addr_t *tx_dma, |
@@ -350,6 +663,7 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, | |||
350 | if (len > BUFFER_SIZE) | 663 | if (len > BUFFER_SIZE) |
351 | len = BUFFER_SIZE; | 664 | len = BUFFER_SIZE; |
352 | } | 665 | } |
666 | |||
353 | if (xfer->tx_buf) | 667 | if (xfer->tx_buf) |
354 | *tx_dma = xfer->tx_dma + xfer->len - *plen; | 668 | *tx_dma = xfer->tx_dma + xfer->len - *plen; |
355 | else { | 669 | else { |
@@ -365,10 +679,10 @@ static void atmel_spi_next_xfer_data(struct spi_master *master, | |||
365 | } | 679 | } |
366 | 680 | ||
367 | /* | 681 | /* |
368 | * Submit next transfer for DMA. | 682 | * Submit next transfer for PDC. |
369 | * lock is held, spi irq is blocked | 683 | * lock is held, spi irq is blocked |
370 | */ | 684 | */ |
371 | static void atmel_spi_next_xfer(struct spi_master *master, | 685 | static void atmel_spi_pdc_next_xfer(struct spi_master *master, |
372 | struct spi_message *msg) | 686 | struct spi_message *msg) |
373 | { | 687 | { |
374 | struct atmel_spi *as = spi_master_get_devdata(master); | 688 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -465,6 +779,48 @@ static void atmel_spi_next_xfer(struct spi_master *master, | |||
465 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); | 779 | spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); |
466 | } | 780 | } |
467 | 781 | ||
782 | /* | ||
783 | * Choose way to submit next transfer and start it. | ||
784 | * lock is held, spi tasklet is blocked | ||
785 | */ | ||
786 | static void atmel_spi_dma_next_xfer(struct spi_master *master, | ||
787 | struct spi_message *msg) | ||
788 | { | ||
789 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
790 | struct spi_transfer *xfer; | ||
791 | u32 remaining, len; | ||
792 | |||
793 | remaining = as->current_remaining_bytes; | ||
794 | if (remaining) { | ||
795 | xfer = as->current_transfer; | ||
796 | len = remaining; | ||
797 | } else { | ||
798 | if (!as->current_transfer) | ||
799 | xfer = list_entry(msg->transfers.next, | ||
800 | struct spi_transfer, transfer_list); | ||
801 | else | ||
802 | xfer = list_entry( | ||
803 | as->current_transfer->transfer_list.next, | ||
804 | struct spi_transfer, transfer_list); | ||
805 | |||
806 | as->current_transfer = xfer; | ||
807 | len = xfer->len; | ||
808 | } | ||
809 | |||
810 | if (atmel_spi_use_dma(as, xfer)) { | ||
811 | u32 total = len; | ||
812 | if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) { | ||
813 | as->current_remaining_bytes = total - len; | ||
814 | return; | ||
815 | } else { | ||
816 | dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n"); | ||
817 | } | ||
818 | } | ||
819 | |||
820 | /* use PIO if an error happened using DMA */ | ||
821 | atmel_spi_next_xfer_pio(master, xfer); | ||
822 | } | ||
823 | |||
468 | static void atmel_spi_next_message(struct spi_master *master) | 824 | static void atmel_spi_next_message(struct spi_master *master) |
469 | { | 825 | { |
470 | struct atmel_spi *as = spi_master_get_devdata(master); | 826 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -489,7 +845,10 @@ static void atmel_spi_next_message(struct spi_master *master) | |||
489 | } else | 845 | } else |
490 | cs_activate(as, spi); | 846 | cs_activate(as, spi); |
491 | 847 | ||
492 | atmel_spi_next_xfer(master, msg); | 848 | if (as->use_pdc) |
849 | atmel_spi_pdc_next_xfer(master, msg); | ||
850 | else | ||
851 | atmel_spi_dma_next_xfer(master, msg); | ||
493 | } | 852 | } |
494 | 853 | ||
495 | /* | 854 | /* |
@@ -542,38 +901,213 @@ static void atmel_spi_dma_unmap_xfer(struct spi_master *master, | |||
542 | xfer->len, DMA_FROM_DEVICE); | 901 | xfer->len, DMA_FROM_DEVICE); |
543 | } | 902 | } |
544 | 903 | ||
904 | static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as) | ||
905 | { | ||
906 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | ||
907 | } | ||
908 | |||
545 | static void | 909 | static void |
546 | atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, | 910 | atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as, |
547 | struct spi_message *msg, int status, int stay) | 911 | struct spi_message *msg, int stay) |
548 | { | 912 | { |
549 | if (!stay || status < 0) | 913 | if (!stay || as->done_status < 0) |
550 | cs_deactivate(as, msg->spi); | 914 | cs_deactivate(as, msg->spi); |
551 | else | 915 | else |
552 | as->stay = msg->spi; | 916 | as->stay = msg->spi; |
553 | 917 | ||
554 | list_del(&msg->queue); | 918 | list_del(&msg->queue); |
555 | msg->status = status; | 919 | msg->status = as->done_status; |
556 | 920 | ||
557 | dev_dbg(master->dev.parent, | 921 | dev_dbg(master->dev.parent, |
558 | "xfer complete: %u bytes transferred\n", | 922 | "xfer complete: %u bytes transferred\n", |
559 | msg->actual_length); | 923 | msg->actual_length); |
560 | 924 | ||
561 | spin_unlock(&as->lock); | 925 | atmel_spi_unlock(as); |
562 | msg->complete(msg->context); | 926 | msg->complete(msg->context); |
563 | spin_lock(&as->lock); | 927 | atmel_spi_lock(as); |
564 | 928 | ||
565 | as->current_transfer = NULL; | 929 | as->current_transfer = NULL; |
566 | as->next_transfer = NULL; | 930 | as->next_transfer = NULL; |
931 | as->done_status = 0; | ||
567 | 932 | ||
568 | /* continue if needed */ | 933 | /* continue if needed */ |
569 | if (list_empty(&as->queue) || as->stopping) | 934 | if (list_empty(&as->queue) || as->stopping) { |
570 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | 935 | if (as->use_pdc) |
571 | else | 936 | atmel_spi_disable_pdc_transfer(as); |
937 | } else { | ||
572 | atmel_spi_next_message(master); | 938 | atmel_spi_next_message(master); |
939 | } | ||
940 | } | ||
941 | |||
942 | /* Called from IRQ | ||
943 | * lock is held | ||
944 | * | ||
945 | * Must update "current_remaining_bytes" to keep track of data | ||
946 | * to transfer. | ||
947 | */ | ||
948 | static void | ||
949 | atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) | ||
950 | { | ||
951 | u8 *txp; | ||
952 | u8 *rxp; | ||
953 | unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; | ||
954 | |||
955 | if (xfer->rx_buf) { | ||
956 | rxp = ((u8 *)xfer->rx_buf) + xfer_pos; | ||
957 | *rxp = spi_readl(as, RDR); | ||
958 | } else { | ||
959 | spi_readl(as, RDR); | ||
960 | } | ||
961 | |||
962 | as->current_remaining_bytes--; | ||
963 | |||
964 | if (as->current_remaining_bytes) { | ||
965 | if (xfer->tx_buf) { | ||
966 | txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; | ||
967 | spi_writel(as, TDR, *txp); | ||
968 | } else { | ||
969 | spi_writel(as, TDR, 0); | ||
970 | } | ||
971 | } | ||
972 | } | ||
973 | |||
974 | /* Tasklet | ||
975 | * Called from DMA callback + pio transfer and overrun IRQ. | ||
976 | */ | ||
977 | static void atmel_spi_tasklet_func(unsigned long data) | ||
978 | { | ||
979 | struct spi_master *master = (struct spi_master *)data; | ||
980 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
981 | struct spi_message *msg; | ||
982 | struct spi_transfer *xfer; | ||
983 | |||
984 | dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n"); | ||
985 | |||
986 | atmel_spi_lock(as); | ||
987 | |||
988 | xfer = as->current_transfer; | ||
989 | |||
990 | if (xfer == NULL) | ||
991 | /* already been there */ | ||
992 | goto tasklet_out; | ||
993 | |||
994 | msg = list_entry(as->queue.next, struct spi_message, queue); | ||
995 | |||
996 | if (as->current_remaining_bytes == 0) { | ||
997 | if (as->done_status < 0) { | ||
998 | /* error happened (overrun) */ | ||
999 | if (atmel_spi_use_dma(as, xfer)) | ||
1000 | atmel_spi_stop_dma(as); | ||
1001 | } else { | ||
1002 | /* only update length if no error */ | ||
1003 | msg->actual_length += xfer->len; | ||
1004 | } | ||
1005 | |||
1006 | if (atmel_spi_use_dma(as, xfer)) | ||
1007 | if (!msg->is_dma_mapped) | ||
1008 | atmel_spi_dma_unmap_xfer(master, xfer); | ||
1009 | |||
1010 | if (xfer->delay_usecs) | ||
1011 | udelay(xfer->delay_usecs); | ||
1012 | |||
1013 | if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) { | ||
1014 | /* report completed (or erroneous) message */ | ||
1015 | atmel_spi_msg_done(master, as, msg, xfer->cs_change); | ||
1016 | } else { | ||
1017 | if (xfer->cs_change) { | ||
1018 | cs_deactivate(as, msg->spi); | ||
1019 | udelay(1); | ||
1020 | cs_activate(as, msg->spi); | ||
1021 | } | ||
1022 | |||
1023 | /* | ||
1024 | * Not done yet. Submit the next transfer. | ||
1025 | * | ||
1026 | * FIXME handle protocol options for xfer | ||
1027 | */ | ||
1028 | atmel_spi_dma_next_xfer(master, msg); | ||
1029 | } | ||
1030 | } else { | ||
1031 | /* | ||
1032 | * Keep going, we still have data to send in | ||
1033 | * the current transfer. | ||
1034 | */ | ||
1035 | atmel_spi_dma_next_xfer(master, msg); | ||
1036 | } | ||
1037 | |||
1038 | tasklet_out: | ||
1039 | atmel_spi_unlock(as); | ||
573 | } | 1040 | } |
574 | 1041 | ||
1042 | /* Interrupt | ||
1043 | * | ||
1044 | * No need for locking in this Interrupt handler: done_status is the | ||
1045 | * only information modified. What we need is the update of this field | ||
1046 | * before the tasklet runs. This is ensured by using a barrier. | ||
1047 | */ | ||
575 | static irqreturn_t | 1048 | static irqreturn_t |
576 | atmel_spi_interrupt(int irq, void *dev_id) | 1049 | atmel_spi_pio_interrupt(int irq, void *dev_id) |
1050 | { | ||
1051 | struct spi_master *master = dev_id; | ||
1052 | struct atmel_spi *as = spi_master_get_devdata(master); | ||
1053 | u32 status, pending, imr; | ||
1054 | struct spi_transfer *xfer; | ||
1055 | int ret = IRQ_NONE; | ||
1056 | |||
1057 | imr = spi_readl(as, IMR); | ||
1058 | status = spi_readl(as, SR); | ||
1059 | pending = status & imr; | ||
1060 | |||
1061 | if (pending & SPI_BIT(OVRES)) { | ||
1062 | ret = IRQ_HANDLED; | ||
1063 | spi_writel(as, IDR, SPI_BIT(OVRES)); | ||
1064 | dev_warn(master->dev.parent, "overrun\n"); | ||
1065 | |||
1066 | /* | ||
1067 | * When we get an overrun, we disregard the current | ||
1068 | * transfer. Data will not be copied back from any | ||
1069 | * bounce buffer and msg->actual_len will not be | ||
1070 | * updated with the last xfer. | ||
1071 | * | ||
1072 | * We will also not process any remaining transfers in | ||
1073 | * the message. | ||
1074 | * | ||
1075 | * All actions are done in tasklet with done_status indication | ||
1076 | */ | ||
1077 | as->done_status = -EIO; | ||
1078 | smp_wmb(); | ||
1079 | |||
1080 | /* Clear any overrun happening while cleaning up */ | ||
1081 | spi_readl(as, SR); | ||
1082 | |||
1083 | tasklet_schedule(&as->tasklet); | ||
1084 | |||
1085 | } else if (pending & SPI_BIT(RDRF)) { | ||
1086 | atmel_spi_lock(as); | ||
1087 | |||
1088 | if (as->current_remaining_bytes) { | ||
1089 | ret = IRQ_HANDLED; | ||
1090 | xfer = as->current_transfer; | ||
1091 | atmel_spi_pump_pio_data(as, xfer); | ||
1092 | if (!as->current_remaining_bytes) { | ||
1093 | /* no more data to xfer, kick tasklet */ | ||
1094 | spi_writel(as, IDR, pending); | ||
1095 | tasklet_schedule(&as->tasklet); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | atmel_spi_unlock(as); | ||
1100 | } else { | ||
1101 | WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending); | ||
1102 | ret = IRQ_HANDLED; | ||
1103 | spi_writel(as, IDR, pending); | ||
1104 | } | ||
1105 | |||
1106 | return ret; | ||
1107 | } | ||
1108 | |||
1109 | static irqreturn_t | ||
1110 | atmel_spi_pdc_interrupt(int irq, void *dev_id) | ||
577 | { | 1111 | { |
578 | struct spi_master *master = dev_id; | 1112 | struct spi_master *master = dev_id; |
579 | struct atmel_spi *as = spi_master_get_devdata(master); | 1113 | struct atmel_spi *as = spi_master_get_devdata(master); |
@@ -582,7 +1116,7 @@ atmel_spi_interrupt(int irq, void *dev_id) | |||
582 | u32 status, pending, imr; | 1116 | u32 status, pending, imr; |
583 | int ret = IRQ_NONE; | 1117 | int ret = IRQ_NONE; |
584 | 1118 | ||
585 | spin_lock(&as->lock); | 1119 | atmel_spi_lock(as); |
586 | 1120 | ||
587 | xfer = as->current_transfer; | 1121 | xfer = as->current_transfer; |
588 | msg = list_entry(as->queue.next, struct spi_message, queue); | 1122 | msg = list_entry(as->queue.next, struct spi_message, queue); |
@@ -641,7 +1175,8 @@ atmel_spi_interrupt(int irq, void *dev_id) | |||
641 | /* Clear any overrun happening while cleaning up */ | 1175 | /* Clear any overrun happening while cleaning up */ |
642 | spi_readl(as, SR); | 1176 | spi_readl(as, SR); |
643 | 1177 | ||
644 | atmel_spi_msg_done(master, as, msg, -EIO, 0); | 1178 | as->done_status = -EIO; |
1179 | atmel_spi_msg_done(master, as, msg, 0); | ||
645 | } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) { | 1180 | } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) { |
646 | ret = IRQ_HANDLED; | 1181 | ret = IRQ_HANDLED; |
647 | 1182 | ||
@@ -659,7 +1194,7 @@ atmel_spi_interrupt(int irq, void *dev_id) | |||
659 | 1194 | ||
660 | if (atmel_spi_xfer_is_last(msg, xfer)) { | 1195 | if (atmel_spi_xfer_is_last(msg, xfer)) { |
661 | /* report completed message */ | 1196 | /* report completed message */ |
662 | atmel_spi_msg_done(master, as, msg, 0, | 1197 | atmel_spi_msg_done(master, as, msg, |
663 | xfer->cs_change); | 1198 | xfer->cs_change); |
664 | } else { | 1199 | } else { |
665 | if (xfer->cs_change) { | 1200 | if (xfer->cs_change) { |
@@ -673,18 +1208,18 @@ atmel_spi_interrupt(int irq, void *dev_id) | |||
673 | * | 1208 | * |
674 | * FIXME handle protocol options for xfer | 1209 | * FIXME handle protocol options for xfer |
675 | */ | 1210 | */ |
676 | atmel_spi_next_xfer(master, msg); | 1211 | atmel_spi_pdc_next_xfer(master, msg); |
677 | } | 1212 | } |
678 | } else { | 1213 | } else { |
679 | /* | 1214 | /* |
680 | * Keep going, we still have data to send in | 1215 | * Keep going, we still have data to send in |
681 | * the current transfer. | 1216 | * the current transfer. |
682 | */ | 1217 | */ |
683 | atmel_spi_next_xfer(master, msg); | 1218 | atmel_spi_pdc_next_xfer(master, msg); |
684 | } | 1219 | } |
685 | } | 1220 | } |
686 | 1221 | ||
687 | spin_unlock(&as->lock); | 1222 | atmel_spi_unlock(as); |
688 | 1223 | ||
689 | return ret; | 1224 | return ret; |
690 | } | 1225 | } |
@@ -719,7 +1254,7 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
719 | } | 1254 | } |
720 | 1255 | ||
721 | /* see notes above re chipselect */ | 1256 | /* see notes above re chipselect */ |
722 | if (!atmel_spi_is_v2() | 1257 | if (!atmel_spi_is_v2(as) |
723 | && spi->chip_select == 0 | 1258 | && spi->chip_select == 0 |
724 | && (spi->mode & SPI_CS_HIGH)) { | 1259 | && (spi->mode & SPI_CS_HIGH)) { |
725 | dev_dbg(&spi->dev, "setup: can't be active-high\n"); | 1260 | dev_dbg(&spi->dev, "setup: can't be active-high\n"); |
@@ -728,7 +1263,7 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
728 | 1263 | ||
729 | /* v1 chips start out at half the peripheral bus speed. */ | 1264 | /* v1 chips start out at half the peripheral bus speed. */ |
730 | bus_hz = clk_get_rate(as->clk); | 1265 | bus_hz = clk_get_rate(as->clk); |
731 | if (!atmel_spi_is_v2()) | 1266 | if (!atmel_spi_is_v2(as)) |
732 | bus_hz /= 2; | 1267 | bus_hz /= 2; |
733 | 1268 | ||
734 | if (spi->max_speed_hz) { | 1269 | if (spi->max_speed_hz) { |
@@ -789,13 +1324,11 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
789 | spi->controller_state = asd; | 1324 | spi->controller_state = asd; |
790 | gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); | 1325 | gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH)); |
791 | } else { | 1326 | } else { |
792 | unsigned long flags; | 1327 | atmel_spi_lock(as); |
793 | |||
794 | spin_lock_irqsave(&as->lock, flags); | ||
795 | if (as->stay == spi) | 1328 | if (as->stay == spi) |
796 | as->stay = NULL; | 1329 | as->stay = NULL; |
797 | cs_deactivate(as, spi); | 1330 | cs_deactivate(as, spi); |
798 | spin_unlock_irqrestore(&as->lock, flags); | 1331 | atmel_spi_unlock(as); |
799 | } | 1332 | } |
800 | 1333 | ||
801 | asd->csr = csr; | 1334 | asd->csr = csr; |
@@ -804,7 +1337,7 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
804 | "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", | 1337 | "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", |
805 | bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); | 1338 | bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); |
806 | 1339 | ||
807 | if (!atmel_spi_is_v2()) | 1340 | if (!atmel_spi_is_v2(as)) |
808 | spi_writel(as, CSR0 + 4 * spi->chip_select, csr); | 1341 | spi_writel(as, CSR0 + 4 * spi->chip_select, csr); |
809 | 1342 | ||
810 | return 0; | 1343 | return 0; |
@@ -814,7 +1347,6 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
814 | { | 1347 | { |
815 | struct atmel_spi *as; | 1348 | struct atmel_spi *as; |
816 | struct spi_transfer *xfer; | 1349 | struct spi_transfer *xfer; |
817 | unsigned long flags; | ||
818 | struct device *controller = spi->master->dev.parent; | 1350 | struct device *controller = spi->master->dev.parent; |
819 | u8 bits; | 1351 | u8 bits; |
820 | struct atmel_spi_device *asd; | 1352 | struct atmel_spi_device *asd; |
@@ -854,13 +1386,10 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
854 | 1386 | ||
855 | /* | 1387 | /* |
856 | * DMA map early, for performance (empties dcache ASAP) and | 1388 | * DMA map early, for performance (empties dcache ASAP) and |
857 | * better fault reporting. This is a DMA-only driver. | 1389 | * better fault reporting. |
858 | * | ||
859 | * NOTE that if dma_unmap_single() ever starts to do work on | ||
860 | * platforms supported by this driver, we would need to clean | ||
861 | * up mappings for previously-mapped transfers. | ||
862 | */ | 1390 | */ |
863 | if (!msg->is_dma_mapped) { | 1391 | if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer) |
1392 | || as->use_pdc)) { | ||
864 | if (atmel_spi_dma_map_xfer(as, xfer) < 0) | 1393 | if (atmel_spi_dma_map_xfer(as, xfer) < 0) |
865 | return -ENOMEM; | 1394 | return -ENOMEM; |
866 | } | 1395 | } |
@@ -879,11 +1408,11 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) | |||
879 | msg->status = -EINPROGRESS; | 1408 | msg->status = -EINPROGRESS; |
880 | msg->actual_length = 0; | 1409 | msg->actual_length = 0; |
881 | 1410 | ||
882 | spin_lock_irqsave(&as->lock, flags); | 1411 | atmel_spi_lock(as); |
883 | list_add_tail(&msg->queue, &as->queue); | 1412 | list_add_tail(&msg->queue, &as->queue); |
884 | if (!as->current_transfer) | 1413 | if (!as->current_transfer) |
885 | atmel_spi_next_message(spi->master); | 1414 | atmel_spi_next_message(spi->master); |
886 | spin_unlock_irqrestore(&as->lock, flags); | 1415 | atmel_spi_unlock(as); |
887 | 1416 | ||
888 | return 0; | 1417 | return 0; |
889 | } | 1418 | } |
@@ -893,23 +1422,39 @@ static void atmel_spi_cleanup(struct spi_device *spi) | |||
893 | struct atmel_spi *as = spi_master_get_devdata(spi->master); | 1422 | struct atmel_spi *as = spi_master_get_devdata(spi->master); |
894 | struct atmel_spi_device *asd = spi->controller_state; | 1423 | struct atmel_spi_device *asd = spi->controller_state; |
895 | unsigned gpio = (unsigned) spi->controller_data; | 1424 | unsigned gpio = (unsigned) spi->controller_data; |
896 | unsigned long flags; | ||
897 | 1425 | ||
898 | if (!asd) | 1426 | if (!asd) |
899 | return; | 1427 | return; |
900 | 1428 | ||
901 | spin_lock_irqsave(&as->lock, flags); | 1429 | atmel_spi_lock(as); |
902 | if (as->stay == spi) { | 1430 | if (as->stay == spi) { |
903 | as->stay = NULL; | 1431 | as->stay = NULL; |
904 | cs_deactivate(as, spi); | 1432 | cs_deactivate(as, spi); |
905 | } | 1433 | } |
906 | spin_unlock_irqrestore(&as->lock, flags); | 1434 | atmel_spi_unlock(as); |
907 | 1435 | ||
908 | spi->controller_state = NULL; | 1436 | spi->controller_state = NULL; |
909 | gpio_free(gpio); | 1437 | gpio_free(gpio); |
910 | kfree(asd); | 1438 | kfree(asd); |
911 | } | 1439 | } |
912 | 1440 | ||
1441 | static inline unsigned int atmel_get_version(struct atmel_spi *as) | ||
1442 | { | ||
1443 | return spi_readl(as, VERSION) & 0x00000fff; | ||
1444 | } | ||
1445 | |||
1446 | static void atmel_get_caps(struct atmel_spi *as) | ||
1447 | { | ||
1448 | unsigned int version; | ||
1449 | |||
1450 | version = atmel_get_version(as); | ||
1451 | dev_info(&as->pdev->dev, "version: 0x%x\n", version); | ||
1452 | |||
1453 | as->caps.is_spi2 = version > 0x121; | ||
1454 | as->caps.has_wdrbt = version >= 0x210; | ||
1455 | as->caps.has_dma_support = version >= 0x212; | ||
1456 | } | ||
1457 | |||
913 | /*-------------------------------------------------------------------------*/ | 1458 | /*-------------------------------------------------------------------------*/ |
914 | 1459 | ||
915 | static int atmel_spi_probe(struct platform_device *pdev) | 1460 | static int atmel_spi_probe(struct platform_device *pdev) |
@@ -963,15 +1508,39 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
963 | 1508 | ||
964 | spin_lock_init(&as->lock); | 1509 | spin_lock_init(&as->lock); |
965 | INIT_LIST_HEAD(&as->queue); | 1510 | INIT_LIST_HEAD(&as->queue); |
1511 | |||
966 | as->pdev = pdev; | 1512 | as->pdev = pdev; |
967 | as->regs = ioremap(regs->start, resource_size(regs)); | 1513 | as->regs = ioremap(regs->start, resource_size(regs)); |
968 | if (!as->regs) | 1514 | if (!as->regs) |
969 | goto out_free_buffer; | 1515 | goto out_free_buffer; |
1516 | as->phybase = regs->start; | ||
970 | as->irq = irq; | 1517 | as->irq = irq; |
971 | as->clk = clk; | 1518 | as->clk = clk; |
972 | 1519 | ||
973 | ret = request_irq(irq, atmel_spi_interrupt, 0, | 1520 | atmel_get_caps(as); |
974 | dev_name(&pdev->dev), master); | 1521 | |
1522 | as->use_dma = false; | ||
1523 | as->use_pdc = false; | ||
1524 | if (as->caps.has_dma_support) { | ||
1525 | if (atmel_spi_configure_dma(as) == 0) | ||
1526 | as->use_dma = true; | ||
1527 | } else { | ||
1528 | as->use_pdc = true; | ||
1529 | } | ||
1530 | |||
1531 | if (as->caps.has_dma_support && !as->use_dma) | ||
1532 | dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n"); | ||
1533 | |||
1534 | if (as->use_pdc) { | ||
1535 | ret = request_irq(irq, atmel_spi_pdc_interrupt, 0, | ||
1536 | dev_name(&pdev->dev), master); | ||
1537 | } else { | ||
1538 | tasklet_init(&as->tasklet, atmel_spi_tasklet_func, | ||
1539 | (unsigned long)master); | ||
1540 | |||
1541 | ret = request_irq(irq, atmel_spi_pio_interrupt, 0, | ||
1542 | dev_name(&pdev->dev), master); | ||
1543 | } | ||
975 | if (ret) | 1544 | if (ret) |
976 | goto out_unmap_regs; | 1545 | goto out_unmap_regs; |
977 | 1546 | ||
@@ -979,8 +1548,15 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
979 | clk_enable(clk); | 1548 | clk_enable(clk); |
980 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1549 | spi_writel(as, CR, SPI_BIT(SWRST)); |
981 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1550 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
982 | spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); | 1551 | if (as->caps.has_wdrbt) { |
983 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | 1552 | spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS) |
1553 | | SPI_BIT(MSTR)); | ||
1554 | } else { | ||
1555 | spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS)); | ||
1556 | } | ||
1557 | |||
1558 | if (as->use_pdc) | ||
1559 | spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS)); | ||
984 | spi_writel(as, CR, SPI_BIT(SPIEN)); | 1560 | spi_writel(as, CR, SPI_BIT(SPIEN)); |
985 | 1561 | ||
986 | /* go! */ | 1562 | /* go! */ |
@@ -989,11 +1565,14 @@ static int atmel_spi_probe(struct platform_device *pdev) | |||
989 | 1565 | ||
990 | ret = spi_register_master(master); | 1566 | ret = spi_register_master(master); |
991 | if (ret) | 1567 | if (ret) |
992 | goto out_reset_hw; | 1568 | goto out_free_dma; |
993 | 1569 | ||
994 | return 0; | 1570 | return 0; |
995 | 1571 | ||
996 | out_reset_hw: | 1572 | out_free_dma: |
1573 | if (as->use_dma) | ||
1574 | atmel_spi_release_dma(as); | ||
1575 | |||
997 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1576 | spi_writel(as, CR, SPI_BIT(SWRST)); |
998 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1577 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
999 | clk_disable(clk); | 1578 | clk_disable(clk); |
@@ -1001,6 +1580,8 @@ out_reset_hw: | |||
1001 | out_unmap_regs: | 1580 | out_unmap_regs: |
1002 | iounmap(as->regs); | 1581 | iounmap(as->regs); |
1003 | out_free_buffer: | 1582 | out_free_buffer: |
1583 | if (!as->use_pdc) | ||
1584 | tasklet_kill(&as->tasklet); | ||
1004 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, | 1585 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, |
1005 | as->buffer_dma); | 1586 | as->buffer_dma); |
1006 | out_free: | 1587 | out_free: |
@@ -1014,10 +1595,16 @@ static int atmel_spi_remove(struct platform_device *pdev) | |||
1014 | struct spi_master *master = platform_get_drvdata(pdev); | 1595 | struct spi_master *master = platform_get_drvdata(pdev); |
1015 | struct atmel_spi *as = spi_master_get_devdata(master); | 1596 | struct atmel_spi *as = spi_master_get_devdata(master); |
1016 | struct spi_message *msg; | 1597 | struct spi_message *msg; |
1598 | struct spi_transfer *xfer; | ||
1017 | 1599 | ||
1018 | /* reset the hardware and block queue progress */ | 1600 | /* reset the hardware and block queue progress */ |
1019 | spin_lock_irq(&as->lock); | 1601 | spin_lock_irq(&as->lock); |
1020 | as->stopping = 1; | 1602 | as->stopping = 1; |
1603 | if (as->use_dma) { | ||
1604 | atmel_spi_stop_dma(as); | ||
1605 | atmel_spi_release_dma(as); | ||
1606 | } | ||
1607 | |||
1021 | spi_writel(as, CR, SPI_BIT(SWRST)); | 1608 | spi_writel(as, CR, SPI_BIT(SWRST)); |
1022 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ | 1609 | spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */ |
1023 | spi_readl(as, SR); | 1610 | spi_readl(as, SR); |
@@ -1025,13 +1612,18 @@ static int atmel_spi_remove(struct platform_device *pdev) | |||
1025 | 1612 | ||
1026 | /* Terminate remaining queued transfers */ | 1613 | /* Terminate remaining queued transfers */ |
1027 | list_for_each_entry(msg, &as->queue, queue) { | 1614 | list_for_each_entry(msg, &as->queue, queue) { |
1028 | /* REVISIT unmapping the dma is a NOP on ARM and AVR32 | 1615 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { |
1029 | * but we shouldn't depend on that... | 1616 | if (!msg->is_dma_mapped |
1030 | */ | 1617 | && (atmel_spi_use_dma(as, xfer) |
1618 | || as->use_pdc)) | ||
1619 | atmel_spi_dma_unmap_xfer(master, xfer); | ||
1620 | } | ||
1031 | msg->status = -ESHUTDOWN; | 1621 | msg->status = -ESHUTDOWN; |
1032 | msg->complete(msg->context); | 1622 | msg->complete(msg->context); |
1033 | } | 1623 | } |
1034 | 1624 | ||
1625 | if (!as->use_pdc) | ||
1626 | tasklet_kill(&as->tasklet); | ||
1035 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, | 1627 | dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer, |
1036 | as->buffer_dma); | 1628 | as->buffer_dma); |
1037 | 1629 | ||
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c new file mode 100644 index 000000000000..89c0b5033114 --- /dev/null +++ b/drivers/spi/spi-bcm2835.c | |||
@@ -0,0 +1,422 @@ | |||
1 | /* | ||
2 | * Driver for Broadcom BCM2835 SPI Controllers | ||
3 | * | ||
4 | * Copyright (C) 2012 Chris Boot | ||
5 | * Copyright (C) 2013 Stephen Warren | ||
6 | * | ||
7 | * This driver is inspired by: | ||
8 | * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org> | ||
9 | * spi-atmel.c, Copyright (C) 2006 Atmel Corporation | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/clk.h> | ||
27 | #include <linux/completion.h> | ||
28 | #include <linux/delay.h> | ||
29 | #include <linux/err.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/io.h> | ||
32 | #include <linux/kernel.h> | ||
33 | #include <linux/module.h> | ||
34 | #include <linux/of.h> | ||
35 | #include <linux/of_irq.h> | ||
36 | #include <linux/of_device.h> | ||
37 | #include <linux/spi/spi.h> | ||
38 | |||
39 | /* SPI register offsets */ | ||
40 | #define BCM2835_SPI_CS 0x00 | ||
41 | #define BCM2835_SPI_FIFO 0x04 | ||
42 | #define BCM2835_SPI_CLK 0x08 | ||
43 | #define BCM2835_SPI_DLEN 0x0c | ||
44 | #define BCM2835_SPI_LTOH 0x10 | ||
45 | #define BCM2835_SPI_DC 0x14 | ||
46 | |||
47 | /* Bitfields in CS */ | ||
48 | #define BCM2835_SPI_CS_LEN_LONG 0x02000000 | ||
49 | #define BCM2835_SPI_CS_DMA_LEN 0x01000000 | ||
50 | #define BCM2835_SPI_CS_CSPOL2 0x00800000 | ||
51 | #define BCM2835_SPI_CS_CSPOL1 0x00400000 | ||
52 | #define BCM2835_SPI_CS_CSPOL0 0x00200000 | ||
53 | #define BCM2835_SPI_CS_RXF 0x00100000 | ||
54 | #define BCM2835_SPI_CS_RXR 0x00080000 | ||
55 | #define BCM2835_SPI_CS_TXD 0x00040000 | ||
56 | #define BCM2835_SPI_CS_RXD 0x00020000 | ||
57 | #define BCM2835_SPI_CS_DONE 0x00010000 | ||
58 | #define BCM2835_SPI_CS_LEN 0x00002000 | ||
59 | #define BCM2835_SPI_CS_REN 0x00001000 | ||
60 | #define BCM2835_SPI_CS_ADCS 0x00000800 | ||
61 | #define BCM2835_SPI_CS_INTR 0x00000400 | ||
62 | #define BCM2835_SPI_CS_INTD 0x00000200 | ||
63 | #define BCM2835_SPI_CS_DMAEN 0x00000100 | ||
64 | #define BCM2835_SPI_CS_TA 0x00000080 | ||
65 | #define BCM2835_SPI_CS_CSPOL 0x00000040 | ||
66 | #define BCM2835_SPI_CS_CLEAR_RX 0x00000020 | ||
67 | #define BCM2835_SPI_CS_CLEAR_TX 0x00000010 | ||
68 | #define BCM2835_SPI_CS_CPOL 0x00000008 | ||
69 | #define BCM2835_SPI_CS_CPHA 0x00000004 | ||
70 | #define BCM2835_SPI_CS_CS_10 0x00000002 | ||
71 | #define BCM2835_SPI_CS_CS_01 0x00000001 | ||
72 | |||
73 | #define BCM2835_SPI_TIMEOUT_MS 30000 | ||
74 | #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_NO_CS) | ||
75 | |||
76 | #define DRV_NAME "spi-bcm2835" | ||
77 | |||
78 | struct bcm2835_spi { | ||
79 | void __iomem *regs; | ||
80 | struct clk *clk; | ||
81 | int irq; | ||
82 | struct completion done; | ||
83 | const u8 *tx_buf; | ||
84 | u8 *rx_buf; | ||
85 | int len; | ||
86 | }; | ||
87 | |||
88 | static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) | ||
89 | { | ||
90 | return readl(bs->regs + reg); | ||
91 | } | ||
92 | |||
93 | static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val) | ||
94 | { | ||
95 | writel(val, bs->regs + reg); | ||
96 | } | ||
97 | |||
98 | static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs, int len) | ||
99 | { | ||
100 | u8 byte; | ||
101 | |||
102 | while (len--) { | ||
103 | byte = bcm2835_rd(bs, BCM2835_SPI_FIFO); | ||
104 | if (bs->rx_buf) | ||
105 | *bs->rx_buf++ = byte; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs, int len) | ||
110 | { | ||
111 | u8 byte; | ||
112 | |||
113 | if (len > bs->len) | ||
114 | len = bs->len; | ||
115 | |||
116 | while (len--) { | ||
117 | byte = bs->tx_buf ? *bs->tx_buf++ : 0; | ||
118 | bcm2835_wr(bs, BCM2835_SPI_FIFO, byte); | ||
119 | bs->len--; | ||
120 | } | ||
121 | } | ||
122 | |||
123 | static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) | ||
124 | { | ||
125 | struct spi_master *master = dev_id; | ||
126 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
127 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
128 | |||
129 | /* | ||
130 | * RXR - RX needs Reading. This means 12 (or more) bytes have been | ||
131 | * transmitted and hence 12 (or more) bytes have been received. | ||
132 | * | ||
133 | * The FIFO is 16-bytes deep. We check for this interrupt to keep the | ||
134 | * FIFO full; we have a 4-byte-time buffer for IRQ latency. We check | ||
135 | * this before DONE (TX empty) just in case we delayed processing this | ||
136 | * interrupt for some reason. | ||
137 | * | ||
138 | * We only check for this case if we have more bytes to TX; at the end | ||
139 | * of the transfer, we ignore this pipelining optimization, and let | ||
140 | * bcm2835_spi_finish_transfer() drain the RX FIFO. | ||
141 | */ | ||
142 | if (bs->len && (cs & BCM2835_SPI_CS_RXR)) { | ||
143 | /* Read 12 bytes of data */ | ||
144 | bcm2835_rd_fifo(bs, 12); | ||
145 | |||
146 | /* Write up to 12 bytes */ | ||
147 | bcm2835_wr_fifo(bs, 12); | ||
148 | |||
149 | /* | ||
150 | * We must have written something to the TX FIFO due to the | ||
151 | * bs->len check above, so cannot be DONE. Hence, return | ||
152 | * early. Note that DONE could also be set if we serviced an | ||
153 | * RXR interrupt really late. | ||
154 | */ | ||
155 | return IRQ_HANDLED; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * DONE - TX empty. This occurs when we first enable the transfer | ||
160 | * since we do not pre-fill the TX FIFO. At any other time, given that | ||
161 | * we refill the TX FIFO above based on RXR, and hence ignore DONE if | ||
162 | * RXR is set, DONE really does mean end-of-transfer. | ||
163 | */ | ||
164 | if (cs & BCM2835_SPI_CS_DONE) { | ||
165 | if (bs->len) { /* First interrupt in a transfer */ | ||
166 | bcm2835_wr_fifo(bs, 16); | ||
167 | } else { /* Transfer complete */ | ||
168 | /* Disable SPI interrupts */ | ||
169 | cs &= ~(BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD); | ||
170 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
171 | |||
172 | /* | ||
173 | * Wake up bcm2835_spi_transfer_one(), which will call | ||
174 | * bcm2835_spi_finish_transfer(), to drain the RX FIFO. | ||
175 | */ | ||
176 | complete(&bs->done); | ||
177 | } | ||
178 | |||
179 | return IRQ_HANDLED; | ||
180 | } | ||
181 | |||
182 | return IRQ_NONE; | ||
183 | } | ||
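The block comments above describe the interrupt-driven refill scheme: the FIFO is 16 bytes deep, RXR fires once 12 received bytes are pending, and the handler drains 12 bytes and tops the TX FIFO back up so roughly four byte-times of IRQ latency can be absorbed; DONE is only treated as end-of-transfer once no TX bytes remain. A stand-alone sketch of that bookkeeping (the constants, buffer length and simulation loop are illustrative, not driver code):

#include <stdio.h>

#define FIFO_DEPTH    16   /* hardware FIFO depth */
#define RXR_CHUNK     12   /* bytes drained/refilled per RXR interrupt */

int main(void)
{
	int remaining = 40;    /* bytes left to queue for TX (tfr->len) */
	int in_fifo = 0;       /* bytes currently sitting in the FIFOs */
	int irqs = 0;

	/* First DONE interrupt: the TX FIFO is empty, fill it completely. */
	in_fifo = remaining < FIFO_DEPTH ? remaining : FIFO_DEPTH;
	remaining -= in_fifo;
	irqs++;

	/* Each RXR interrupt: read 12 bytes, then write up to 12 more. */
	while (remaining > 0) {
		int wr = remaining < RXR_CHUNK ? remaining : RXR_CHUNK;

		in_fifo -= RXR_CHUNK;   /* 12 bytes moved out of the RX FIFO */
		in_fifo += wr;          /* up to 12 bytes queued in the TX FIFO */
		remaining -= wr;
		irqs++;
	}

	irqs++;                        /* final DONE interrupt signals completion */

	/* Whatever is still in flight is drained by the finish-transfer path. */
	printf("%d interrupts serviced, %d bytes drained at end of transfer\n",
	       irqs, in_fifo);
	return 0;
}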
184 | |||
185 | static int bcm2835_spi_start_transfer(struct spi_device *spi, | ||
186 | struct spi_transfer *tfr) | ||
187 | { | ||
188 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | ||
189 | unsigned long spi_hz, clk_hz, cdiv; | ||
190 | u32 cs = BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA; | ||
191 | |||
192 | spi_hz = tfr->speed_hz; | ||
193 | clk_hz = clk_get_rate(bs->clk); | ||
194 | |||
195 | if (spi_hz >= clk_hz / 2) { | ||
196 | cdiv = 2; /* clk_hz/2 is the fastest we can go */ | ||
197 | } else if (spi_hz) { | ||
198 | /* CDIV must be a power of two */ | ||
199 | cdiv = roundup_pow_of_two(DIV_ROUND_UP(clk_hz, spi_hz)); | ||
200 | |||
201 | if (cdiv >= 65536) | ||
202 | cdiv = 0; /* 0 is the slowest we can go */ | ||
203 | } else | ||
204 | cdiv = 0; /* 0 is the slowest we can go */ | ||
205 | |||
206 | if (spi->mode & SPI_CPOL) | ||
207 | cs |= BCM2835_SPI_CS_CPOL; | ||
208 | if (spi->mode & SPI_CPHA) | ||
209 | cs |= BCM2835_SPI_CS_CPHA; | ||
210 | |||
211 | if (!(spi->mode & SPI_NO_CS)) { | ||
212 | if (spi->mode & SPI_CS_HIGH) { | ||
213 | cs |= BCM2835_SPI_CS_CSPOL; | ||
214 | cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select; | ||
215 | } | ||
216 | |||
217 | cs |= spi->chip_select; | ||
218 | } | ||
219 | |||
220 | INIT_COMPLETION(bs->done); | ||
221 | bs->tx_buf = tfr->tx_buf; | ||
222 | bs->rx_buf = tfr->rx_buf; | ||
223 | bs->len = tfr->len; | ||
224 | |||
225 | bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv); | ||
226 | /* | ||
227 | * Enable the HW block. This will immediately trigger a DONE (TX | ||
228 | * empty) interrupt, upon which we will fill the TX FIFO with the | ||
229 | * first TX bytes. Pre-filling the TX FIFO here to avoid the | ||
230 | * interrupt doesn't work:-( | ||
231 | */ | ||
232 | bcm2835_wr(bs, BCM2835_SPI_CS, cs); | ||
233 | |||
234 | return 0; | ||
235 | } | ||
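The divider selection above picks clk_hz/2 whenever the requested rate is at least half the core clock; otherwise it rounds clk_hz/spi_hz up to the next power of two and clamps to the 16-bit CDIV field, where 0 encodes 65536, the slowest setting. A user-space sketch with DIV_ROUND_UP() and roundup_pow_of_two() open-coded; the 250 MHz core clock used in the example is an assumption:

#include <stdio.h>

/* Mirror of the CDIV selection above, with the kernel helpers open-coded. */
static unsigned long pick_cdiv(unsigned long clk_hz, unsigned long spi_hz)
{
	unsigned long div, cdiv;

	if (spi_hz >= clk_hz / 2)
		return 2;                         /* fastest supported divider */
	if (!spi_hz)
		return 0;                         /* 0 encodes 65536, the slowest */

	div = (clk_hz + spi_hz - 1) / spi_hz;     /* DIV_ROUND_UP(clk_hz, spi_hz) */
	for (cdiv = 1; cdiv < div; cdiv <<= 1)    /* roundup_pow_of_two(div) */
		;

	return cdiv >= 65536 ? 0 : cdiv;
}

int main(void)
{
	unsigned long clk_hz = 250000000;         /* assumed core clock rate */
	unsigned long rates[] = { 125000000, 10000000, 100000, 1 };

	for (int i = 0; i < 4; i++)
		printf("%9lu Hz -> CDIV %lu\n", rates[i], pick_cdiv(clk_hz, rates[i]));
	return 0;
}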
236 | |||
237 | static int bcm2835_spi_finish_transfer(struct spi_device *spi, | ||
238 | struct spi_transfer *tfr, bool cs_change) | ||
239 | { | ||
240 | struct bcm2835_spi *bs = spi_master_get_devdata(spi->master); | ||
241 | u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
242 | |||
243 | /* Drain RX FIFO */ | ||
244 | while (cs & BCM2835_SPI_CS_RXD) { | ||
245 | bcm2835_rd_fifo(bs, 1); | ||
246 | cs = bcm2835_rd(bs, BCM2835_SPI_CS); | ||
247 | } | ||
248 | |||
249 | if (tfr->delay_usecs) | ||
250 | udelay(tfr->delay_usecs); | ||
251 | |||
252 | if (cs_change) | ||
253 | /* Clear TA flag */ | ||
254 | bcm2835_wr(bs, BCM2835_SPI_CS, cs & ~BCM2835_SPI_CS_TA); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int bcm2835_spi_transfer_one(struct spi_master *master, | ||
260 | struct spi_message *mesg) | ||
261 | { | ||
262 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
263 | struct spi_transfer *tfr; | ||
264 | struct spi_device *spi = mesg->spi; | ||
265 | int err = 0; | ||
266 | unsigned int timeout; | ||
267 | bool cs_change; | ||
268 | |||
269 | list_for_each_entry(tfr, &mesg->transfers, transfer_list) { | ||
270 | err = bcm2835_spi_start_transfer(spi, tfr); | ||
271 | if (err) | ||
272 | goto out; | ||
273 | |||
274 | timeout = wait_for_completion_timeout(&bs->done, | ||
275 | msecs_to_jiffies(BCM2835_SPI_TIMEOUT_MS)); | ||
276 | if (!timeout) { | ||
277 | err = -ETIMEDOUT; | ||
278 | goto out; | ||
279 | } | ||
280 | |||
281 | cs_change = tfr->cs_change || | ||
282 | list_is_last(&tfr->transfer_list, &mesg->transfers); | ||
283 | |||
284 | err = bcm2835_spi_finish_transfer(spi, tfr, cs_change); | ||
285 | if (err) | ||
286 | goto out; | ||
287 | |||
288 | mesg->actual_length += (tfr->len - bs->len); | ||
289 | } | ||
290 | |||
291 | out: | ||
292 | /* Clear FIFOs, and disable the HW block */ | ||
293 | bcm2835_wr(bs, BCM2835_SPI_CS, | ||
294 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | ||
295 | mesg->status = err; | ||
296 | spi_finalize_current_message(master); | ||
297 | |||
298 | return 0; | ||
299 | } | ||
300 | |||
301 | static int bcm2835_spi_probe(struct platform_device *pdev) | ||
302 | { | ||
303 | struct spi_master *master; | ||
304 | struct bcm2835_spi *bs; | ||
305 | struct resource *res; | ||
306 | int err; | ||
307 | |||
308 | master = spi_alloc_master(&pdev->dev, sizeof(*bs)); | ||
309 | if (!master) { | ||
310 | dev_err(&pdev->dev, "spi_alloc_master() failed\n"); | ||
311 | return -ENOMEM; | ||
312 | } | ||
313 | |||
314 | platform_set_drvdata(pdev, master); | ||
315 | |||
316 | master->mode_bits = BCM2835_SPI_MODE_BITS; | ||
317 | master->bits_per_word_mask = BIT(8 - 1); | ||
318 | master->bus_num = -1; | ||
319 | master->num_chipselect = 3; | ||
320 | master->transfer_one_message = bcm2835_spi_transfer_one; | ||
321 | master->dev.of_node = pdev->dev.of_node; | ||
322 | |||
323 | bs = spi_master_get_devdata(master); | ||
324 | |||
325 | init_completion(&bs->done); | ||
326 | |||
327 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
328 | if (!res) { | ||
329 | dev_err(&pdev->dev, "could not get memory resource\n"); | ||
330 | err = -ENODEV; | ||
331 | goto out_master_put; | ||
332 | } | ||
333 | |||
334 | bs->regs = devm_request_and_ioremap(&pdev->dev, res); | ||
335 | if (!bs->regs) { | ||
336 | dev_err(&pdev->dev, "could not request/map memory region\n"); | ||
337 | err = -ENODEV; | ||
338 | goto out_master_put; | ||
339 | } | ||
340 | |||
341 | bs->clk = devm_clk_get(&pdev->dev, NULL); | ||
342 | if (IS_ERR(bs->clk)) { | ||
343 | err = PTR_ERR(bs->clk); | ||
344 | dev_err(&pdev->dev, "could not get clk: %d\n", err); | ||
345 | goto out_master_put; | ||
346 | } | ||
347 | |||
348 | bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0); | ||
349 | if (bs->irq <= 0) { | ||
350 | dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq); | ||
351 | err = bs->irq ? bs->irq : -ENODEV; | ||
352 | goto out_master_put; | ||
353 | } | ||
354 | |||
355 | clk_prepare_enable(bs->clk); | ||
356 | |||
357 | err = request_irq(bs->irq, bcm2835_spi_interrupt, 0, | ||
358 | dev_name(&pdev->dev), master); | ||
359 | if (err) { | ||
360 | dev_err(&pdev->dev, "could not request IRQ: %d\n", err); | ||
361 | goto out_clk_disable; | ||
362 | } | ||
363 | |||
364 | /* initialise the hardware */ | ||
365 | bcm2835_wr(bs, BCM2835_SPI_CS, | ||
366 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | ||
367 | |||
368 | err = spi_register_master(master); | ||
369 | if (err) { | ||
370 | dev_err(&pdev->dev, "could not register SPI master: %d\n", err); | ||
371 | goto out_free_irq; | ||
372 | } | ||
373 | |||
374 | return 0; | ||
375 | |||
376 | out_free_irq: | ||
377 | free_irq(bs->irq, master); | ||
378 | out_clk_disable: | ||
379 | clk_disable_unprepare(bs->clk); | ||
380 | out_master_put: | ||
381 | spi_master_put(master); | ||
382 | return err; | ||
383 | } | ||
384 | |||
385 | static int bcm2835_spi_remove(struct platform_device *pdev) | ||
386 | { | ||
387 | struct spi_master *master = platform_get_drvdata(pdev); | ||
388 | struct bcm2835_spi *bs = spi_master_get_devdata(master); | ||
389 | |||
390 | free_irq(bs->irq, master); | ||
391 | spi_unregister_master(master); | ||
392 | |||
393 | /* Clear FIFOs, and disable the HW block */ | ||
394 | bcm2835_wr(bs, BCM2835_SPI_CS, | ||
395 | BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX); | ||
396 | |||
397 | clk_disable_unprepare(bs->clk); | ||
398 | spi_master_put(master); | ||
399 | |||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | static const struct of_device_id bcm2835_spi_match[] = { | ||
404 | { .compatible = "brcm,bcm2835-spi", }, | ||
405 | {} | ||
406 | }; | ||
407 | MODULE_DEVICE_TABLE(of, bcm2835_spi_match); | ||
408 | |||
409 | static struct platform_driver bcm2835_spi_driver = { | ||
410 | .driver = { | ||
411 | .name = DRV_NAME, | ||
412 | .owner = THIS_MODULE, | ||
413 | .of_match_table = bcm2835_spi_match, | ||
414 | }, | ||
415 | .probe = bcm2835_spi_probe, | ||
416 | .remove = bcm2835_spi_remove, | ||
417 | }; | ||
418 | module_platform_driver(bcm2835_spi_driver); | ||
419 | |||
420 | MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835"); | ||
421 | MODULE_AUTHOR("Chris Boot <bootc@bootc.net>"); | ||
422 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c index d7df435d962e..a4ec5f4ec817 100644 --- a/drivers/spi/spi-bcm63xx.c +++ b/drivers/spi/spi-bcm63xx.c | |||
@@ -46,7 +46,6 @@ struct bcm63xx_spi { | |||
46 | int irq; | 46 | int irq; |
47 | 47 | ||
48 | /* Platform data */ | 48 | /* Platform data */ |
49 | u32 speed_hz; | ||
50 | unsigned fifo_size; | 49 | unsigned fifo_size; |
51 | unsigned int msg_type_shift; | 50 | unsigned int msg_type_shift; |
52 | unsigned int msg_ctl_width; | 51 | unsigned int msg_ctl_width; |
@@ -93,40 +92,16 @@ static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = { | |||
93 | { 391000, SPI_CLK_0_391MHZ } | 92 | { 391000, SPI_CLK_0_391MHZ } |
94 | }; | 93 | }; |
95 | 94 | ||
96 | static int bcm63xx_spi_check_transfer(struct spi_device *spi, | ||
97 | struct spi_transfer *t) | ||
98 | { | ||
99 | u8 bits_per_word; | ||
100 | |||
101 | bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; | ||
102 | if (bits_per_word != 8) { | ||
103 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", | ||
104 | __func__, bits_per_word); | ||
105 | return -EINVAL; | ||
106 | } | ||
107 | |||
108 | if (spi->chip_select > spi->master->num_chipselect) { | ||
109 | dev_err(&spi->dev, "%s, unsupported slave %d\n", | ||
110 | __func__, spi->chip_select); | ||
111 | return -EINVAL; | ||
112 | } | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | 95 | static void bcm63xx_spi_setup_transfer(struct spi_device *spi, |
118 | struct spi_transfer *t) | 96 | struct spi_transfer *t) |
119 | { | 97 | { |
120 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); | 98 | struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); |
121 | u32 hz; | ||
122 | u8 clk_cfg, reg; | 99 | u8 clk_cfg, reg; |
123 | int i; | 100 | int i; |
124 | 101 | ||
125 | hz = (t) ? t->speed_hz : spi->max_speed_hz; | ||
126 | |||
127 | /* Find the closest clock configuration */ | 102 | /* Find the closest clock configuration */ |
128 | for (i = 0; i < SPI_CLK_MASK; i++) { | 103 | for (i = 0; i < SPI_CLK_MASK; i++) { |
129 | if (hz >= bcm63xx_spi_freq_table[i][0]) { | 104 | if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) { |
130 | clk_cfg = bcm63xx_spi_freq_table[i][1]; | 105 | clk_cfg = bcm63xx_spi_freq_table[i][1]; |
131 | break; | 106 | break; |
132 | } | 107 | } |
@@ -143,7 +118,7 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
143 | 118 | ||
144 | bcm_spi_writeb(bs, reg, SPI_CLK_CFG); | 119 | bcm_spi_writeb(bs, reg, SPI_CLK_CFG); |
145 | dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", | 120 | dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n", |
146 | clk_cfg, hz); | 121 | clk_cfg, t->speed_hz); |
147 | } | 122 | } |
148 | 123 | ||
149 | /* the spi->mode bits understood by this driver: */ | 124 | /* the spi->mode bits understood by this driver: */ |
@@ -151,22 +126,12 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi, | |||
151 | 126 | ||
152 | static int bcm63xx_spi_setup(struct spi_device *spi) | 127 | static int bcm63xx_spi_setup(struct spi_device *spi) |
153 | { | 128 | { |
154 | struct bcm63xx_spi *bs; | 129 | if (spi->bits_per_word != 8) { |
155 | 130 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", | |
156 | bs = spi_master_get_devdata(spi->master); | 131 | __func__, spi->bits_per_word); |
157 | |||
158 | if (!spi->bits_per_word) | ||
159 | spi->bits_per_word = 8; | ||
160 | |||
161 | if (spi->mode & ~MODEBITS) { | ||
162 | dev_err(&spi->dev, "%s, unsupported mode bits %x\n", | ||
163 | __func__, spi->mode & ~MODEBITS); | ||
164 | return -EINVAL; | 132 | return -EINVAL; |
165 | } | 133 | } |
166 | 134 | ||
167 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", | ||
168 | __func__, spi->mode & MODEBITS, spi->bits_per_word, 0); | ||
169 | |||
170 | return 0; | 135 | return 0; |
171 | } | 136 | } |
172 | 137 | ||
@@ -312,9 +277,12 @@ static int bcm63xx_spi_transfer_one(struct spi_master *master, | |||
312 | * full-duplex transfers. | 277 | * full-duplex transfers. |
313 | */ | 278 | */ |
314 | list_for_each_entry(t, &m->transfers, transfer_list) { | 279 | list_for_each_entry(t, &m->transfers, transfer_list) { |
315 | status = bcm63xx_spi_check_transfer(spi, t); | 280 | if (t->bits_per_word != 8) { |
316 | if (status < 0) | 281 | dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", |
282 | __func__, t->bits_per_word); | ||
283 | status = -EINVAL; | ||
317 | goto exit; | 284 | goto exit; |
285 | } | ||
318 | 286 | ||
319 | if (!first) | 287 | if (!first) |
320 | first = t; | 288 | first = t; |
@@ -443,18 +411,9 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
443 | platform_set_drvdata(pdev, master); | 411 | platform_set_drvdata(pdev, master); |
444 | bs->pdev = pdev; | 412 | bs->pdev = pdev; |
445 | 413 | ||
446 | if (!devm_request_mem_region(&pdev->dev, r->start, | 414 | bs->regs = devm_ioremap_resource(&pdev->dev, r); |
447 | resource_size(r), PFX)) { | 415 | if (IS_ERR(bs->regs)) { |
448 | dev_err(dev, "iomem request failed\n"); | 416 | ret = PTR_ERR(bs->regs); |
449 | ret = -ENXIO; | ||
450 | goto out_err; | ||
451 | } | ||
452 | |||
453 | bs->regs = devm_ioremap_nocache(&pdev->dev, r->start, | ||
454 | resource_size(r)); | ||
455 | if (!bs->regs) { | ||
456 | dev_err(dev, "unable to ioremap regs\n"); | ||
457 | ret = -ENOMEM; | ||
458 | goto out_err; | 417 | goto out_err; |
459 | } | 418 | } |
460 | 419 | ||
@@ -476,7 +435,6 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
476 | master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer; | 435 | master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer; |
477 | master->transfer_one_message = bcm63xx_spi_transfer_one; | 436 | master->transfer_one_message = bcm63xx_spi_transfer_one; |
478 | master->mode_bits = MODEBITS; | 437 | master->mode_bits = MODEBITS; |
479 | bs->speed_hz = pdata->speed_hz; | ||
480 | bs->msg_type_shift = pdata->msg_type_shift; | 438 | bs->msg_type_shift = pdata->msg_type_shift; |
481 | bs->msg_ctl_width = pdata->msg_ctl_width; | 439 | bs->msg_ctl_width = pdata->msg_ctl_width; |
482 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); | 440 | bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); |
@@ -493,7 +451,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
493 | } | 451 | } |
494 | 452 | ||
495 | /* Initialize hardware */ | 453 | /* Initialize hardware */ |
496 | clk_enable(bs->clk); | 454 | clk_prepare_enable(bs->clk); |
497 | bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); | 455 | bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); |
498 | 456 | ||
499 | /* register and we are done */ | 457 | /* register and we are done */ |
@@ -509,7 +467,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev) | |||
509 | return 0; | 467 | return 0; |
510 | 468 | ||
511 | out_clk_disable: | 469 | out_clk_disable: |
512 | clk_disable(clk); | 470 | clk_disable_unprepare(clk); |
513 | out_err: | 471 | out_err: |
514 | platform_set_drvdata(pdev, NULL); | 472 | platform_set_drvdata(pdev, NULL); |
515 | spi_master_put(master); | 473 | spi_master_put(master); |
@@ -530,7 +488,7 @@ static int bcm63xx_spi_remove(struct platform_device *pdev) | |||
530 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); | 488 | bcm_spi_writeb(bs, 0, SPI_INT_MASK); |
531 | 489 | ||
532 | /* HW shutdown */ | 490 | /* HW shutdown */ |
533 | clk_disable(bs->clk); | 491 | clk_disable_unprepare(bs->clk); |
534 | clk_put(bs->clk); | 492 | clk_put(bs->clk); |
535 | 493 | ||
536 | platform_set_drvdata(pdev, 0); | 494 | platform_set_drvdata(pdev, 0); |
@@ -549,7 +507,7 @@ static int bcm63xx_spi_suspend(struct device *dev) | |||
549 | 507 | ||
550 | spi_master_suspend(master); | 508 | spi_master_suspend(master); |
551 | 509 | ||
552 | clk_disable(bs->clk); | 510 | clk_disable_unprepare(bs->clk); |
553 | 511 | ||
554 | return 0; | 512 | return 0; |
555 | } | 513 | } |
@@ -560,7 +518,7 @@ static int bcm63xx_spi_resume(struct device *dev) | |||
560 | platform_get_drvdata(to_platform_device(dev)); | 518 | platform_get_drvdata(to_platform_device(dev)); |
561 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); | 519 | struct bcm63xx_spi *bs = spi_master_get_devdata(master); |
562 | 520 | ||
563 | clk_enable(bs->clk); | 521 | clk_prepare_enable(bs->clk); |
564 | 522 | ||
565 | spi_master_resume(master); | 523 | spi_master_resume(master); |
566 | 524 | ||
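With the separate per-transfer validation removed, the setup path above relies on t->speed_hz directly: the frequency table is ordered fastest to slowest and the first entry whose rate does not exceed the requested speed wins. A small sketch of that walk; only the 391 kHz entry is visible in the hunk above, so the other rates and all register encodings here are placeholders:

#include <stdio.h>

struct spi_clk_entry { unsigned int hz; unsigned int cfg; };

/* Ordered fastest to slowest; values other than 391 kHz are illustrative. */
static const struct spi_clk_entry freq_table[] = {
	{ 20000000, 7 },
	{ 12500000, 6 },
	{  6250000, 5 },
	{  3125000, 4 },
	{  1563000, 3 },
	{   781000, 2 },
	{   391000, 1 },
};

#define TABLE_LEN (int)(sizeof(freq_table) / sizeof(freq_table[0]))

static unsigned int pick_clk_cfg(unsigned int speed_hz)
{
	unsigned int cfg = freq_table[TABLE_LEN - 1].cfg;  /* slowest by default */

	for (int i = 0; i < TABLE_LEN; i++) {
		if (speed_hz >= freq_table[i].hz) {
			cfg = freq_table[i].cfg;               /* first match wins */
			break;
		}
	}
	return cfg;
}

int main(void)
{
	printf("4 MHz   -> cfg %u\n", pick_clk_cfg(4000000));
	printf("100 kHz -> cfg %u\n", pick_clk_cfg(100000));
	return 0;
}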
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c new file mode 100644 index 000000000000..07971e3fe58b --- /dev/null +++ b/drivers/spi/spi-fsl-cpm.c | |||
@@ -0,0 +1,387 @@ | |||
1 | /* | ||
2 | * Freescale SPI controller driver cpm functions. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright (C) 2006 Polycom, Inc. | ||
7 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
8 | * | ||
9 | * CPM SPI and QE buffer descriptors mode support: | ||
10 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/spi/spi.h> | ||
21 | #include <linux/fsl_devices.h> | ||
22 | #include <linux/dma-mapping.h> | ||
23 | #include <asm/cpm.h> | ||
24 | #include <asm/qe.h> | ||
25 | |||
26 | #include "spi-fsl-lib.h" | ||
27 | #include "spi-fsl-cpm.h" | ||
28 | #include "spi-fsl-spi.h" | ||
29 | |||
30 | /* CPM1 and CPM2 are mutually exclusive. */ | ||
31 | #ifdef CONFIG_CPM1 | ||
32 | #include <asm/cpm1.h> | ||
33 | #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0) | ||
34 | #else | ||
35 | #include <asm/cpm2.h> | ||
36 | #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0) | ||
37 | #endif | ||
38 | |||
39 | #define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */ | ||
40 | #define SPIE_RXB 0x00000100 /* Last char is written to rx buf */ | ||
41 | |||
42 | /* SPCOM register values */ | ||
43 | #define SPCOM_STR (1 << 23) /* Start transmit */ | ||
44 | |||
45 | #define SPI_PRAM_SIZE 0x100 | ||
46 | #define SPI_MRBLR ((unsigned int)PAGE_SIZE) | ||
47 | |||
48 | static void *fsl_dummy_rx; | ||
49 | static DEFINE_MUTEX(fsl_dummy_rx_lock); | ||
50 | static int fsl_dummy_rx_refcnt; | ||
51 | |||
52 | void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) | ||
53 | { | ||
54 | if (mspi->flags & SPI_QE) { | ||
55 | qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, | ||
56 | QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
57 | } else { | ||
58 | cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX); | ||
59 | if (mspi->flags & SPI_CPM1) { | ||
60 | out_be16(&mspi->pram->rbptr, | ||
61 | in_be16(&mspi->pram->rbase)); | ||
62 | out_be16(&mspi->pram->tbptr, | ||
63 | in_be16(&mspi->pram->tbase)); | ||
64 | } | ||
65 | } | ||
66 | } | ||
67 | |||
68 | static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | ||
69 | { | ||
70 | struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; | ||
71 | struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; | ||
72 | unsigned int xfer_len = min(mspi->count, SPI_MRBLR); | ||
73 | unsigned int xfer_ofs; | ||
74 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
75 | |||
76 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; | ||
77 | |||
78 | if (mspi->rx_dma == mspi->dma_dummy_rx) | ||
79 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); | ||
80 | else | ||
81 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | ||
82 | out_be16(&rx_bd->cbd_datlen, 0); | ||
83 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); | ||
84 | |||
85 | if (mspi->tx_dma == mspi->dma_dummy_tx) | ||
86 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); | ||
87 | else | ||
88 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | ||
89 | out_be16(&tx_bd->cbd_datlen, xfer_len); | ||
90 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | | ||
91 | BD_SC_LAST); | ||
92 | |||
93 | /* start transfer */ | ||
94 | mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); | ||
95 | } | ||
96 | |||
97 | int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | ||
98 | struct spi_transfer *t, bool is_dma_mapped) | ||
99 | { | ||
100 | struct device *dev = mspi->dev; | ||
101 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
102 | |||
103 | if (is_dma_mapped) { | ||
104 | mspi->map_tx_dma = 0; | ||
105 | mspi->map_rx_dma = 0; | ||
106 | } else { | ||
107 | mspi->map_tx_dma = 1; | ||
108 | mspi->map_rx_dma = 1; | ||
109 | } | ||
110 | |||
111 | if (!t->tx_buf) { | ||
112 | mspi->tx_dma = mspi->dma_dummy_tx; | ||
113 | mspi->map_tx_dma = 0; | ||
114 | } | ||
115 | |||
116 | if (!t->rx_buf) { | ||
117 | mspi->rx_dma = mspi->dma_dummy_rx; | ||
118 | mspi->map_rx_dma = 0; | ||
119 | } | ||
120 | |||
121 | if (mspi->map_tx_dma) { | ||
122 | void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */ | ||
123 | |||
124 | mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len, | ||
125 | DMA_TO_DEVICE); | ||
126 | if (dma_mapping_error(dev, mspi->tx_dma)) { | ||
127 | dev_err(dev, "unable to map tx dma\n"); | ||
128 | return -ENOMEM; | ||
129 | } | ||
130 | } else if (t->tx_buf) { | ||
131 | mspi->tx_dma = t->tx_dma; | ||
132 | } | ||
133 | |||
134 | if (mspi->map_rx_dma) { | ||
135 | mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len, | ||
136 | DMA_FROM_DEVICE); | ||
137 | if (dma_mapping_error(dev, mspi->rx_dma)) { | ||
138 | dev_err(dev, "unable to map rx dma\n"); | ||
139 | goto err_rx_dma; | ||
140 | } | ||
141 | } else if (t->rx_buf) { | ||
142 | mspi->rx_dma = t->rx_dma; | ||
143 | } | ||
144 | |||
145 | /* enable rx ints */ | ||
146 | mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); | ||
147 | |||
148 | mspi->xfer_in_progress = t; | ||
149 | mspi->count = t->len; | ||
150 | |||
151 | /* start CPM transfers */ | ||
152 | fsl_spi_cpm_bufs_start(mspi); | ||
153 | |||
154 | return 0; | ||
155 | |||
156 | err_rx_dma: | ||
157 | if (mspi->map_tx_dma) | ||
158 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); | ||
159 | return -ENOMEM; | ||
160 | } | ||
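fsl_spi_cpm_bufs() settles, per direction, where the DMA address comes from: a missing buffer is pointed at the shared dummy area, a pre-mapped message reuses t->tx_dma/t->rx_dma, and the remaining cases are mapped here with dma_map_single(). A decision-table sketch of that choice (the enum and function names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum dma_src { USE_PREMAPPED, USE_DUMMY, MAP_HERE };

static enum dma_src pick_dma_src(bool is_dma_mapped, bool have_buf)
{
	if (!have_buf)
		return USE_DUMMY;        /* no data: point the BD at the dummy buffer */
	if (is_dma_mapped)
		return USE_PREMAPPED;    /* caller already provided t->tx_dma/t->rx_dma */
	return MAP_HERE;                 /* driver maps the buffer with dma_map_single() */
}

int main(void)
{
	static const char *name[] = { "pre-mapped", "dummy buffer", "map here" };

	for (int mapped = 0; mapped <= 1; mapped++)
		for (int buf = 0; buf <= 1; buf++)
			printf("is_dma_mapped=%d have_buf=%d -> %s\n",
			       mapped, buf, name[pick_dma_src(mapped, buf)]);
	return 0;
}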
161 | |||
162 | void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | ||
163 | { | ||
164 | struct device *dev = mspi->dev; | ||
165 | struct spi_transfer *t = mspi->xfer_in_progress; | ||
166 | |||
167 | if (mspi->map_tx_dma) | ||
168 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); | ||
169 | if (mspi->map_rx_dma) | ||
170 | dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); | ||
171 | mspi->xfer_in_progress = NULL; | ||
172 | } | ||
173 | |||
174 | void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | ||
175 | { | ||
176 | u16 len; | ||
177 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
178 | |||
179 | dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, | ||
180 | in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); | ||
181 | |||
182 | len = in_be16(&mspi->rx_bd->cbd_datlen); | ||
183 | if (len > mspi->count) { | ||
184 | WARN_ON(1); | ||
185 | len = mspi->count; | ||
186 | } | ||
187 | |||
188 | /* Clear the events */ | ||
189 | mpc8xxx_spi_write_reg(®_base->event, events); | ||
190 | |||
191 | mspi->count -= len; | ||
192 | if (mspi->count) | ||
193 | fsl_spi_cpm_bufs_start(mspi); | ||
194 | else | ||
195 | complete(&mspi->done); | ||
196 | } | ||
197 | |||
198 | static void *fsl_spi_alloc_dummy_rx(void) | ||
199 | { | ||
200 | mutex_lock(&fsl_dummy_rx_lock); | ||
201 | |||
202 | if (!fsl_dummy_rx) | ||
203 | fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); | ||
204 | if (fsl_dummy_rx) | ||
205 | fsl_dummy_rx_refcnt++; | ||
206 | |||
207 | mutex_unlock(&fsl_dummy_rx_lock); | ||
208 | |||
209 | return fsl_dummy_rx; | ||
210 | } | ||
211 | |||
212 | static void fsl_spi_free_dummy_rx(void) | ||
213 | { | ||
214 | mutex_lock(&fsl_dummy_rx_lock); | ||
215 | |||
216 | switch (fsl_dummy_rx_refcnt) { | ||
217 | case 0: | ||
218 | WARN_ON(1); | ||
219 | break; | ||
220 | case 1: | ||
221 | kfree(fsl_dummy_rx); | ||
222 | fsl_dummy_rx = NULL; | ||
223 | /* fall through */ | ||
224 | default: | ||
225 | fsl_dummy_rx_refcnt--; | ||
226 | break; | ||
227 | } | ||
228 | |||
229 | mutex_unlock(&fsl_dummy_rx_lock); | ||
230 | } | ||
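The dummy RX buffer is shared by every controller instance, so it is allocated lazily by the first user and freed by the last, all under a mutex. A stand-alone sketch of the same refcounting (pthread mutex and malloc stand in for the kernel mutex and kmalloc; the size is illustrative):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

#define DUMMY_SIZE 4096

static void *dummy_buf;
static int dummy_refcnt;
static pthread_mutex_t dummy_lock = PTHREAD_MUTEX_INITIALIZER;

static void *dummy_get(void)
{
	pthread_mutex_lock(&dummy_lock);
	if (!dummy_buf)                  /* first user allocates */
		dummy_buf = malloc(DUMMY_SIZE);
	if (dummy_buf)                   /* count only successful users */
		dummy_refcnt++;
	pthread_mutex_unlock(&dummy_lock);
	return dummy_buf;
}

static void dummy_put(void)
{
	pthread_mutex_lock(&dummy_lock);
	if (dummy_refcnt && !--dummy_refcnt) {  /* last user frees */
		free(dummy_buf);
		dummy_buf = NULL;
	}
	pthread_mutex_unlock(&dummy_lock);
}

int main(void)
{
	dummy_get();
	dummy_get();
	dummy_put();
	printf("after one put: buffer %s\n", dummy_buf ? "still allocated" : "freed");
	dummy_put();
	printf("after last put: buffer %s\n", dummy_buf ? "still allocated" : "freed");
	return 0;
}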
231 | |||
232 | static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | ||
233 | { | ||
234 | struct device *dev = mspi->dev; | ||
235 | struct device_node *np = dev->of_node; | ||
236 | const u32 *iprop; | ||
237 | int size; | ||
238 | void __iomem *spi_base; | ||
239 | unsigned long pram_ofs = -ENOMEM; | ||
240 | |||
241 | /* Can't use of_address_to_resource(), QE muram isn't at 0. */ | ||
242 | iprop = of_get_property(np, "reg", &size); | ||
243 | |||
244 | /* QE with a fixed pram location? */ | ||
245 | if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4) | ||
246 | return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE); | ||
247 | |||
248 | /* QE but with a dynamic pram location? */ | ||
249 | if (mspi->flags & SPI_QE) { | ||
250 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
251 | qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock, | ||
252 | QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs); | ||
253 | return pram_ofs; | ||
254 | } | ||
255 | |||
256 | spi_base = of_iomap(np, 1); | ||
257 | if (spi_base == NULL) | ||
258 | return -EINVAL; | ||
259 | |||
260 | if (mspi->flags & SPI_CPM2) { | ||
261 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
262 | out_be16(spi_base, pram_ofs); | ||
263 | } else { | ||
264 | struct spi_pram __iomem *pram = spi_base; | ||
265 | u16 rpbase = in_be16(&pram->rpbase); | ||
266 | |||
267 | /* Microcode relocation patch applied? */ | ||
268 | if (rpbase) { | ||
269 | pram_ofs = rpbase; | ||
270 | } else { | ||
271 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
272 | out_be16(spi_base, pram_ofs); | ||
273 | } | ||
274 | } | ||
275 | |||
276 | iounmap(spi_base); | ||
277 | return pram_ofs; | ||
278 | } | ||
279 | |||
280 | int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | ||
281 | { | ||
282 | struct device *dev = mspi->dev; | ||
283 | struct device_node *np = dev->of_node; | ||
284 | const u32 *iprop; | ||
285 | int size; | ||
286 | unsigned long pram_ofs; | ||
287 | unsigned long bds_ofs; | ||
288 | |||
289 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
290 | return 0; | ||
291 | |||
292 | if (!fsl_spi_alloc_dummy_rx()) | ||
293 | return -ENOMEM; | ||
294 | |||
295 | if (mspi->flags & SPI_QE) { | ||
296 | iprop = of_get_property(np, "cell-index", &size); | ||
297 | if (iprop && size == sizeof(*iprop)) | ||
298 | mspi->subblock = *iprop; | ||
299 | |||
300 | switch (mspi->subblock) { | ||
301 | default: | ||
302 | dev_warn(dev, "cell-index unspecified, assuming SPI1"); | ||
303 | /* fall through */ | ||
304 | case 0: | ||
305 | mspi->subblock = QE_CR_SUBBLOCK_SPI1; | ||
306 | break; | ||
307 | case 1: | ||
308 | mspi->subblock = QE_CR_SUBBLOCK_SPI2; | ||
309 | break; | ||
310 | } | ||
311 | } | ||
312 | |||
313 | pram_ofs = fsl_spi_cpm_get_pram(mspi); | ||
314 | if (IS_ERR_VALUE(pram_ofs)) { | ||
315 | dev_err(dev, "can't allocate spi parameter ram\n"); | ||
316 | goto err_pram; | ||
317 | } | ||
318 | |||
319 | bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) + | ||
320 | sizeof(*mspi->rx_bd), 8); | ||
321 | if (IS_ERR_VALUE(bds_ofs)) { | ||
322 | dev_err(dev, "can't allocate bds\n"); | ||
323 | goto err_bds; | ||
324 | } | ||
325 | |||
326 | mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE, | ||
327 | DMA_TO_DEVICE); | ||
328 | if (dma_mapping_error(dev, mspi->dma_dummy_tx)) { | ||
329 | dev_err(dev, "unable to map dummy tx buffer\n"); | ||
330 | goto err_dummy_tx; | ||
331 | } | ||
332 | |||
333 | mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, | ||
334 | DMA_FROM_DEVICE); | ||
335 | if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { | ||
336 | dev_err(dev, "unable to map dummy rx buffer\n"); | ||
337 | goto err_dummy_rx; | ||
338 | } | ||
339 | |||
340 | mspi->pram = cpm_muram_addr(pram_ofs); | ||
341 | |||
342 | mspi->tx_bd = cpm_muram_addr(bds_ofs); | ||
343 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); | ||
344 | |||
345 | /* Initialize parameter ram. */ | ||
346 | out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd)); | ||
347 | out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd)); | ||
348 | out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL); | ||
349 | out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL); | ||
350 | out_be16(&mspi->pram->mrblr, SPI_MRBLR); | ||
351 | out_be32(&mspi->pram->rstate, 0); | ||
352 | out_be32(&mspi->pram->rdp, 0); | ||
353 | out_be16(&mspi->pram->rbptr, 0); | ||
354 | out_be16(&mspi->pram->rbc, 0); | ||
355 | out_be32(&mspi->pram->rxtmp, 0); | ||
356 | out_be32(&mspi->pram->tstate, 0); | ||
357 | out_be32(&mspi->pram->tdp, 0); | ||
358 | out_be16(&mspi->pram->tbptr, 0); | ||
359 | out_be16(&mspi->pram->tbc, 0); | ||
360 | out_be32(&mspi->pram->txtmp, 0); | ||
361 | |||
362 | return 0; | ||
363 | |||
364 | err_dummy_rx: | ||
365 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | ||
366 | err_dummy_tx: | ||
367 | cpm_muram_free(bds_ofs); | ||
368 | err_bds: | ||
369 | cpm_muram_free(pram_ofs); | ||
370 | err_pram: | ||
371 | fsl_spi_free_dummy_rx(); | ||
372 | return -ENOMEM; | ||
373 | } | ||
374 | |||
375 | void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | ||
376 | { | ||
377 | struct device *dev = mspi->dev; | ||
378 | |||
379 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
380 | return; | ||
381 | |||
382 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); | ||
383 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | ||
384 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); | ||
385 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | ||
386 | fsl_spi_free_dummy_rx(); | ||
387 | } | ||
diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h new file mode 100644 index 000000000000..c71115805485 --- /dev/null +++ b/drivers/spi/spi-fsl-cpm.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Freescale SPI controller driver cpm functions. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright (C) 2006 Polycom, Inc. | ||
7 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
8 | * | ||
9 | * CPM SPI and QE buffer descriptors mode support: | ||
10 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
18 | |||
19 | #ifndef __SPI_FSL_CPM_H__ | ||
20 | #define __SPI_FSL_CPM_H__ | ||
21 | |||
22 | #include "spi-fsl-lib.h" | ||
23 | |||
24 | #ifdef CONFIG_FSL_SOC | ||
25 | extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi); | ||
26 | extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | ||
27 | struct spi_transfer *t, bool is_dma_mapped); | ||
28 | extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi); | ||
29 | extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events); | ||
30 | extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi); | ||
31 | extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi); | ||
32 | #else | ||
33 | static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { } | ||
34 | static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | ||
35 | struct spi_transfer *t, | ||
36 | bool is_dma_mapped) { return 0; } | ||
37 | static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { } | ||
38 | static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { } | ||
39 | static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; } | ||
40 | static inline void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { } | ||
41 | #endif | ||
42 | |||
43 | #endif /* __SPI_FSL_CPM_H__ */ | ||
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c index 8ade675a04f1..a91db0e57b23 100644 --- a/drivers/spi/spi-fsl-lib.c +++ b/drivers/spi/spi-fsl-lib.c | |||
@@ -23,7 +23,9 @@ | |||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/of_platform.h> | 24 | #include <linux/of_platform.h> |
25 | #include <linux/spi/spi.h> | 25 | #include <linux/spi/spi.h> |
26 | #ifdef CONFIG_FSL_SOC | ||
26 | #include <sysdev/fsl_soc.h> | 27 | #include <sysdev/fsl_soc.h> |
28 | #endif | ||
27 | 29 | ||
28 | #include "spi-fsl-lib.h" | 30 | #include "spi-fsl-lib.h" |
29 | 31 | ||
@@ -208,6 +210,7 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev) | |||
208 | /* Allocate bus num dynamically. */ | 210 | /* Allocate bus num dynamically. */ |
209 | pdata->bus_num = -1; | 211 | pdata->bus_num = -1; |
210 | 212 | ||
213 | #ifdef CONFIG_FSL_SOC | ||
211 | /* SPI controller is either clocked from QE or SoC clock. */ | 214 | /* SPI controller is either clocked from QE or SoC clock. */ |
212 | pdata->sysclk = get_brgfreq(); | 215 | pdata->sysclk = get_brgfreq(); |
213 | if (pdata->sysclk == -1) { | 216 | if (pdata->sysclk == -1) { |
@@ -217,6 +220,11 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev) | |||
217 | goto err; | 220 | goto err; |
218 | } | 221 | } |
219 | } | 222 | } |
223 | #else | ||
224 | ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk); | ||
225 | if (ret) | ||
226 | goto err; | ||
227 | #endif | ||
220 | 228 | ||
221 | prop = of_get_property(np, "mode", NULL); | 229 | prop = of_get_property(np, "mode", NULL); |
222 | if (prop && !strcmp(prop, "cpu-qe")) | 230 | if (prop && !strcmp(prop, "cpu-qe")) |
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h index cbe881b9ea76..52db6936778e 100644 --- a/drivers/spi/spi-fsl-lib.h +++ b/drivers/spi/spi-fsl-lib.h | |||
@@ -34,8 +34,10 @@ struct mpc8xxx_spi { | |||
34 | 34 | ||
35 | int subblock; | 35 | int subblock; |
36 | struct spi_pram __iomem *pram; | 36 | struct spi_pram __iomem *pram; |
37 | #ifdef CONFIG_FSL_SOC | ||
37 | struct cpm_buf_desc __iomem *tx_bd; | 38 | struct cpm_buf_desc __iomem *tx_bd; |
38 | struct cpm_buf_desc __iomem *rx_bd; | 39 | struct cpm_buf_desc __iomem *rx_bd; |
40 | #endif | ||
39 | 41 | ||
40 | struct spi_transfer *xfer_in_progress; | 42 | struct spi_transfer *xfer_in_progress; |
41 | 43 | ||
@@ -67,6 +69,15 @@ struct mpc8xxx_spi { | |||
67 | 69 | ||
68 | unsigned int flags; | 70 | unsigned int flags; |
69 | 71 | ||
72 | #ifdef CONFIG_SPI_FSL_SPI | ||
73 | int type; | ||
74 | int native_chipselects; | ||
75 | u8 max_bits_per_word; | ||
76 | |||
77 | void (*set_shifts)(u32 *rx_shift, u32 *tx_shift, | ||
78 | int bits_per_word, int msb_first); | ||
79 | #endif | ||
80 | |||
70 | struct workqueue_struct *workqueue; | 81 | struct workqueue_struct *workqueue; |
71 | struct work_struct work; | 82 | struct work_struct work; |
72 | 83 | ||
@@ -87,12 +98,12 @@ struct spi_mpc8xxx_cs { | |||
87 | 98 | ||
88 | static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) | 99 | static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val) |
89 | { | 100 | { |
90 | out_be32(reg, val); | 101 | iowrite32be(val, reg); |
91 | } | 102 | } |
92 | 103 | ||
93 | static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) | 104 | static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg) |
94 | { | 105 | { |
95 | return in_be32(reg); | 106 | return ioread32be(reg); |
96 | } | 107 | } |
97 | 108 | ||
98 | struct mpc8xxx_spi_probe_info { | 109 | struct mpc8xxx_spi_probe_info { |
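The accessor change above swaps out_be32()/in_be32(), which are PowerPC-specific, for iowrite32be()/ioread32be(), which are portable MMIO helpers, so the same register wrappers build on other architectures. A user-space byte-order sketch of what the big-endian accessors do (htobe32()/be32toh() stand in for the MMIO calls; the register value is made up):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mmio_word;              /* stands in for the mapped register */
	uint32_t val = 0x01020304;       /* made-up register value */

	mmio_word = htobe32(val);        /* what a big-endian write would store */
	printf("stored word: 0x%08x, read back: 0x%08x\n",
	       (unsigned)mmio_word, (unsigned)be32toh(mmio_word));
	return 0;
}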
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 086a9eef2e05..14e202ee7036 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c | |||
@@ -10,6 +10,10 @@ | |||
10 | * Copyright (c) 2009 MontaVista Software, Inc. | 10 | * Copyright (c) 2009 MontaVista Software, Inc. |
11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | 11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> |
12 | * | 12 | * |
13 | * GRLIB support: | ||
14 | * Copyright (c) 2012 Aeroflex Gaisler AB. | ||
15 | * Author: Andreas Larsson <andreas@gaisler.com> | ||
16 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | 17 | * This program is free software; you can redistribute it and/or modify it |
14 | * under the terms of the GNU General Public License as published by the | 18 | * under the terms of the GNU General Public License as published by the |
15 | * Free Software Foundation; either version 2 of the License, or (at your | 19 | * Free Software Foundation; either version 2 of the License, or (at your |
@@ -30,75 +34,54 @@ | |||
30 | #include <linux/mutex.h> | 34 | #include <linux/mutex.h> |
31 | #include <linux/of.h> | 35 | #include <linux/of.h> |
32 | #include <linux/of_platform.h> | 36 | #include <linux/of_platform.h> |
37 | #include <linux/of_address.h> | ||
38 | #include <linux/of_irq.h> | ||
33 | #include <linux/gpio.h> | 39 | #include <linux/gpio.h> |
34 | #include <linux/of_gpio.h> | 40 | #include <linux/of_gpio.h> |
35 | 41 | ||
36 | #include <sysdev/fsl_soc.h> | ||
37 | #include <asm/cpm.h> | ||
38 | #include <asm/qe.h> | ||
39 | |||
40 | #include "spi-fsl-lib.h" | 42 | #include "spi-fsl-lib.h" |
43 | #include "spi-fsl-cpm.h" | ||
44 | #include "spi-fsl-spi.h" | ||
41 | 45 | ||
42 | /* CPM1 and CPM2 are mutually exclusive. */ | 46 | #define TYPE_FSL 0 |
43 | #ifdef CONFIG_CPM1 | 47 | #define TYPE_GRLIB 1 |
44 | #include <asm/cpm1.h> | ||
45 | #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0) | ||
46 | #else | ||
47 | #include <asm/cpm2.h> | ||
48 | #define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0) | ||
49 | #endif | ||
50 | |||
51 | /* SPI Controller registers */ | ||
52 | struct fsl_spi_reg { | ||
53 | u8 res1[0x20]; | ||
54 | __be32 mode; | ||
55 | __be32 event; | ||
56 | __be32 mask; | ||
57 | __be32 command; | ||
58 | __be32 transmit; | ||
59 | __be32 receive; | ||
60 | }; | ||
61 | |||
62 | /* SPI Controller mode register definitions */ | ||
63 | #define SPMODE_LOOP (1 << 30) | ||
64 | #define SPMODE_CI_INACTIVEHIGH (1 << 29) | ||
65 | #define SPMODE_CP_BEGIN_EDGECLK (1 << 28) | ||
66 | #define SPMODE_DIV16 (1 << 27) | ||
67 | #define SPMODE_REV (1 << 26) | ||
68 | #define SPMODE_MS (1 << 25) | ||
69 | #define SPMODE_ENABLE (1 << 24) | ||
70 | #define SPMODE_LEN(x) ((x) << 20) | ||
71 | #define SPMODE_PM(x) ((x) << 16) | ||
72 | #define SPMODE_OP (1 << 14) | ||
73 | #define SPMODE_CG(x) ((x) << 7) | ||
74 | 48 | ||
75 | /* | 49 | struct fsl_spi_match_data { |
76 | * Default for SPI Mode: | 50 | int type; |
77 | * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk | 51 | }; |
78 | */ | ||
79 | #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ | ||
80 | SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) | ||
81 | |||
82 | /* SPIE register values */ | ||
83 | #define SPIE_NE 0x00000200 /* Not empty */ | ||
84 | #define SPIE_NF 0x00000100 /* Not full */ | ||
85 | 52 | ||
86 | /* SPIM register values */ | 53 | static struct fsl_spi_match_data of_fsl_spi_fsl_config = { |
87 | #define SPIM_NE 0x00000200 /* Not empty */ | 54 | .type = TYPE_FSL, |
88 | #define SPIM_NF 0x00000100 /* Not full */ | 55 | }; |
89 | 56 | ||
90 | #define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */ | 57 | static struct fsl_spi_match_data of_fsl_spi_grlib_config = { |
91 | #define SPIE_RXB 0x00000100 /* Last char is written to rx buf */ | 58 | .type = TYPE_GRLIB, |
59 | }; | ||
92 | 60 | ||
93 | /* SPCOM register values */ | 61 | static struct of_device_id of_fsl_spi_match[] = { |
94 | #define SPCOM_STR (1 << 23) /* Start transmit */ | 62 | { |
63 | .compatible = "fsl,spi", | ||
64 | .data = &of_fsl_spi_fsl_config, | ||
65 | }, | ||
66 | { | ||
67 | .compatible = "aeroflexgaisler,spictrl", | ||
68 | .data = &of_fsl_spi_grlib_config, | ||
69 | }, | ||
70 | {} | ||
71 | }; | ||
72 | MODULE_DEVICE_TABLE(of, of_fsl_spi_match); | ||
95 | 73 | ||
96 | #define SPI_PRAM_SIZE 0x100 | 74 | static int fsl_spi_get_type(struct device *dev) |
97 | #define SPI_MRBLR ((unsigned int)PAGE_SIZE) | 75 | { |
76 | const struct of_device_id *match; | ||
98 | 77 | ||
99 | static void *fsl_dummy_rx; | 78 | if (dev->of_node) { |
100 | static DEFINE_MUTEX(fsl_dummy_rx_lock); | 79 | match = of_match_node(of_fsl_spi_match, dev->of_node); |
101 | static int fsl_dummy_rx_refcnt; | 80 | if (match && match->data) |
81 | return ((struct fsl_spi_match_data *)match->data)->type; | ||
82 | } | ||
83 | return TYPE_FSL; | ||
84 | } | ||
102 | 85 | ||
103 | static void fsl_spi_change_mode(struct spi_device *spi) | 86 | static void fsl_spi_change_mode(struct spi_device *spi) |
104 | { | 87 | { |
@@ -119,18 +102,7 @@ static void fsl_spi_change_mode(struct spi_device *spi) | |||
119 | 102 | ||
120 | /* When in CPM mode, we need to reinit tx and rx. */ | 103 | /* When in CPM mode, we need to reinit tx and rx. */ |
121 | if (mspi->flags & SPI_CPM_MODE) { | 104 | if (mspi->flags & SPI_CPM_MODE) { |
122 | if (mspi->flags & SPI_QE) { | 105 | fsl_spi_cpm_reinit_txrx(mspi); |
123 | qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock, | ||
124 | QE_CR_PROTOCOL_UNSPECIFIED, 0); | ||
125 | } else { | ||
126 | cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX); | ||
127 | if (mspi->flags & SPI_CPM1) { | ||
128 | out_be16(&mspi->pram->rbptr, | ||
129 | in_be16(&mspi->pram->rbase)); | ||
130 | out_be16(&mspi->pram->tbptr, | ||
131 | in_be16(&mspi->pram->tbase)); | ||
132 | } | ||
133 | } | ||
134 | } | 106 | } |
135 | mpc8xxx_spi_write_reg(mode, cs->hw_mode); | 107 | mpc8xxx_spi_write_reg(mode, cs->hw_mode); |
136 | local_irq_restore(flags); | 108 | local_irq_restore(flags); |
@@ -163,6 +135,40 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value) | |||
163 | } | 135 | } |
164 | } | 136 | } |
165 | 137 | ||
138 | static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift, | ||
139 | int bits_per_word, int msb_first) | ||
140 | { | ||
141 | *rx_shift = 0; | ||
142 | *tx_shift = 0; | ||
143 | if (msb_first) { | ||
144 | if (bits_per_word <= 8) { | ||
145 | *rx_shift = 16; | ||
146 | *tx_shift = 24; | ||
147 | } else if (bits_per_word <= 16) { | ||
148 | *rx_shift = 16; | ||
149 | *tx_shift = 16; | ||
150 | } | ||
151 | } else { | ||
152 | if (bits_per_word <= 8) | ||
153 | *rx_shift = 8; | ||
154 | } | ||
155 | } | ||
156 | |||
157 | static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift, | ||
158 | int bits_per_word, int msb_first) | ||
159 | { | ||
160 | *rx_shift = 0; | ||
161 | *tx_shift = 0; | ||
162 | if (bits_per_word <= 16) { | ||
163 | if (msb_first) { | ||
164 | *rx_shift = 16; /* LSB in bit 16 */ | ||
165 | *tx_shift = 32 - bits_per_word; /* MSB in bit 31 */ | ||
166 | } else { | ||
167 | *rx_shift = 16 - bits_per_word; /* MSB in bit 15 */ | ||
168 | } | ||
169 | } | ||
170 | } | ||
171 | |||
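These set_shifts callbacks report where a word sits inside the 32-bit shift registers; the per-word transfer helpers then shift data into and out of that position. For 8-bit MSB-first data the QE CPU variant above yields tx_shift = 24 and rx_shift = 16. A small sketch of how such shifts are consumed (the surrounding names and the echoed byte are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int tx_shift = 24, rx_shift = 16;  /* 8-bit MSB-first, QE CPU mode */
	uint8_t byte_out = 0xA5;

	/* TX path: the byte is positioned at bit 24 of the 32-bit register. */
	uint32_t tx_word = (uint32_t)byte_out << tx_shift;

	/* RX path: pretend the controller returned the byte at bit 16. */
	uint32_t rx_word = (uint32_t)byte_out << rx_shift;
	uint8_t byte_in = (rx_word >> rx_shift) & 0xff;

	printf("tx_word 0x%08x, received byte 0x%02x\n",
	       (unsigned)tx_word, (unsigned)byte_in);
	return 0;
}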
166 | static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, | 172 | static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, |
167 | struct spi_device *spi, | 173 | struct spi_device *spi, |
168 | struct mpc8xxx_spi *mpc8xxx_spi, | 174 | struct mpc8xxx_spi *mpc8xxx_spi, |
@@ -173,31 +179,20 @@ static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs, | |||
173 | if (bits_per_word <= 8) { | 179 | if (bits_per_word <= 8) { |
174 | cs->get_rx = mpc8xxx_spi_rx_buf_u8; | 180 | cs->get_rx = mpc8xxx_spi_rx_buf_u8; |
175 | cs->get_tx = mpc8xxx_spi_tx_buf_u8; | 181 | cs->get_tx = mpc8xxx_spi_tx_buf_u8; |
176 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { | ||
177 | cs->rx_shift = 16; | ||
178 | cs->tx_shift = 24; | ||
179 | } | ||
180 | } else if (bits_per_word <= 16) { | 182 | } else if (bits_per_word <= 16) { |
181 | cs->get_rx = mpc8xxx_spi_rx_buf_u16; | 183 | cs->get_rx = mpc8xxx_spi_rx_buf_u16; |
182 | cs->get_tx = mpc8xxx_spi_tx_buf_u16; | 184 | cs->get_tx = mpc8xxx_spi_tx_buf_u16; |
183 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { | ||
184 | cs->rx_shift = 16; | ||
185 | cs->tx_shift = 16; | ||
186 | } | ||
187 | } else if (bits_per_word <= 32) { | 185 | } else if (bits_per_word <= 32) { |
188 | cs->get_rx = mpc8xxx_spi_rx_buf_u32; | 186 | cs->get_rx = mpc8xxx_spi_rx_buf_u32; |
189 | cs->get_tx = mpc8xxx_spi_tx_buf_u32; | 187 | cs->get_tx = mpc8xxx_spi_tx_buf_u32; |
190 | } else | 188 | } else |
191 | return -EINVAL; | 189 | return -EINVAL; |
192 | 190 | ||
193 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE && | 191 | if (mpc8xxx_spi->set_shifts) |
194 | spi->mode & SPI_LSB_FIRST) { | 192 | mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift, |
195 | cs->tx_shift = 0; | 193 | bits_per_word, |
196 | if (bits_per_word <= 8) | 194 | !(spi->mode & SPI_LSB_FIRST)); |
197 | cs->rx_shift = 8; | 195 | |
198 | else | ||
199 | cs->rx_shift = 0; | ||
200 | } | ||
201 | mpc8xxx_spi->rx_shift = cs->rx_shift; | 196 | mpc8xxx_spi->rx_shift = cs->rx_shift; |
202 | mpc8xxx_spi->tx_shift = cs->tx_shift; | 197 | mpc8xxx_spi->tx_shift = cs->tx_shift; |
203 | mpc8xxx_spi->get_rx = cs->get_rx; | 198 | mpc8xxx_spi->get_rx = cs->get_rx; |
@@ -246,7 +241,8 @@ static int fsl_spi_setup_transfer(struct spi_device *spi, | |||
246 | 241 | ||
247 | /* Make sure it's a bit width we support [4..16, 32] */ | 242 | /* Make sure it's a bit width we support [4..16, 32] */ |
248 | if ((bits_per_word < 4) | 243 | if ((bits_per_word < 4) |
249 | || ((bits_per_word > 16) && (bits_per_word != 32))) | 244 | || ((bits_per_word > 16) && (bits_per_word != 32)) |
245 | || (bits_per_word > mpc8xxx_spi->max_bits_per_word)) | ||
250 | return -EINVAL; | 246 | return -EINVAL; |
251 | 247 | ||
252 | if (!hz) | 248 | if (!hz) |
@@ -295,112 +291,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi, | |||
295 | return 0; | 291 | return 0; |
296 | } | 292 | } |
297 | 293 | ||
298 | static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi) | ||
299 | { | ||
300 | struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd; | ||
301 | struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd; | ||
302 | unsigned int xfer_len = min(mspi->count, SPI_MRBLR); | ||
303 | unsigned int xfer_ofs; | ||
304 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
305 | |||
306 | xfer_ofs = mspi->xfer_in_progress->len - mspi->count; | ||
307 | |||
308 | if (mspi->rx_dma == mspi->dma_dummy_rx) | ||
309 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma); | ||
310 | else | ||
311 | out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs); | ||
312 | out_be16(&rx_bd->cbd_datlen, 0); | ||
313 | out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP); | ||
314 | |||
315 | if (mspi->tx_dma == mspi->dma_dummy_tx) | ||
316 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma); | ||
317 | else | ||
318 | out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs); | ||
319 | out_be16(&tx_bd->cbd_datlen, xfer_len); | ||
320 | out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP | | ||
321 | BD_SC_LAST); | ||
322 | |||
323 | /* start transfer */ | ||
324 | mpc8xxx_spi_write_reg(®_base->command, SPCOM_STR); | ||
325 | } | ||
326 | |||
327 | static int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi, | ||
328 | struct spi_transfer *t, bool is_dma_mapped) | ||
329 | { | ||
330 | struct device *dev = mspi->dev; | ||
331 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
332 | |||
333 | if (is_dma_mapped) { | ||
334 | mspi->map_tx_dma = 0; | ||
335 | mspi->map_rx_dma = 0; | ||
336 | } else { | ||
337 | mspi->map_tx_dma = 1; | ||
338 | mspi->map_rx_dma = 1; | ||
339 | } | ||
340 | |||
341 | if (!t->tx_buf) { | ||
342 | mspi->tx_dma = mspi->dma_dummy_tx; | ||
343 | mspi->map_tx_dma = 0; | ||
344 | } | ||
345 | |||
346 | if (!t->rx_buf) { | ||
347 | mspi->rx_dma = mspi->dma_dummy_rx; | ||
348 | mspi->map_rx_dma = 0; | ||
349 | } | ||
350 | |||
351 | if (mspi->map_tx_dma) { | ||
352 | void *nonconst_tx = (void *)mspi->tx; /* shut up gcc */ | ||
353 | |||
354 | mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len, | ||
355 | DMA_TO_DEVICE); | ||
356 | if (dma_mapping_error(dev, mspi->tx_dma)) { | ||
357 | dev_err(dev, "unable to map tx dma\n"); | ||
358 | return -ENOMEM; | ||
359 | } | ||
360 | } else if (t->tx_buf) { | ||
361 | mspi->tx_dma = t->tx_dma; | ||
362 | } | ||
363 | |||
364 | if (mspi->map_rx_dma) { | ||
365 | mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len, | ||
366 | DMA_FROM_DEVICE); | ||
367 | if (dma_mapping_error(dev, mspi->rx_dma)) { | ||
368 | dev_err(dev, "unable to map rx dma\n"); | ||
369 | goto err_rx_dma; | ||
370 | } | ||
371 | } else if (t->rx_buf) { | ||
372 | mspi->rx_dma = t->rx_dma; | ||
373 | } | ||
374 | |||
375 | /* enable rx ints */ | ||
376 | mpc8xxx_spi_write_reg(®_base->mask, SPIE_RXB); | ||
377 | |||
378 | mspi->xfer_in_progress = t; | ||
379 | mspi->count = t->len; | ||
380 | |||
381 | /* start CPM transfers */ | ||
382 | fsl_spi_cpm_bufs_start(mspi); | ||
383 | |||
384 | return 0; | ||
385 | |||
386 | err_rx_dma: | ||
387 | if (mspi->map_tx_dma) | ||
388 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); | ||
389 | return -ENOMEM; | ||
390 | } | ||
391 | |||
392 | static void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) | ||
393 | { | ||
394 | struct device *dev = mspi->dev; | ||
395 | struct spi_transfer *t = mspi->xfer_in_progress; | ||
396 | |||
397 | if (mspi->map_tx_dma) | ||
398 | dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE); | ||
399 | if (mspi->map_rx_dma) | ||
400 | dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE); | ||
401 | mspi->xfer_in_progress = NULL; | ||
402 | } | ||
403 | |||
404 | static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, | 294 | static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi, |
405 | struct spi_transfer *t, unsigned int len) | 295 | struct spi_transfer *t, unsigned int len) |
406 | { | 296 | { |
@@ -565,31 +455,45 @@ static int fsl_spi_setup(struct spi_device *spi) | |||
565 | cs->hw_mode = hw_mode; /* Restore settings */ | 455 | cs->hw_mode = hw_mode; /* Restore settings */ |
566 | return retval; | 456 | return retval; |
567 | } | 457 | } |
568 | return 0; | ||
569 | } | ||
570 | 458 | ||
571 | static void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) | 459 | if (mpc8xxx_spi->type == TYPE_GRLIB) { |
572 | { | 460 | if (gpio_is_valid(spi->cs_gpio)) { |
573 | u16 len; | 461 | int desel; |
574 | struct fsl_spi_reg *reg_base = mspi->reg_base; | ||
575 | 462 | ||
576 | dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__, | 463 | retval = gpio_request(spi->cs_gpio, |
577 | in_be16(&mspi->rx_bd->cbd_datlen), mspi->count); | 464 | dev_name(&spi->dev)); |
465 | if (retval) | ||
466 | return retval; | ||
578 | 467 | ||
579 | len = in_be16(&mspi->rx_bd->cbd_datlen); | 468 | desel = !(spi->mode & SPI_CS_HIGH); |
580 | if (len > mspi->count) { | 469 | retval = gpio_direction_output(spi->cs_gpio, desel); |
581 | WARN_ON(1); | 470 | if (retval) { |
582 | len = mspi->count; | 471 | gpio_free(spi->cs_gpio); |
472 | return retval; | ||
473 | } | ||
474 | } else if (spi->cs_gpio != -ENOENT) { | ||
475 | if (spi->cs_gpio < 0) | ||
476 | return spi->cs_gpio; | ||
477 | return -EINVAL; | ||
478 | } | ||
479 | /* When spi->cs_gpio == -ENOENT, a hole in the phandle list | ||
480 | * indicates to use native chipselect if present, or allow for | ||
481 | * an always selected chip | ||
482 | */ | ||
583 | } | 483 | } |
584 | 484 | ||
585 | /* Clear the events */ | 485 | /* Initialize chipselect - might be active for SPI_CS_HIGH mode */ |
586 | mpc8xxx_spi_write_reg(®_base->event, events); | 486 | fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE); |
587 | 487 | ||
588 | mspi->count -= len; | 488 | return 0; |
589 | if (mspi->count) | 489 | } |
590 | fsl_spi_cpm_bufs_start(mspi); | 490 | |
591 | else | 491 | static void fsl_spi_cleanup(struct spi_device *spi) |
592 | complete(&mspi->done); | 492 | { |
493 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); | ||
494 | |||
495 | if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio)) | ||
496 | gpio_free(spi->cs_gpio); | ||
593 | } | 497 | } |
594 | 498 | ||
595 | static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) | 499 | static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) |
@@ -646,201 +550,51 @@ static irqreturn_t fsl_spi_irq(s32 irq, void *context_data) | |||
646 | return ret; | 550 | return ret; |
647 | } | 551 | } |
648 | 552 | ||
649 | static void *fsl_spi_alloc_dummy_rx(void) | 553 | static void fsl_spi_remove(struct mpc8xxx_spi *mspi) |
650 | { | ||
651 | mutex_lock(&fsl_dummy_rx_lock); | ||
652 | |||
653 | if (!fsl_dummy_rx) | ||
654 | fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL); | ||
655 | if (fsl_dummy_rx) | ||
656 | fsl_dummy_rx_refcnt++; | ||
657 | |||
658 | mutex_unlock(&fsl_dummy_rx_lock); | ||
659 | |||
660 | return fsl_dummy_rx; | ||
661 | } | ||
662 | |||
663 | static void fsl_spi_free_dummy_rx(void) | ||
664 | { | 554 | { |
665 | mutex_lock(&fsl_dummy_rx_lock); | 555 | iounmap(mspi->reg_base); |
666 | 556 | fsl_spi_cpm_free(mspi); | |
667 | switch (fsl_dummy_rx_refcnt) { | ||
668 | case 0: | ||
669 | WARN_ON(1); | ||
670 | break; | ||
671 | case 1: | ||
672 | kfree(fsl_dummy_rx); | ||
673 | fsl_dummy_rx = NULL; | ||
674 | /* fall through */ | ||
675 | default: | ||
676 | fsl_dummy_rx_refcnt--; | ||
677 | break; | ||
678 | } | ||
679 | |||
680 | mutex_unlock(&fsl_dummy_rx_lock); | ||
681 | } | 557 | } |
682 | 558 | ||
683 | static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi) | 559 | static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on) |
684 | { | 560 | { |
685 | struct device *dev = mspi->dev; | 561 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); |
686 | struct device_node *np = dev->of_node; | 562 | struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base; |
687 | const u32 *iprop; | 563 | u32 slvsel; |
688 | int size; | 564 | u16 cs = spi->chip_select; |
689 | void __iomem *spi_base; | ||
690 | unsigned long pram_ofs = -ENOMEM; | ||
691 | |||
692 | /* Can't use of_address_to_resource(), QE muram isn't at 0. */ | ||
693 | iprop = of_get_property(np, "reg", &size); | ||
694 | |||
695 | /* QE with a fixed pram location? */ | ||
696 | if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4) | ||
697 | return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE); | ||
698 | |||
699 | /* QE but with a dynamic pram location? */ | ||
700 | if (mspi->flags & SPI_QE) { | ||
701 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
702 | qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock, | ||
703 | QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs); | ||
704 | return pram_ofs; | ||
705 | } | ||
706 | |||
707 | spi_base = of_iomap(np, 1); | ||
708 | if (spi_base == NULL) | ||
709 | return -EINVAL; | ||
710 | 565 | ||
711 | if (mspi->flags & SPI_CPM2) { | 566 | if (gpio_is_valid(spi->cs_gpio)) { |
712 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | 567 | gpio_set_value(spi->cs_gpio, on); |
713 | out_be16(spi_base, pram_ofs); | 568 | } else if (cs < mpc8xxx_spi->native_chipselects) { |
714 | } else { | 569 | slvsel = mpc8xxx_spi_read_reg(®_base->slvsel); |
715 | struct spi_pram __iomem *pram = spi_base; | 570 | slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs)); |
716 | u16 rpbase = in_be16(&pram->rpbase); | 571 | mpc8xxx_spi_write_reg(®_base->slvsel, slvsel); |
717 | |||
718 | /* Microcode relocation patch applied? */ | ||
719 | if (rpbase) | ||
720 | pram_ofs = rpbase; | ||
721 | else { | ||
722 | pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64); | ||
723 | out_be16(spi_base, pram_ofs); | ||
724 | } | ||
725 | } | 572 | } |
726 | |||
727 | iounmap(spi_base); | ||
728 | return pram_ofs; | ||
729 | } | 573 | } |
730 | 574 | ||
731 | static int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) | 575 | static void fsl_spi_grlib_probe(struct device *dev) |
732 | { | 576 | { |
733 | struct device *dev = mspi->dev; | 577 | struct fsl_spi_platform_data *pdata = dev->platform_data; |
734 | struct device_node *np = dev->of_node; | 578 | struct spi_master *master = dev_get_drvdata(dev); |
735 | const u32 *iprop; | 579 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); |
736 | int size; | 580 | struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base; |
737 | unsigned long pram_ofs; | 581 | int mbits; |
738 | unsigned long bds_ofs; | 582 | u32 capabilities; |
739 | |||
740 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
741 | return 0; | ||
742 | |||
743 | if (!fsl_spi_alloc_dummy_rx()) | ||
744 | return -ENOMEM; | ||
745 | |||
746 | if (mspi->flags & SPI_QE) { | ||
747 | iprop = of_get_property(np, "cell-index", &size); | ||
748 | if (iprop && size == sizeof(*iprop)) | ||
749 | mspi->subblock = *iprop; | ||
750 | |||
751 | switch (mspi->subblock) { | ||
752 | default: | ||
753 | dev_warn(dev, "cell-index unspecified, assuming SPI1"); | ||
754 | /* fall through */ | ||
755 | case 0: | ||
756 | mspi->subblock = QE_CR_SUBBLOCK_SPI1; | ||
757 | break; | ||
758 | case 1: | ||
759 | mspi->subblock = QE_CR_SUBBLOCK_SPI2; | ||
760 | break; | ||
761 | } | ||
762 | } | ||
763 | |||
764 | pram_ofs = fsl_spi_cpm_get_pram(mspi); | ||
765 | if (IS_ERR_VALUE(pram_ofs)) { | ||
766 | dev_err(dev, "can't allocate spi parameter ram\n"); | ||
767 | goto err_pram; | ||
768 | } | ||
769 | 583 | ||
770 | bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) + | 584 | capabilities = mpc8xxx_spi_read_reg(®_base->cap); |
771 | sizeof(*mspi->rx_bd), 8); | ||
772 | if (IS_ERR_VALUE(bds_ofs)) { | ||
773 | dev_err(dev, "can't allocate bds\n"); | ||
774 | goto err_bds; | ||
775 | } | ||
776 | 585 | ||
777 | mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE, | 586 | mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts; |
778 | DMA_TO_DEVICE); | 587 | mbits = SPCAP_MAXWLEN(capabilities); |
779 | if (dma_mapping_error(dev, mspi->dma_dummy_tx)) { | 588 | if (mbits) |
780 | dev_err(dev, "unable to map dummy tx buffer\n"); | 589 | mpc8xxx_spi->max_bits_per_word = mbits + 1; |
781 | goto err_dummy_tx; | ||
782 | } | ||
783 | 590 | ||
784 | mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR, | 591 | mpc8xxx_spi->native_chipselects = 0; |
785 | DMA_FROM_DEVICE); | 592 | if (SPCAP_SSEN(capabilities)) { |
786 | if (dma_mapping_error(dev, mspi->dma_dummy_rx)) { | 593 | mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities); |
787 | dev_err(dev, "unable to map dummy rx buffer\n"); | 594 | mpc8xxx_spi_write_reg(®_base->slvsel, 0xffffffff); |
788 | goto err_dummy_rx; | ||
789 | } | 595 | } |
790 | 596 | master->num_chipselect = mpc8xxx_spi->native_chipselects; | |
791 | mspi->pram = cpm_muram_addr(pram_ofs); | 597 | pdata->cs_control = fsl_spi_grlib_cs_control; |
792 | |||
793 | mspi->tx_bd = cpm_muram_addr(bds_ofs); | ||
794 | mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd)); | ||
795 | |||
796 | /* Initialize parameter ram. */ | ||
797 | out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd)); | ||
798 | out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd)); | ||
799 | out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL); | ||
800 | out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL); | ||
801 | out_be16(&mspi->pram->mrblr, SPI_MRBLR); | ||
802 | out_be32(&mspi->pram->rstate, 0); | ||
803 | out_be32(&mspi->pram->rdp, 0); | ||
804 | out_be16(&mspi->pram->rbptr, 0); | ||
805 | out_be16(&mspi->pram->rbc, 0); | ||
806 | out_be32(&mspi->pram->rxtmp, 0); | ||
807 | out_be32(&mspi->pram->tstate, 0); | ||
808 | out_be32(&mspi->pram->tdp, 0); | ||
809 | out_be16(&mspi->pram->tbptr, 0); | ||
810 | out_be16(&mspi->pram->tbc, 0); | ||
811 | out_be32(&mspi->pram->txtmp, 0); | ||
812 | |||
813 | return 0; | ||
814 | |||
815 | err_dummy_rx: | ||
816 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | ||
817 | err_dummy_tx: | ||
818 | cpm_muram_free(bds_ofs); | ||
819 | err_bds: | ||
820 | cpm_muram_free(pram_ofs); | ||
821 | err_pram: | ||
822 | fsl_spi_free_dummy_rx(); | ||
823 | return -ENOMEM; | ||
824 | } | ||
825 | |||
826 | static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | ||
827 | { | ||
828 | struct device *dev = mspi->dev; | ||
829 | |||
830 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
831 | return; | ||
832 | |||
833 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); | ||
834 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | ||
835 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); | ||
836 | cpm_muram_free(cpm_muram_offset(mspi->pram)); | ||
837 | fsl_spi_free_dummy_rx(); | ||
838 | } | ||
839 | |||
840 | static void fsl_spi_remove(struct mpc8xxx_spi *mspi) | ||
841 | { | ||
842 | iounmap(mspi->reg_base); | ||
843 | fsl_spi_cpm_free(mspi); | ||
844 | } | 598 | } |
845 | 599 | ||
846 | static struct spi_master * fsl_spi_probe(struct device *dev, | 600 | static struct spi_master * fsl_spi_probe(struct device *dev, |
@@ -866,27 +620,35 @@ static struct spi_master * fsl_spi_probe(struct device *dev, | |||
866 | goto err_probe; | 620 | goto err_probe; |
867 | 621 | ||
868 | master->setup = fsl_spi_setup; | 622 | master->setup = fsl_spi_setup; |
623 | master->cleanup = fsl_spi_cleanup; | ||
869 | 624 | ||
870 | mpc8xxx_spi = spi_master_get_devdata(master); | 625 | mpc8xxx_spi = spi_master_get_devdata(master); |
871 | mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; | 626 | mpc8xxx_spi->spi_do_one_msg = fsl_spi_do_one_msg; |
872 | mpc8xxx_spi->spi_remove = fsl_spi_remove; | 627 | mpc8xxx_spi->spi_remove = fsl_spi_remove; |
873 | 628 | mpc8xxx_spi->max_bits_per_word = 32; | |
629 | mpc8xxx_spi->type = fsl_spi_get_type(dev); | ||
874 | 630 | ||
875 | ret = fsl_spi_cpm_init(mpc8xxx_spi); | 631 | ret = fsl_spi_cpm_init(mpc8xxx_spi); |
876 | if (ret) | 632 | if (ret) |
877 | goto err_cpm_init; | 633 | goto err_cpm_init; |
878 | 634 | ||
879 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { | ||
880 | mpc8xxx_spi->rx_shift = 16; | ||
881 | mpc8xxx_spi->tx_shift = 24; | ||
882 | } | ||
883 | |||
884 | mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); | 635 | mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); |
885 | if (mpc8xxx_spi->reg_base == NULL) { | 636 | if (mpc8xxx_spi->reg_base == NULL) { |
886 | ret = -ENOMEM; | 637 | ret = -ENOMEM; |
887 | goto err_ioremap; | 638 | goto err_ioremap; |
888 | } | 639 | } |
889 | 640 | ||
641 | if (mpc8xxx_spi->type == TYPE_GRLIB) | ||
642 | fsl_spi_grlib_probe(dev); | ||
643 | |||
644 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) | ||
645 | mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts; | ||
646 | |||
647 | if (mpc8xxx_spi->set_shifts) | ||
648 | /* 8 bits per word and MSB first */ | ||
649 | mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift, | ||
650 | &mpc8xxx_spi->tx_shift, 8, 1); | ||
651 | |||
890 | /* Register for SPI Interrupt */ | 652 | /* Register for SPI Interrupt */ |
891 | ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, | 653 | ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq, |
892 | 0, "fsl_spi", mpc8xxx_spi); | 654 | 0, "fsl_spi", mpc8xxx_spi); |
@@ -904,6 +666,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev, | |||
904 | 666 | ||
905 | /* Enable SPI interface */ | 667 | /* Enable SPI interface */ |
906 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; | 668 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; |
669 | if (mpc8xxx_spi->max_bits_per_word < 8) { | ||
670 | regval &= ~SPMODE_LEN(0xF); | ||
671 | regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1); | ||
672 | } | ||
907 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) | 673 | if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) |
908 | regval |= SPMODE_OP; | 674 | regval |= SPMODE_OP; |
909 | 675 | ||
@@ -1047,28 +813,31 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) | |||
1047 | struct device_node *np = ofdev->dev.of_node; | 813 | struct device_node *np = ofdev->dev.of_node; |
1048 | struct spi_master *master; | 814 | struct spi_master *master; |
1049 | struct resource mem; | 815 | struct resource mem; |
1050 | struct resource irq; | 816 | int irq, type; |
1051 | int ret = -ENOMEM; | 817 | int ret = -ENOMEM; |
1052 | 818 | ||
1053 | ret = of_mpc8xxx_spi_probe(ofdev); | 819 | ret = of_mpc8xxx_spi_probe(ofdev); |
1054 | if (ret) | 820 | if (ret) |
1055 | return ret; | 821 | return ret; |
1056 | 822 | ||
1057 | ret = of_fsl_spi_get_chipselects(dev); | 823 | type = fsl_spi_get_type(&ofdev->dev); |
1058 | if (ret) | 824 | if (type == TYPE_FSL) { |
1059 | goto err; | 825 | ret = of_fsl_spi_get_chipselects(dev); |
826 | if (ret) | ||
827 | goto err; | ||
828 | } | ||
1060 | 829 | ||
1061 | ret = of_address_to_resource(np, 0, &mem); | 830 | ret = of_address_to_resource(np, 0, &mem); |
1062 | if (ret) | 831 | if (ret) |
1063 | goto err; | 832 | goto err; |
1064 | 833 | ||
1065 | ret = of_irq_to_resource(np, 0, &irq); | 834 | irq = irq_of_parse_and_map(np, 0); |
1066 | if (!ret) { | 835 | if (!irq) { |
1067 | ret = -EINVAL; | 836 | ret = -EINVAL; |
1068 | goto err; | 837 | goto err; |
1069 | } | 838 | } |
1070 | 839 | ||
1071 | master = fsl_spi_probe(dev, &mem, irq.start); | 840 | master = fsl_spi_probe(dev, &mem, irq); |
1072 | if (IS_ERR(master)) { | 841 | if (IS_ERR(master)) { |
1073 | ret = PTR_ERR(master); | 842 | ret = PTR_ERR(master); |
1074 | goto err; | 843 | goto err; |
@@ -1077,27 +846,25 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) | |||
1077 | return 0; | 846 | return 0; |
1078 | 847 | ||
1079 | err: | 848 | err: |
1080 | of_fsl_spi_free_chipselects(dev); | 849 | if (type == TYPE_FSL) |
850 | of_fsl_spi_free_chipselects(dev); | ||
1081 | return ret; | 851 | return ret; |
1082 | } | 852 | } |
1083 | 853 | ||
1084 | static int of_fsl_spi_remove(struct platform_device *ofdev) | 854 | static int of_fsl_spi_remove(struct platform_device *ofdev) |
1085 | { | 855 | { |
856 | struct spi_master *master = dev_get_drvdata(&ofdev->dev); | ||
857 | struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master); | ||
1086 | int ret; | 858 | int ret; |
1087 | 859 | ||
1088 | ret = mpc8xxx_spi_remove(&ofdev->dev); | 860 | ret = mpc8xxx_spi_remove(&ofdev->dev); |
1089 | if (ret) | 861 | if (ret) |
1090 | return ret; | 862 | return ret; |
1091 | of_fsl_spi_free_chipselects(&ofdev->dev); | 863 | if (mpc8xxx_spi->type == TYPE_FSL) |
864 | of_fsl_spi_free_chipselects(&ofdev->dev); | ||
1092 | return 0; | 865 | return 0; |
1093 | } | 866 | } |
1094 | 867 | ||
1095 | static const struct of_device_id of_fsl_spi_match[] = { | ||
1096 | { .compatible = "fsl,spi" }, | ||
1097 | {} | ||
1098 | }; | ||
1099 | MODULE_DEVICE_TABLE(of, of_fsl_spi_match); | ||
1100 | |||
1101 | static struct platform_driver of_fsl_spi_driver = { | 868 | static struct platform_driver of_fsl_spi_driver = { |
1102 | .driver = { | 869 | .driver = { |
1103 | .name = "fsl_spi", | 870 | .name = "fsl_spi", |
@@ -1134,9 +901,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev) | |||
1134 | return -EINVAL; | 901 | return -EINVAL; |
1135 | 902 | ||
1136 | master = fsl_spi_probe(&pdev->dev, mem, irq); | 903 | master = fsl_spi_probe(&pdev->dev, mem, irq); |
1137 | if (IS_ERR(master)) | 904 | return PTR_RET(master); |
1138 | return PTR_ERR(master); | ||
1139 | return 0; | ||
1140 | } | 905 | } |
1141 | 906 | ||
1142 | static int plat_mpc8xxx_spi_remove(struct platform_device *pdev) | 907 | static int plat_mpc8xxx_spi_remove(struct platform_device *pdev) |
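The shortened tail of plat_mpc8xxx_spi_probe() relies on PTR_RET(), which folds the usual IS_ERR()/PTR_ERR() check into one return; roughly what it amounts to (later kernels rename the macro PTR_ERR_OR_ZERO()):

	/* approximately equivalent open-coded form */
	master = fsl_spi_probe(&pdev->dev, mem, irq);
	if (IS_ERR(master))
		return PTR_ERR(master);	/* -errno from the probe */
	return 0;			/* success */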
diff --git a/drivers/spi/spi-fsl-spi.h b/drivers/spi/spi-fsl-spi.h new file mode 100644 index 000000000000..9a6dae00e3f3 --- /dev/null +++ b/drivers/spi/spi-fsl-spi.h | |||
@@ -0,0 +1,72 @@ | |||
1 | /* | ||
2 | * Freescale SPI controller driver. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright (C) 2006 Polycom, Inc. | ||
7 | * Copyright 2010 Freescale Semiconductor, Inc. | ||
8 | * | ||
9 | * CPM SPI and QE buffer descriptors mode support: | ||
10 | * Copyright (c) 2009 MontaVista Software, Inc. | ||
11 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | ||
12 | * | ||
13 | * GRLIB support: | ||
14 | * Copyright (c) 2012 Aeroflex Gaisler AB. | ||
15 | * Author: Andreas Larsson <andreas@gaisler.com> | ||
16 | * | ||
17 | * This program is free software; you can redistribute it and/or modify it | ||
18 | * under the terms of the GNU General Public License as published by the | ||
19 | * Free Software Foundation; either version 2 of the License, or (at your | ||
20 | * option) any later version. | ||
21 | */ | ||
22 | |||
23 | #ifndef __SPI_FSL_SPI_H__ | ||
24 | #define __SPI_FSL_SPI_H__ | ||
25 | |||
26 | /* SPI Controller registers */ | ||
27 | struct fsl_spi_reg { | ||
28 | __be32 cap; /* TYPE_GRLIB specific */ | ||
29 | u8 res1[0x1C]; | ||
30 | __be32 mode; | ||
31 | __be32 event; | ||
32 | __be32 mask; | ||
33 | __be32 command; | ||
34 | __be32 transmit; | ||
35 | __be32 receive; | ||
36 | __be32 slvsel; /* TYPE_GRLIB specific */ | ||
37 | }; | ||
38 | |||
39 | /* SPI Controller mode register definitions */ | ||
40 | #define SPMODE_LOOP (1 << 30) | ||
41 | #define SPMODE_CI_INACTIVEHIGH (1 << 29) | ||
42 | #define SPMODE_CP_BEGIN_EDGECLK (1 << 28) | ||
43 | #define SPMODE_DIV16 (1 << 27) | ||
44 | #define SPMODE_REV (1 << 26) | ||
45 | #define SPMODE_MS (1 << 25) | ||
46 | #define SPMODE_ENABLE (1 << 24) | ||
47 | #define SPMODE_LEN(x) ((x) << 20) | ||
48 | #define SPMODE_PM(x) ((x) << 16) | ||
49 | #define SPMODE_OP (1 << 14) | ||
50 | #define SPMODE_CG(x) ((x) << 7) | ||
51 | |||
52 | /* TYPE_GRLIB SPI Controller capability register definitions */ | ||
53 | #define SPCAP_SSEN(x) (((x) >> 16) & 0x1) | ||
54 | #define SPCAP_SSSZ(x) (((x) >> 24) & 0xff) | ||
55 | #define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf) | ||
56 | |||
57 | /* | ||
58 | * Default for SPI Mode: | ||
59 | * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk) | ||
60 | */ | ||
61 | #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ | ||
62 | SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) | ||
63 | |||
64 | /* SPIE register values */ | ||
65 | #define SPIE_NE 0x00000200 /* Not empty */ | ||
66 | #define SPIE_NF 0x00000100 /* Not full */ | ||
67 | |||
68 | /* SPIM register values */ | ||
69 | #define SPIM_NE 0x00000200 /* Not empty */ | ||
70 | #define SPIM_NF 0x00000100 /* Not full */ | ||
71 | |||
72 | #endif /* __SPI_FSL_SPI_H__ */ | ||
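The SPCAP_* accessors in this new header are what fsl_spi_grlib_probe() uses to size a GRLIB controller. A self-contained sketch of the decoding, in plain userspace C with a hypothetical capability value; only the bit layout comes from the header above:

#include <stdio.h>
#include <stdint.h>

#define SPCAP_SSEN(x)    (((x) >> 16) & 0x1)   /* slave-select lines present? */
#define SPCAP_SSSZ(x)    (((x) >> 24) & 0xff)  /* number of slave-select lines */
#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)   /* encoded maximum word length */

int main(void)
{
	uint32_t cap = 0x04f10000;	/* hypothetical register readback */
	int mbits = SPCAP_MAXWLEN(cap);

	/* driver rule: 0 keeps the 32-bit default, otherwise mbits + 1 */
	printf("max bits per word: %d\n", mbits ? mbits + 1 : 32);
	if (SPCAP_SSEN(cap))
		printf("native chipselects: %u\n", SPCAP_SSSZ(cap));
	return 0;
}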
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c index 9ddef55a7165..0021fc4c45bc 100644 --- a/drivers/spi/spi-gpio.c +++ b/drivers/spi/spi-gpio.c | |||
@@ -265,9 +265,9 @@ static int spi_gpio_setup(struct spi_device *spi) | |||
265 | } | 265 | } |
266 | } | 266 | } |
267 | if (!status) { | 267 | if (!status) { |
268 | status = spi_bitbang_setup(spi); | ||
269 | /* in case it was initialized from static board data */ | 268 | /* in case it was initialized from static board data */ |
270 | spi_gpio->cs_gpios[spi->chip_select] = cs; | 269 | spi_gpio->cs_gpios[spi->chip_select] = cs; |
270 | status = spi_bitbang_setup(spi); | ||
271 | } | 271 | } |
272 | 272 | ||
273 | if (status) { | 273 | if (status) { |
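The spi-gpio reorder matters because spi_bitbang_setup() is assumed to drive the chip-select to its inactive level through the driver's chipselect callback, which looks the GPIO up in cs_gpios[]; recording the GPIO only afterwards could let that first deselect act on a stale entry. Sketch of the corrected ordering:

	if (!status) {
		/* publish the GPIO first so any deselect issued from
		 * spi_bitbang_setup() already operates on the right line */
		spi_gpio->cs_gpios[spi->chip_select] = cs;
		status = spi_bitbang_setup(spi);
	}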
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c index 3e490ee7f275..dfddf336912d 100644 --- a/drivers/spi/spi-mpc512x-psc.c +++ b/drivers/spi/spi-mpc512x-psc.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/clk.h> | 28 | #include <linux/clk.h> |
29 | #include <linux/spi/spi.h> | 29 | #include <linux/spi/spi.h> |
30 | #include <linux/fsl_devices.h> | 30 | #include <linux/fsl_devices.h> |
31 | #include <linux/gpio.h> | ||
31 | #include <asm/mpc52xx_psc.h> | 32 | #include <asm/mpc52xx_psc.h> |
32 | 33 | ||
33 | struct mpc512x_psc_spi { | 34 | struct mpc512x_psc_spi { |
@@ -113,7 +114,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi) | |||
113 | out_be32(&psc->ccr, ccr); | 114 | out_be32(&psc->ccr, ccr); |
114 | mps->bits_per_word = cs->bits_per_word; | 115 | mps->bits_per_word = cs->bits_per_word; |
115 | 116 | ||
116 | if (mps->cs_control) | 117 | if (mps->cs_control && gpio_is_valid(spi->cs_gpio)) |
117 | mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); | 118 | mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0); |
118 | } | 119 | } |
119 | 120 | ||
@@ -121,7 +122,7 @@ static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi) | |||
121 | { | 122 | { |
122 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); | 123 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); |
123 | 124 | ||
124 | if (mps->cs_control) | 125 | if (mps->cs_control && gpio_is_valid(spi->cs_gpio)) |
125 | mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); | 126 | mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1); |
126 | 127 | ||
127 | } | 128 | } |
@@ -148,6 +149,9 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
148 | in_8(&psc->mode); | 149 | in_8(&psc->mode); |
149 | out_8(&psc->mode, 0x0); | 150 | out_8(&psc->mode, 0x0); |
150 | 151 | ||
152 | /* enable transmitter/receiver */ | ||
153 | out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); | ||
154 | |||
151 | while (len) { | 155 | while (len) { |
152 | int count; | 156 | int count; |
153 | int i; | 157 | int i; |
@@ -176,10 +180,6 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
176 | out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); | 180 | out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY); |
177 | out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); | 181 | out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY); |
178 | 182 | ||
179 | /* enable transmitter/receiver */ | ||
180 | out_8(&psc->command, | ||
181 | MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE); | ||
182 | |||
183 | wait_for_completion(&mps->done); | 183 | wait_for_completion(&mps->done); |
184 | 184 | ||
185 | mdelay(1); | 185 | mdelay(1); |
@@ -204,9 +204,6 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi, | |||
204 | while (in_be32(&fifo->rxcnt)) { | 204 | while (in_be32(&fifo->rxcnt)) { |
205 | in_8(&fifo->rxdata_8); | 205 | in_8(&fifo->rxdata_8); |
206 | } | 206 | } |
207 | |||
208 | out_8(&psc->command, | ||
209 | MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); | ||
210 | } | 207 | } |
211 | /* disable transmitter/receiver and fifo interrupt */ | 208 |
212 | out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); | 209 | out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE); |
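Taken together, the two hunks above move the TX/RX enable in front of the chunked FIFO loop and drop the per-chunk disable, so the transmitter and receiver stay enabled for the whole transfer and are switched off exactly once afterwards. In outline:

	/* enable once, before walking the transfer in FIFO-sized chunks */
	out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
	while (len) {
		/* ...queue a chunk, wait for completion, drain the RX FIFO... */
	}
	/* disable once, together with the FIFO interrupt, after the loop */
	out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);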
@@ -278,6 +275,7 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi) | |||
278 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); | 275 | struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master); |
279 | struct mpc512x_psc_spi_cs *cs = spi->controller_state; | 276 | struct mpc512x_psc_spi_cs *cs = spi->controller_state; |
280 | unsigned long flags; | 277 | unsigned long flags; |
278 | int ret; | ||
281 | 279 | ||
282 | if (spi->bits_per_word % 8) | 280 | if (spi->bits_per_word % 8) |
283 | return -EINVAL; | 281 | return -EINVAL; |
@@ -286,6 +284,19 @@ static int mpc512x_psc_spi_setup(struct spi_device *spi) | |||
286 | cs = kzalloc(sizeof *cs, GFP_KERNEL); | 284 | cs = kzalloc(sizeof *cs, GFP_KERNEL); |
287 | if (!cs) | 285 | if (!cs) |
288 | return -ENOMEM; | 286 | return -ENOMEM; |
287 | |||
288 | if (gpio_is_valid(spi->cs_gpio)) { | ||
289 | ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); | ||
290 | if (ret) { | ||
291 | dev_err(&spi->dev, "can't get CS gpio: %d\n", | ||
292 | ret); | ||
293 | kfree(cs); | ||
294 | return ret; | ||
295 | } | ||
296 | gpio_direction_output(spi->cs_gpio, | ||
297 | spi->mode & SPI_CS_HIGH ? 0 : 1); | ||
298 | } | ||
299 | |||
289 | spi->controller_state = cs; | 300 | spi->controller_state = cs; |
290 | } | 301 | } |
291 | 302 | ||
@@ -319,6 +330,8 @@ static int mpc512x_psc_spi_transfer(struct spi_device *spi, | |||
319 | 330 | ||
320 | static void mpc512x_psc_spi_cleanup(struct spi_device *spi) | 331 | static void mpc512x_psc_spi_cleanup(struct spi_device *spi) |
321 | { | 332 | { |
333 | if (gpio_is_valid(spi->cs_gpio)) | ||
334 | gpio_free(spi->cs_gpio); | ||
322 | kfree(spi->controller_state); | 335 | kfree(spi->controller_state); |
323 | } | 336 | } |
324 | 337 | ||
@@ -405,6 +418,11 @@ static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id) | |||
405 | return IRQ_NONE; | 418 | return IRQ_NONE; |
406 | } | 419 | } |
407 | 420 | ||
421 | static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff) | ||
422 | { | ||
423 | gpio_set_value(spi->cs_gpio, onoff); | ||
424 | } | ||
425 | |||
408 | /* bus_num is used only for the case dev->platform_data == NULL */ | 426 | /* bus_num is used only for the case dev->platform_data == NULL */ |
409 | static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | 427 | static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, |
410 | u32 size, unsigned int irq, | 428 | u32 size, unsigned int irq, |
@@ -425,12 +443,9 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr, | |||
425 | mps->irq = irq; | 443 | mps->irq = irq; |
426 | 444 | ||
427 | if (pdata == NULL) { | 445 | if (pdata == NULL) { |
428 | dev_err(dev, "probe called without platform data, no " | 446 | mps->cs_control = mpc512x_spi_cs_control; |
429 | "cs_control function will be called\n"); | ||
430 | mps->cs_control = NULL; | ||
431 | mps->sysclk = 0; | 447 | mps->sysclk = 0; |
432 | master->bus_num = bus_num; | 448 | master->bus_num = bus_num; |
433 | master->num_chipselect = 255; | ||
434 | } else { | 449 | } else { |
435 | mps->cs_control = pdata->cs_control; | 450 | mps->cs_control = pdata->cs_control; |
436 | mps->sysclk = pdata->sysclk; | 451 | mps->sysclk = pdata->sysclk; |
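With no platform data, the PSC driver now falls back to GPIO chip-selects instead of leaving cs_control unset. The lifecycle is spread over three hunks above; condensed, and using only the calls they introduce:

/* setup(): claim the line and park it at the deselected level */
if (gpio_is_valid(spi->cs_gpio)) {
	ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
	if (ret)
		return ret;
	gpio_direction_output(spi->cs_gpio,
			      spi->mode & SPI_CS_HIGH ? 0 : 1);
}

/* activate/deactivate: the default cs_control simply toggles the line */
static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
{
	gpio_set_value(spi->cs_gpio, onoff);
}

/* cleanup(): release the line before freeing the controller state */
if (gpio_is_valid(spi->cs_gpio))
	gpio_free(spi->cs_gpio);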
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 22a0af0147fb..a1d5778e2bbb 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c | |||
@@ -612,6 +612,7 @@ static int mxs_spi_probe(struct platform_device *pdev) | |||
612 | ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp); | 612 | ssp->dmach = dma_request_channel(mask, mxs_ssp_dma_filter, ssp); |
613 | if (!ssp->dmach) { | 613 | if (!ssp->dmach) { |
614 | dev_err(ssp->dev, "Failed to request DMA\n"); | 614 | dev_err(ssp->dev, "Failed to request DMA\n"); |
615 | ret = -ENODEV; | ||
615 | goto out_master_free; | 616 | goto out_master_free; |
616 | } | 617 | } |
617 | 618 | ||
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index cb2e284bd814..e60a776ed2d4 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c | |||
@@ -393,8 +393,6 @@ static const struct of_device_id tiny_spi_match[] = { | |||
393 | {}, | 393 | {}, |
394 | }; | 394 | }; |
395 | MODULE_DEVICE_TABLE(of, tiny_spi_match); | 395 | MODULE_DEVICE_TABLE(of, tiny_spi_match); |
396 | #else /* CONFIG_OF */ | ||
397 | #define tiny_spi_match NULL | ||
398 | #endif /* CONFIG_OF */ | 396 | #endif /* CONFIG_OF */ |
399 | 397 | ||
400 | static struct platform_driver tiny_spi_driver = { | 398 | static struct platform_driver tiny_spi_driver = { |
@@ -404,7 +402,7 @@ static struct platform_driver tiny_spi_driver = { | |||
404 | .name = DRV_NAME, | 402 | .name = DRV_NAME, |
405 | .owner = THIS_MODULE, | 403 | .owner = THIS_MODULE, |
406 | .pm = NULL, | 404 | .pm = NULL, |
407 | .of_match_table = tiny_spi_match, | 405 | .of_match_table = of_match_ptr(tiny_spi_match), |
408 | }, | 406 | }, |
409 | }; | 407 | }; |
410 | module_platform_driver(tiny_spi_driver); | 408 | module_platform_driver(tiny_spi_driver); |
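Dropping the #else stub that defined tiny_spi_match as NULL works because of_match_ptr() already degrades to NULL when CONFIG_OF is disabled; from linux/of.h, roughly:

#ifdef CONFIG_OF
#define of_match_ptr(_ptr)	(_ptr)
#else
#define of_match_ptr(_ptr)	NULL
#endif

	/* safe whether or not the OF match table was compiled in */
	.of_match_table = of_match_ptr(tiny_spi_match),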
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 893c3d78e426..86d2158946bb 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c | |||
@@ -285,8 +285,12 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) | |||
285 | 285 | ||
286 | timeout = jiffies + msecs_to_jiffies(1000); | 286 | timeout = jiffies + msecs_to_jiffies(1000); |
287 | while (!(__raw_readl(reg) & bit)) { | 287 | while (!(__raw_readl(reg) & bit)) { |
288 | if (time_after(jiffies, timeout)) | 288 | if (time_after(jiffies, timeout)) { |
289 | return -1; | 289 | if (!(__raw_readl(reg) & bit)) |
290 | return -ETIMEDOUT; | ||
291 | else | ||
292 | return 0; | ||
293 | } | ||
290 | cpu_relax(); | 294 | cpu_relax(); |
291 | } | 295 | } |
292 | return 0; | 296 | return 0; |
@@ -805,6 +809,10 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, | |||
805 | return 0; | 809 | return 0; |
806 | } | 810 | } |
807 | 811 | ||
812 | /* | ||
813 | * Note that we currently allow DMA only if we get a channel | ||
814 | * for both rx and tx. Otherwise we'll do PIO for both rx and tx. | ||
815 | */ | ||
808 | static int omap2_mcspi_request_dma(struct spi_device *spi) | 816 | static int omap2_mcspi_request_dma(struct spi_device *spi) |
809 | { | 817 | { |
810 | struct spi_master *master = spi->master; | 818 | struct spi_master *master = spi->master; |
@@ -823,21 +831,22 @@ static int omap2_mcspi_request_dma(struct spi_device *spi) | |||
823 | dma_cap_set(DMA_SLAVE, mask); | 831 | dma_cap_set(DMA_SLAVE, mask); |
824 | sig = mcspi_dma->dma_rx_sync_dev; | 832 | sig = mcspi_dma->dma_rx_sync_dev; |
825 | mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); | 833 | mcspi_dma->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); |
826 | if (!mcspi_dma->dma_rx) { | 834 | if (!mcspi_dma->dma_rx) |
827 | dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n"); | 835 | goto no_dma; |
828 | return -EAGAIN; | ||
829 | } | ||
830 | 836 | ||
831 | sig = mcspi_dma->dma_tx_sync_dev; | 837 | sig = mcspi_dma->dma_tx_sync_dev; |
832 | mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); | 838 | mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); |
833 | if (!mcspi_dma->dma_tx) { | 839 | if (!mcspi_dma->dma_tx) { |
834 | dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n"); | ||
835 | dma_release_channel(mcspi_dma->dma_rx); | 840 | dma_release_channel(mcspi_dma->dma_rx); |
836 | mcspi_dma->dma_rx = NULL; | 841 | mcspi_dma->dma_rx = NULL; |
837 | return -EAGAIN; | 842 | goto no_dma; |
838 | } | 843 | } |
839 | 844 | ||
840 | return 0; | 845 | return 0; |
846 | |||
847 | no_dma: | ||
848 | dev_warn(&spi->dev, "not using DMA for McSPI\n"); | ||
849 | return -EAGAIN; | ||
841 | } | 850 | } |
842 | 851 | ||
843 | static int omap2_mcspi_setup(struct spi_device *spi) | 852 | static int omap2_mcspi_setup(struct spi_device *spi) |
@@ -870,7 +879,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) | |||
870 | 879 | ||
871 | if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { | 880 | if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { |
872 | ret = omap2_mcspi_request_dma(spi); | 881 | ret = omap2_mcspi_request_dma(spi); |
873 | if (ret < 0) | 882 | if (ret < 0 && ret != -EAGAIN) |
874 | return ret; | 883 | return ret; |
875 | } | 884 | } |
876 | 885 | ||
@@ -928,6 +937,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) | |||
928 | struct spi_device *spi; | 937 | struct spi_device *spi; |
929 | struct spi_transfer *t = NULL; | 938 | struct spi_transfer *t = NULL; |
930 | struct spi_master *master; | 939 | struct spi_master *master; |
940 | struct omap2_mcspi_dma *mcspi_dma; | ||
931 | int cs_active = 0; | 941 | int cs_active = 0; |
932 | struct omap2_mcspi_cs *cs; | 942 | struct omap2_mcspi_cs *cs; |
933 | struct omap2_mcspi_device_config *cd; | 943 | struct omap2_mcspi_device_config *cd; |
@@ -937,6 +947,7 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) | |||
937 | 947 | ||
938 | spi = m->spi; | 948 | spi = m->spi; |
939 | master = spi->master; | 949 | master = spi->master; |
950 | mcspi_dma = mcspi->dma_channels + spi->chip_select; | ||
940 | cs = spi->controller_state; | 951 | cs = spi->controller_state; |
941 | cd = spi->controller_data; | 952 | cd = spi->controller_data; |
942 | 953 | ||
@@ -993,7 +1004,8 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) | |||
993 | __raw_writel(0, cs->base | 1004 | __raw_writel(0, cs->base |
994 | + OMAP2_MCSPI_TX0); | 1005 | + OMAP2_MCSPI_TX0); |
995 | 1006 | ||
996 | if (m->is_dma_mapped || t->len >= DMA_MIN_BYTES) | 1007 | if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) && |
1008 | (m->is_dma_mapped || t->len >= DMA_MIN_BYTES)) | ||
997 | count = omap2_mcspi_txrx_dma(spi, t); | 1009 | count = omap2_mcspi_txrx_dma(spi, t); |
998 | else | 1010 | else |
999 | count = omap2_mcspi_txrx_pio(spi, t); | 1011 | count = omap2_mcspi_txrx_pio(spi, t); |
@@ -1040,10 +1052,14 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m) | |||
1040 | static int omap2_mcspi_transfer_one_message(struct spi_master *master, | 1052 | static int omap2_mcspi_transfer_one_message(struct spi_master *master, |
1041 | struct spi_message *m) | 1053 | struct spi_message *m) |
1042 | { | 1054 | { |
1055 | struct spi_device *spi; | ||
1043 | struct omap2_mcspi *mcspi; | 1056 | struct omap2_mcspi *mcspi; |
1057 | struct omap2_mcspi_dma *mcspi_dma; | ||
1044 | struct spi_transfer *t; | 1058 | struct spi_transfer *t; |
1045 | 1059 | ||
1060 | spi = m->spi; | ||
1046 | mcspi = spi_master_get_devdata(master); | 1061 | mcspi = spi_master_get_devdata(master); |
1062 | mcspi_dma = mcspi->dma_channels + spi->chip_select; | ||
1047 | m->actual_length = 0; | 1063 | m->actual_length = 0; |
1048 | m->status = 0; | 1064 | m->status = 0; |
1049 | 1065 | ||
@@ -1078,7 +1094,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1078 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) | 1094 | if (m->is_dma_mapped || len < DMA_MIN_BYTES) |
1079 | continue; | 1095 | continue; |
1080 | 1096 | ||
1081 | if (tx_buf != NULL) { | 1097 | if (mcspi_dma->dma_tx && tx_buf != NULL) { |
1082 | t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf, | 1098 | t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf, |
1083 | len, DMA_TO_DEVICE); | 1099 | len, DMA_TO_DEVICE); |
1084 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { | 1100 | if (dma_mapping_error(mcspi->dev, t->tx_dma)) { |
@@ -1087,7 +1103,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master, | |||
1087 | return -EINVAL; | 1103 | return -EINVAL; |
1088 | } | 1104 | } |
1089 | } | 1105 | } |
1090 | if (rx_buf != NULL) { | 1106 | if (mcspi_dma->dma_rx && rx_buf != NULL) { |
1091 | t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len, | 1107 | t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len, |
1092 | DMA_FROM_DEVICE); | 1108 | DMA_FROM_DEVICE); |
1093 | if (dma_mapping_error(mcspi->dev, t->rx_dma)) { | 1109 | if (dma_mapping_error(mcspi->dev, t->rx_dma)) { |
@@ -1277,7 +1293,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev) | |||
1277 | pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); | 1293 | pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT); |
1278 | pm_runtime_enable(&pdev->dev); | 1294 | pm_runtime_enable(&pdev->dev); |
1279 | 1295 | ||
1280 | if (status || omap2_mcspi_master_setup(mcspi) < 0) | 1296 | status = omap2_mcspi_master_setup(mcspi); |
1297 | if (status < 0) | ||
1281 | goto disable_pm; | 1298 | goto disable_pm; |
1282 | 1299 | ||
1283 | status = spi_register_master(master); | 1300 | status = spi_register_master(master); |
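The common thread in the omap2-mcspi hunks is that missing DMA channels are no longer fatal: omap2_mcspi_request_dma() warns and returns -EAGAIN, setup() tolerates that, and both the mapping and transfer paths check the channels before choosing DMA. The per-transfer decision then reduces to:

	/* DMA only when both channels exist and the transfer is worth it,
	 * otherwise fall back to PIO for rx and tx alike */
	if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
	    (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
		count = omap2_mcspi_txrx_dma(spi, t);
	else
		count = omap2_mcspi_txrx_pio(spi, t);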
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c index 364964d2ed04..74bc18775658 100644 --- a/drivers/spi/spi-pxa2xx-pci.c +++ b/drivers/spi/spi-pxa2xx-pci.c | |||
@@ -22,7 +22,7 @@ static int ce4100_spi_probe(struct pci_dev *dev, | |||
22 | return ret; | 22 | return ret; |
23 | 23 | ||
24 | ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI"); | 24 | ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI"); |
25 | if (!ret) | 25 | if (ret) |
26 | return ret; | 26 | return ret; |
27 | 27 | ||
28 | memset(&spi_pdata, 0, sizeof(spi_pdata)); | 28 | memset(&spi_pdata, 0, sizeof(spi_pdata)); |
@@ -47,8 +47,8 @@ static int ce4100_spi_probe(struct pci_dev *dev, | |||
47 | pi.size_data = sizeof(spi_pdata); | 47 | pi.size_data = sizeof(spi_pdata); |
48 | 48 | ||
49 | pdev = platform_device_register_full(&pi); | 49 | pdev = platform_device_register_full(&pi); |
50 | if (!pdev) | 50 | if (IS_ERR(pdev)) |
51 | return -ENOMEM; | 51 | return PTR_ERR(pdev); |
52 | 52 | ||
53 | pci_set_drvdata(dev, pdev); | 53 | pci_set_drvdata(dev, pdev); |
54 | 54 | ||
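Both ce4100_spi_probe() fixes match each helper's return convention: pcim_iomap_regions() returns 0 on success, so the old inverted test returned early on success and ignored real failures, while platform_device_register_full() reports failure as an ERR_PTR-encoded pointer rather than NULL. The corrected pattern:

	ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
	if (ret)			/* non-zero means failure */
		return ret;

	pdev = platform_device_register_full(&pi);
	if (IS_ERR(pdev))		/* errors come back as ERR_PTR(-errno) */
		return PTR_ERR(pdev);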
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 810413883c79..f5d84d6f8222 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/err.h> | ||
25 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
26 | #include <linux/platform_device.h> | 27 | #include <linux/platform_device.h> |
27 | #include <linux/spi/pxa2xx_spi.h> | 28 | #include <linux/spi/pxa2xx_spi.h> |
@@ -68,6 +69,7 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
68 | #define LPSS_TX_HITHRESH_DFLT 224 | 69 | #define LPSS_TX_HITHRESH_DFLT 224 |
69 | 70 | ||
70 | /* Offset from drv_data->lpss_base */ | 71 | /* Offset from drv_data->lpss_base */ |
72 | #define SSP_REG 0x0c | ||
71 | #define SPI_CS_CONTROL 0x18 | 73 | #define SPI_CS_CONTROL 0x18 |
72 | #define SPI_CS_CONTROL_SW_MODE BIT(0) | 74 | #define SPI_CS_CONTROL_SW_MODE BIT(0) |
73 | #define SPI_CS_CONTROL_CS_HIGH BIT(1) | 75 | #define SPI_CS_CONTROL_CS_HIGH BIT(1) |
@@ -138,6 +140,10 @@ detection_done: | |||
138 | /* Enable software chip select control */ | 140 | /* Enable software chip select control */ |
139 | value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH; | 141 | value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH; |
140 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); | 142 | __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value); |
143 | |||
144 | /* Enable multiblock DMA transfers */ | ||
145 | if (drv_data->master_info->enable_dma) | ||
146 | __lpss_ssp_write_priv(drv_data, SSP_REG, 1); | ||
141 | } | 147 | } |
142 | 148 | ||
143 | static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) | 149 | static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable) |
@@ -1083,11 +1089,9 @@ pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev) | |||
1083 | ssp = &pdata->ssp; | 1089 | ssp = &pdata->ssp; |
1084 | 1090 | ||
1085 | ssp->phys_base = res->start; | 1091 | ssp->phys_base = res->start; |
1086 | ssp->mmio_base = devm_request_and_ioremap(&pdev->dev, res); | 1092 | ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res); |
1087 | if (!ssp->mmio_base) { | 1093 | if (IS_ERR(ssp->mmio_base)) |
1088 | dev_err(&pdev->dev, "failed to ioremap mmio_base\n"); | 1094 | return PTR_ERR(ssp->mmio_base); |
1089 | return NULL; | ||
1090 | } | ||
1091 | 1095 | ||
1092 | ssp->clk = devm_clk_get(&pdev->dev, NULL); | 1096 | ssp->clk = devm_clk_get(&pdev->dev, NULL); |
1093 | ssp->irq = platform_get_irq(pdev, 0); | 1097 | ssp->irq = platform_get_irq(pdev, 0); |
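devm_request_and_ioremap() signalled failure with NULL, while its replacement devm_ioremap_resource() returns an ERR_PTR-encoded error and logs the failure itself, which is why the caller's dev_err() and NULL check go away. The resulting idiom:

	ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ssp->mmio_base))
		return PTR_ERR(ssp->mmio_base);	/* helper already printed why */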
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index 4188b2faac5c..5000586cb98d 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
25 | #include <linux/clk.h> | 25 | #include <linux/clk.h> |
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | #include <linux/dmaengine.h> | ||
27 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
28 | #include <linux/pm_runtime.h> | 29 | #include <linux/pm_runtime.h> |
29 | #include <linux/spi/spi.h> | 30 | #include <linux/spi/spi.h> |
@@ -31,9 +32,12 @@ | |||
31 | #include <linux/of.h> | 32 | #include <linux/of.h> |
32 | #include <linux/of_gpio.h> | 33 | #include <linux/of_gpio.h> |
33 | 34 | ||
34 | #include <mach/dma.h> | ||
35 | #include <linux/platform_data/spi-s3c64xx.h> | 35 | #include <linux/platform_data/spi-s3c64xx.h> |
36 | 36 | ||
37 | #ifdef CONFIG_S3C_DMA | ||
38 | #include <mach/dma.h> | ||
39 | #endif | ||
40 | |||
37 | #define MAX_SPI_PORTS 3 | 41 | #define MAX_SPI_PORTS 3 |
38 | 42 | ||
39 | /* Registers and bit-fields */ | 43 | /* Registers and bit-fields */ |
@@ -131,9 +135,9 @@ | |||
131 | #define TXBUSY (1<<3) | 135 | #define TXBUSY (1<<3) |
132 | 136 | ||
133 | struct s3c64xx_spi_dma_data { | 137 | struct s3c64xx_spi_dma_data { |
134 | unsigned ch; | 138 | struct dma_chan *ch; |
135 | enum dma_transfer_direction direction; | 139 | enum dma_transfer_direction direction; |
136 | enum dma_ch dmach; | 140 | unsigned int dmach; |
137 | }; | 141 | }; |
138 | 142 | ||
139 | /** | 143 | /** |
@@ -195,16 +199,14 @@ struct s3c64xx_spi_driver_data { | |||
195 | unsigned cur_speed; | 199 | unsigned cur_speed; |
196 | struct s3c64xx_spi_dma_data rx_dma; | 200 | struct s3c64xx_spi_dma_data rx_dma; |
197 | struct s3c64xx_spi_dma_data tx_dma; | 201 | struct s3c64xx_spi_dma_data tx_dma; |
202 | #ifdef CONFIG_S3C_DMA | ||
198 | struct samsung_dma_ops *ops; | 203 | struct samsung_dma_ops *ops; |
204 | #endif | ||
199 | struct s3c64xx_spi_port_config *port_conf; | 205 | struct s3c64xx_spi_port_config *port_conf; |
200 | unsigned int port_id; | 206 | unsigned int port_id; |
201 | unsigned long gpios[4]; | 207 | unsigned long gpios[4]; |
202 | }; | 208 | }; |
203 | 209 | ||
204 | static struct s3c2410_dma_client s3c64xx_spi_dma_client = { | ||
205 | .name = "samsung-spi-dma", | ||
206 | }; | ||
207 | |||
208 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) | 210 | static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) |
209 | { | 211 | { |
210 | void __iomem *regs = sdd->regs; | 212 | void __iomem *regs = sdd->regs; |
@@ -281,6 +283,13 @@ static void s3c64xx_spi_dmacb(void *data) | |||
281 | spin_unlock_irqrestore(&sdd->lock, flags); | 283 | spin_unlock_irqrestore(&sdd->lock, flags); |
282 | } | 284 | } |
283 | 285 | ||
286 | #ifdef CONFIG_S3C_DMA | ||
287 | /* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */ | ||
288 | |||
289 | static struct s3c2410_dma_client s3c64xx_spi_dma_client = { | ||
290 | .name = "samsung-spi-dma", | ||
291 | }; | ||
292 | |||
284 | static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | 293 | static void prepare_dma(struct s3c64xx_spi_dma_data *dma, |
285 | unsigned len, dma_addr_t buf) | 294 | unsigned len, dma_addr_t buf) |
286 | { | 295 | { |
@@ -294,14 +303,14 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | |||
294 | config.direction = sdd->rx_dma.direction; | 303 | config.direction = sdd->rx_dma.direction; |
295 | config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; | 304 | config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; |
296 | config.width = sdd->cur_bpw / 8; | 305 | config.width = sdd->cur_bpw / 8; |
297 | sdd->ops->config(sdd->rx_dma.ch, &config); | 306 | sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config); |
298 | } else { | 307 | } else { |
299 | sdd = container_of((void *)dma, | 308 | sdd = container_of((void *)dma, |
300 | struct s3c64xx_spi_driver_data, tx_dma); | 309 | struct s3c64xx_spi_driver_data, tx_dma); |
301 | config.direction = sdd->tx_dma.direction; | 310 | config.direction = sdd->tx_dma.direction; |
302 | config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; | 311 | config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; |
303 | config.width = sdd->cur_bpw / 8; | 312 | config.width = sdd->cur_bpw / 8; |
304 | sdd->ops->config(sdd->tx_dma.ch, &config); | 313 | sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config); |
305 | } | 314 | } |
306 | 315 | ||
307 | info.cap = DMA_SLAVE; | 316 | info.cap = DMA_SLAVE; |
@@ -311,8 +320,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | |||
311 | info.direction = dma->direction; | 320 | info.direction = dma->direction; |
312 | info.buf = buf; | 321 | info.buf = buf; |
313 | 322 | ||
314 | sdd->ops->prepare(dma->ch, &info); | 323 | sdd->ops->prepare((enum dma_ch)dma->ch, &info); |
315 | sdd->ops->trigger(dma->ch); | 324 | sdd->ops->trigger((enum dma_ch)dma->ch); |
316 | } | 325 | } |
317 | 326 | ||
318 | static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) | 327 | static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) |
@@ -325,12 +334,150 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) | |||
325 | req.cap = DMA_SLAVE; | 334 | req.cap = DMA_SLAVE; |
326 | req.client = &s3c64xx_spi_dma_client; | 335 | req.client = &s3c64xx_spi_dma_client; |
327 | 336 | ||
328 | sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx"); | 337 | sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx"); |
329 | sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx"); | 338 | sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx"); |
330 | 339 | ||
331 | return 1; | 340 | return 1; |
332 | } | 341 | } |
333 | 342 | ||
343 | static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | ||
344 | { | ||
345 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
346 | |||
347 | /* Acquire DMA channels */ | ||
348 | while (!acquire_dma(sdd)) | ||
349 | usleep_range(10000, 11000); | ||
350 | |||
351 | pm_runtime_get_sync(&sdd->pdev->dev); | ||
352 | |||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) | ||
357 | { | ||
358 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
359 | |||
360 | /* Free DMA channels */ | ||
361 | sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client); | ||
362 | sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client); | ||
363 | |||
364 | pm_runtime_put(&sdd->pdev->dev); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd, | ||
370 | struct s3c64xx_spi_dma_data *dma) | ||
371 | { | ||
372 | sdd->ops->stop((enum dma_ch)dma->ch); | ||
373 | } | ||
374 | #else | ||
375 | |||
376 | static void prepare_dma(struct s3c64xx_spi_dma_data *dma, | ||
377 | unsigned len, dma_addr_t buf) | ||
378 | { | ||
379 | struct s3c64xx_spi_driver_data *sdd; | ||
380 | struct dma_slave_config config; | ||
381 | struct scatterlist sg; | ||
382 | struct dma_async_tx_descriptor *desc; | ||
383 | |||
384 | if (dma->direction == DMA_DEV_TO_MEM) { | ||
385 | sdd = container_of((void *)dma, | ||
386 | struct s3c64xx_spi_driver_data, rx_dma); | ||
387 | config.direction = dma->direction; | ||
388 | config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA; | ||
389 | config.src_addr_width = sdd->cur_bpw / 8; | ||
390 | config.src_maxburst = 1; | ||
391 | dmaengine_slave_config(dma->ch, &config); | ||
392 | } else { | ||
393 | sdd = container_of((void *)dma, | ||
394 | struct s3c64xx_spi_driver_data, tx_dma); | ||
395 | config.direction = dma->direction; | ||
396 | config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA; | ||
397 | config.dst_addr_width = sdd->cur_bpw / 8; | ||
398 | config.dst_maxburst = 1; | ||
399 | dmaengine_slave_config(dma->ch, &config); | ||
400 | } | ||
401 | |||
402 | sg_init_table(&sg, 1); | ||
403 | sg_dma_len(&sg) = len; | ||
404 | sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)), | ||
405 | len, offset_in_page(buf)); | ||
406 | sg_dma_address(&sg) = buf; | ||
407 | |||
408 | desc = dmaengine_prep_slave_sg(dma->ch, | ||
409 | &sg, 1, dma->direction, DMA_PREP_INTERRUPT); | ||
410 | |||
411 | desc->callback = s3c64xx_spi_dmacb; | ||
412 | desc->callback_param = dma; | ||
413 | |||
414 | dmaengine_submit(desc); | ||
415 | dma_async_issue_pending(dma->ch); | ||
416 | } | ||
417 | |||
418 | static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | ||
419 | { | ||
420 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
421 | dma_filter_fn filter = sdd->cntrlr_info->filter; | ||
422 | struct device *dev = &sdd->pdev->dev; | ||
423 | dma_cap_mask_t mask; | ||
424 | int ret; | ||
425 | |||
426 | dma_cap_zero(mask); | ||
427 | dma_cap_set(DMA_SLAVE, mask); | ||
428 | |||
429 | /* Acquire DMA channels */ | ||
430 | sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter, | ||
431 | (void*)sdd->rx_dma.dmach, dev, "rx"); | ||
432 | if (!sdd->rx_dma.ch) { | ||
433 | dev_err(dev, "Failed to get RX DMA channel\n"); | ||
434 | ret = -EBUSY; | ||
435 | goto out; | ||
436 | } | ||
437 | |||
438 | sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter, | ||
439 | (void*)sdd->tx_dma.dmach, dev, "tx"); | ||
440 | if (!sdd->tx_dma.ch) { | ||
441 | dev_err(dev, "Failed to get TX DMA channel\n"); | ||
442 | ret = -EBUSY; | ||
443 | goto out_rx; | ||
444 | } | ||
445 | |||
446 | ret = pm_runtime_get_sync(&sdd->pdev->dev); | ||
447 | if (ret != 0) { | ||
448 | dev_err(dev, "Failed to enable device: %d\n", ret); | ||
449 | goto out_tx; | ||
450 | } | ||
451 | |||
452 | return 0; | ||
453 | |||
454 | out_tx: | ||
455 | dma_release_channel(sdd->tx_dma.ch); | ||
456 | out_rx: | ||
457 | dma_release_channel(sdd->rx_dma.ch); | ||
458 | out: | ||
459 | return ret; | ||
460 | } | ||
461 | |||
462 | static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) | ||
463 | { | ||
464 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
465 | |||
466 | /* Free DMA channels */ | ||
467 | dma_release_channel(sdd->rx_dma.ch); | ||
468 | dma_release_channel(sdd->tx_dma.ch); | ||
469 | |||
470 | pm_runtime_put(&sdd->pdev->dev); | ||
471 | return 0; | ||
472 | } | ||
473 | |||
474 | static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd, | ||
475 | struct s3c64xx_spi_dma_data *dma) | ||
476 | { | ||
477 | dmaengine_terminate_all(dma->ch); | ||
478 | } | ||
479 | #endif | ||
480 | |||
334 | static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, | 481 | static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, |
335 | struct spi_device *spi, | 482 | struct spi_device *spi, |
336 | struct spi_transfer *xfer, int dma_mode) | 483 | struct spi_transfer *xfer, int dma_mode) |
@@ -713,9 +860,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master, | |||
713 | } | 860 | } |
714 | 861 | ||
715 | /* Polling method for xfers not bigger than FIFO capacity */ | 862 | /* Polling method for xfers not bigger than FIFO capacity */ |
716 | if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1)) | 863 | use_dma = 0; |
717 | use_dma = 0; | 864 | if (sdd->rx_dma.ch && sdd->tx_dma.ch && |
718 | else | 865 | (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))) |
719 | use_dma = 1; | 866 | use_dma = 1; |
720 | 867 | ||
721 | spin_lock_irqsave(&sdd->lock, flags); | 868 | spin_lock_irqsave(&sdd->lock, flags); |
@@ -750,10 +897,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master, | |||
750 | if (use_dma) { | 897 | if (use_dma) { |
751 | if (xfer->tx_buf != NULL | 898 | if (xfer->tx_buf != NULL |
752 | && (sdd->state & TXBUSY)) | 899 | && (sdd->state & TXBUSY)) |
753 | sdd->ops->stop(sdd->tx_dma.ch); | 900 | s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma); |
754 | if (xfer->rx_buf != NULL | 901 | if (xfer->rx_buf != NULL |
755 | && (sdd->state & RXBUSY)) | 902 | && (sdd->state & RXBUSY)) |
756 | sdd->ops->stop(sdd->rx_dma.ch); | 903 | s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma); |
757 | } | 904 | } |
758 | 905 | ||
759 | goto out; | 906 | goto out; |
@@ -790,34 +937,7 @@ out: | |||
790 | return 0; | 937 | return 0; |
791 | } | 938 | } |
792 | 939 | ||
793 | static int s3c64xx_spi_prepare_transfer(struct spi_master *spi) | ||
794 | { | ||
795 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
796 | |||
797 | /* Acquire DMA channels */ | ||
798 | while (!acquire_dma(sdd)) | ||
799 | usleep_range(10000, 11000); | ||
800 | |||
801 | pm_runtime_get_sync(&sdd->pdev->dev); | ||
802 | |||
803 | return 0; | ||
804 | } | ||
805 | |||
806 | static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi) | ||
807 | { | ||
808 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi); | ||
809 | |||
810 | /* Free DMA channels */ | ||
811 | sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client); | ||
812 | sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client); | ||
813 | |||
814 | pm_runtime_put(&sdd->pdev->dev); | ||
815 | |||
816 | return 0; | ||
817 | } | ||
818 | |||
819 | static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( | 940 | static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( |
820 | struct s3c64xx_spi_driver_data *sdd, | ||
821 | struct spi_device *spi) | 941 | struct spi_device *spi) |
822 | { | 942 | { |
823 | struct s3c64xx_spi_csinfo *cs; | 943 | struct s3c64xx_spi_csinfo *cs; |
@@ -874,7 +994,7 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
874 | 994 | ||
875 | sdd = spi_master_get_devdata(spi->master); | 995 | sdd = spi_master_get_devdata(spi->master); |
876 | if (!cs && spi->dev.of_node) { | 996 | if (!cs && spi->dev.of_node) { |
877 | cs = s3c64xx_get_slave_ctrldata(sdd, spi); | 997 | cs = s3c64xx_get_slave_ctrldata(spi); |
878 | spi->controller_data = cs; | 998 | spi->controller_data = cs; |
879 | } | 999 | } |
880 | 1000 | ||
@@ -912,15 +1032,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) | |||
912 | 1032 | ||
913 | spin_unlock_irqrestore(&sdd->lock, flags); | 1033 | spin_unlock_irqrestore(&sdd->lock, flags); |
914 | 1034 | ||
915 | if (spi->bits_per_word != 8 | ||
916 | && spi->bits_per_word != 16 | ||
917 | && spi->bits_per_word != 32) { | ||
918 | dev_err(&spi->dev, "setup: %dbits/wrd not supported!\n", | ||
919 | spi->bits_per_word); | ||
920 | err = -EINVAL; | ||
921 | goto setup_exit; | ||
922 | } | ||
923 | |||
924 | pm_runtime_get_sync(&sdd->pdev->dev); | 1035 | pm_runtime_get_sync(&sdd->pdev->dev); |
925 | 1036 | ||
926 | /* Check if we can provide the requested rate */ | 1037 | /* Check if we can provide the requested rate */ |
@@ -1061,41 +1172,6 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel) | |||
1061 | } | 1172 | } |
1062 | 1173 | ||
1063 | #ifdef CONFIG_OF | 1174 | #ifdef CONFIG_OF |
1064 | static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd) | ||
1065 | { | ||
1066 | struct device *dev = &sdd->pdev->dev; | ||
1067 | int idx, gpio, ret; | ||
1068 | |||
1069 | /* find gpios for mosi, miso and clock lines */ | ||
1070 | for (idx = 0; idx < 3; idx++) { | ||
1071 | gpio = of_get_gpio(dev->of_node, idx); | ||
1072 | if (!gpio_is_valid(gpio)) { | ||
1073 | dev_err(dev, "invalid gpio[%d]: %d\n", idx, gpio); | ||
1074 | goto free_gpio; | ||
1075 | } | ||
1076 | sdd->gpios[idx] = gpio; | ||
1077 | ret = gpio_request(gpio, "spi-bus"); | ||
1078 | if (ret) { | ||
1079 | dev_err(dev, "gpio [%d] request failed: %d\n", | ||
1080 | gpio, ret); | ||
1081 | goto free_gpio; | ||
1082 | } | ||
1083 | } | ||
1084 | return 0; | ||
1085 | |||
1086 | free_gpio: | ||
1087 | while (--idx >= 0) | ||
1088 | gpio_free(sdd->gpios[idx]); | ||
1089 | return -EINVAL; | ||
1090 | } | ||
1091 | |||
1092 | static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd) | ||
1093 | { | ||
1094 | unsigned int idx; | ||
1095 | for (idx = 0; idx < 3; idx++) | ||
1096 | gpio_free(sdd->gpios[idx]); | ||
1097 | } | ||
1098 | |||
1099 | static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) | 1175 | static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) |
1100 | { | 1176 | { |
1101 | struct s3c64xx_spi_info *sci; | 1177 | struct s3c64xx_spi_info *sci; |
@@ -1128,15 +1204,6 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev) | |||
1128 | { | 1204 | { |
1129 | return dev->platform_data; | 1205 | return dev->platform_data; |
1130 | } | 1206 | } |
1131 | |||
1132 | static int s3c64xx_spi_parse_dt_gpio(struct s3c64xx_spi_driver_data *sdd) | ||
1133 | { | ||
1134 | return -EINVAL; | ||
1135 | } | ||
1136 | |||
1137 | static void s3c64xx_spi_dt_gpio_free(struct s3c64xx_spi_driver_data *sdd) | ||
1138 | { | ||
1139 | } | ||
1140 | #endif | 1207 | #endif |
1141 | 1208 | ||
1142 | static const struct of_device_id s3c64xx_spi_dt_match[]; | 1209 | static const struct of_device_id s3c64xx_spi_dt_match[]; |
@@ -1247,6 +1314,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1247 | master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; | 1314 | master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer; |
1248 | master->num_chipselect = sci->num_cs; | 1315 | master->num_chipselect = sci->num_cs; |
1249 | master->dma_alignment = 8; | 1316 | master->dma_alignment = 8; |
1317 | master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1); | ||
1250 | /* the spi->mode bits understood by this driver: */ | 1318 | /* the spi->mode bits understood by this driver: */ |
1251 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | 1319 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; |
1252 | 1320 | ||
@@ -1256,10 +1324,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1256 | goto err0; | 1324 | goto err0; |
1257 | } | 1325 | } |
1258 | 1326 | ||
1259 | if (!sci->cfg_gpio && pdev->dev.of_node) { | 1327 | if (sci->cfg_gpio && sci->cfg_gpio()) { |
1260 | if (s3c64xx_spi_parse_dt_gpio(sdd)) | ||
1261 | return -EBUSY; | ||
1262 | } else if (sci->cfg_gpio == NULL || sci->cfg_gpio()) { | ||
1263 | dev_err(&pdev->dev, "Unable to config gpio\n"); | 1328 | dev_err(&pdev->dev, "Unable to config gpio\n"); |
1264 | ret = -EBUSY; | 1329 | ret = -EBUSY; |
1265 | goto err0; | 1330 | goto err0; |
@@ -1270,13 +1335,13 @@ static int s3c64xx_spi_probe(struct platform_device *pdev) | |||
1270 | if (IS_ERR(sdd->clk)) { | 1335 | if (IS_ERR(sdd->clk)) { |
1271 | dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); | 1336 | dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n"); |
1272 | ret = PTR_ERR(sdd->clk); | 1337 | ret = PTR_ERR(sdd->clk); |
1273 | goto err1; | 1338 | goto err0; |
1274 | } | 1339 | } |
1275 | 1340 | ||
1276 | if (clk_prepare_enable(sdd->clk)) { | 1341 | if (clk_prepare_enable(sdd->clk)) { |
1277 | dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); | 1342 | dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n"); |
1278 | ret = -EBUSY; | 1343 | ret = -EBUSY; |
1279 | goto err1; | 1344 | goto err0; |
1280 | } | 1345 | } |
1281 | 1346 | ||
1282 | sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr); | 1347 | sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr); |
@@ -1333,9 +1398,6 @@ err3: | |||
1333 | clk_disable_unprepare(sdd->src_clk); | 1398 | clk_disable_unprepare(sdd->src_clk); |
1334 | err2: | 1399 | err2: |
1335 | clk_disable_unprepare(sdd->clk); | 1400 | clk_disable_unprepare(sdd->clk); |
1336 | err1: | ||
1337 | if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node) | ||
1338 | s3c64xx_spi_dt_gpio_free(sdd); | ||
1339 | err0: | 1401 | err0: |
1340 | platform_set_drvdata(pdev, NULL); | 1402 | platform_set_drvdata(pdev, NULL); |
1341 | spi_master_put(master); | 1403 | spi_master_put(master); |
@@ -1358,16 +1420,13 @@ static int s3c64xx_spi_remove(struct platform_device *pdev) | |||
1358 | 1420 | ||
1359 | clk_disable_unprepare(sdd->clk); | 1421 | clk_disable_unprepare(sdd->clk); |
1360 | 1422 | ||
1361 | if (!sdd->cntrlr_info->cfg_gpio && pdev->dev.of_node) | ||
1362 | s3c64xx_spi_dt_gpio_free(sdd); | ||
1363 | |||
1364 | platform_set_drvdata(pdev, NULL); | 1423 | platform_set_drvdata(pdev, NULL); |
1365 | spi_master_put(master); | 1424 | spi_master_put(master); |
1366 | 1425 | ||
1367 | return 0; | 1426 | return 0; |
1368 | } | 1427 | } |
1369 | 1428 | ||
1370 | #ifdef CONFIG_PM | 1429 | #ifdef CONFIG_PM_SLEEP |
1371 | static int s3c64xx_spi_suspend(struct device *dev) | 1430 | static int s3c64xx_spi_suspend(struct device *dev) |
1372 | { | 1431 | { |
1373 | struct spi_master *master = dev_get_drvdata(dev); | 1432 | struct spi_master *master = dev_get_drvdata(dev); |
@@ -1379,9 +1438,6 @@ static int s3c64xx_spi_suspend(struct device *dev) | |||
1379 | clk_disable_unprepare(sdd->src_clk); | 1438 | clk_disable_unprepare(sdd->src_clk); |
1380 | clk_disable_unprepare(sdd->clk); | 1439 | clk_disable_unprepare(sdd->clk); |
1381 | 1440 | ||
1382 | if (!sdd->cntrlr_info->cfg_gpio && dev->of_node) | ||
1383 | s3c64xx_spi_dt_gpio_free(sdd); | ||
1384 | |||
1385 | sdd->cur_speed = 0; /* Output Clock is stopped */ | 1441 | sdd->cur_speed = 0; /* Output Clock is stopped */ |
1386 | 1442 | ||
1387 | return 0; | 1443 | return 0; |
@@ -1393,9 +1449,7 @@ static int s3c64xx_spi_resume(struct device *dev) | |||
1393 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); | 1449 | struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master); |
1394 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; | 1450 | struct s3c64xx_spi_info *sci = sdd->cntrlr_info; |
1395 | 1451 | ||
1396 | if (!sci->cfg_gpio && dev->of_node) | 1452 | if (sci->cfg_gpio) |
1397 | s3c64xx_spi_parse_dt_gpio(sdd); | ||
1398 | else | ||
1399 | sci->cfg_gpio(); | 1453 | sci->cfg_gpio(); |
1400 | 1454 | ||
1401 | /* Enable the clock */ | 1455 | /* Enable the clock */ |
@@ -1408,7 +1462,7 @@ static int s3c64xx_spi_resume(struct device *dev) | |||
1408 | 1462 | ||
1409 | return 0; | 1463 | return 0; |
1410 | } | 1464 | } |
1411 | #endif /* CONFIG_PM */ | 1465 | #endif /* CONFIG_PM_SLEEP */ |
1412 | 1466 | ||
1413 | #ifdef CONFIG_PM_RUNTIME | 1467 | #ifdef CONFIG_PM_RUNTIME |
1414 | static int s3c64xx_spi_runtime_suspend(struct device *dev) | 1468 | static int s3c64xx_spi_runtime_suspend(struct device *dev) |
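The bits_per_word_mask added in the probe hunk above uses BIT(n - 1) to advertise support for n-bit words, so BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1) declares 8-, 16- and 32-bit transfers; the SPI core can then reject any other word size before it reaches the controller. A minimal sketch of that check, assuming BIT() from <linux/bitops.h> and using a made-up helper name:

    /*
     * Sketch only (not part of the patch): how a declared mask can be used
     * to validate a transfer's word size before programming the hardware.
     */
    static int check_bits_per_word(u32 bits_per_word_mask, u8 bpw)
    {
    	/* BIT(n - 1) set in the mask means "n bits per word is supported" */
    	if (bits_per_word_mask && !(bits_per_word_mask & BIT(bpw - 1)))
    		return -EINVAL;
    	return 0;	/* e.g. bpw == 8, 16 or 32 passes for the mask above */
    }

The same mask could also be spelled with the SPI_BPW_MASK() helper, assuming this tree already carries it in <linux/spi/spi.h>.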
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8b40d0884f8b..2bc5a6b86300 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c | |||
@@ -764,8 +764,6 @@ static const struct of_device_id sh_msiof_match[] = { | |||
764 | {}, | 764 | {}, |
765 | }; | 765 | }; |
766 | MODULE_DEVICE_TABLE(of, sh_msiof_match); | 766 | MODULE_DEVICE_TABLE(of, sh_msiof_match); |
767 | #else | ||
768 | #define sh_msiof_match NULL | ||
769 | #endif | 767 | #endif |
770 | 768 | ||
771 | static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { | 769 | static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = { |
@@ -780,7 +778,7 @@ static struct platform_driver sh_msiof_spi_drv = { | |||
780 | .name = "spi_sh_msiof", | 778 | .name = "spi_sh_msiof", |
781 | .owner = THIS_MODULE, | 779 | .owner = THIS_MODULE, |
782 | .pm = &sh_msiof_spi_dev_pm_ops, | 780 | .pm = &sh_msiof_spi_dev_pm_ops, |
783 | .of_match_table = sh_msiof_match, | 781 | .of_match_table = of_match_ptr(sh_msiof_match), |
784 | }, | 782 | }, |
785 | }; | 783 | }; |
786 | module_platform_driver(sh_msiof_spi_drv); | 784 | module_platform_driver(sh_msiof_spi_drv); |
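The of_match_table change above relies on of_match_ptr() collapsing to NULL when CONFIG_OF is unset, which is what the removed "#define sh_msiof_match NULL" fallback used to do by hand. The helper is roughly the following, as assumed from <linux/of.h>:

    #ifdef CONFIG_OF
    #define of_match_ptr(_ptr)	(_ptr)
    #else
    #define of_match_ptr(_ptr)	NULL
    #endif

Because the identifier is never referenced on !CONFIG_OF builds, the sh_msiof_match table can stay inside its #ifdef CONFIG_OF block without needing a dummy definition.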
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index f59d4177b419..0808cd56bf8d 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c | |||
@@ -660,7 +660,7 @@ static const struct of_device_id spi_sirfsoc_of_match[] = { | |||
660 | { .compatible = "sirf,marco-spi", }, | 660 | { .compatible = "sirf,marco-spi", }, |
661 | {} | 661 | {} |
662 | }; | 662 | }; |
663 | MODULE_DEVICE_TABLE(of, sirfsoc_spi_of_match); | 663 | MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match); |
664 | 664 | ||
665 | static struct platform_driver spi_sirfsoc_driver = { | 665 | static struct platform_driver spi_sirfsoc_driver = { |
666 | .driver = { | 666 | .driver = { |
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c new file mode 100644 index 000000000000..598eb45e8008 --- /dev/null +++ b/drivers/spi/spi-tegra114.c | |||
@@ -0,0 +1,1246 @@ | |||
1 | /* | ||
2 | * SPI driver for NVIDIA's Tegra114 SPI Controller. | ||
3 | * | ||
4 | * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms and conditions of the GNU General Public License, | ||
8 | * version 2, as published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
13 | * more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | |||
19 | #include <linux/clk.h> | ||
20 | #include <linux/clk/tegra.h> | ||
21 | #include <linux/completion.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/dmaengine.h> | ||
24 | #include <linux/dma-mapping.h> | ||
25 | #include <linux/dmapool.h> | ||
26 | #include <linux/err.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/interrupt.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/kthread.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/platform_device.h> | ||
34 | #include <linux/pm_runtime.h> | ||
35 | #include <linux/of.h> | ||
36 | #include <linux/of_device.h> | ||
37 | #include <linux/spi/spi.h> | ||
38 | |||
39 | #define SPI_COMMAND1 0x000 | ||
40 | #define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0) | ||
41 | #define SPI_PACKED (1 << 5) | ||
42 | #define SPI_TX_EN (1 << 11) | ||
43 | #define SPI_RX_EN (1 << 12) | ||
44 | #define SPI_BOTH_EN_BYTE (1 << 13) | ||
45 | #define SPI_BOTH_EN_BIT (1 << 14) | ||
46 | #define SPI_LSBYTE_FE (1 << 15) | ||
47 | #define SPI_LSBIT_FE (1 << 16) | ||
48 | #define SPI_BIDIROE (1 << 17) | ||
49 | #define SPI_IDLE_SDA_DRIVE_LOW (0 << 18) | ||
50 | #define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18) | ||
51 | #define SPI_IDLE_SDA_PULL_LOW (2 << 18) | ||
52 | #define SPI_IDLE_SDA_PULL_HIGH (3 << 18) | ||
53 | #define SPI_IDLE_SDA_MASK (3 << 18) | ||
54 | #define SPI_CS_SS_VAL (1 << 20) | ||
55 | #define SPI_CS_SW_HW (1 << 21) | ||
56 | /* SPI_CS_POL_INACTIVE bits are default high */ | ||
57 | #define SPI_CS_POL_INACTIVE 22 | ||
58 | #define SPI_CS_POL_INACTIVE_0 (1 << 22) | ||
59 | #define SPI_CS_POL_INACTIVE_1 (1 << 23) | ||
60 | #define SPI_CS_POL_INACTIVE_2 (1 << 24) | ||
61 | #define SPI_CS_POL_INACTIVE_3 (1 << 25) | ||
62 | #define SPI_CS_POL_INACTIVE_MASK (0xF << 22) | ||
63 | |||
64 | #define SPI_CS_SEL_0 (0 << 26) | ||
65 | #define SPI_CS_SEL_1 (1 << 26) | ||
66 | #define SPI_CS_SEL_2 (2 << 26) | ||
67 | #define SPI_CS_SEL_3 (3 << 26) | ||
68 | #define SPI_CS_SEL_MASK (3 << 26) | ||
69 | #define SPI_CS_SEL(x) (((x) & 0x3) << 26) | ||
70 | #define SPI_CONTROL_MODE_0 (0 << 28) | ||
71 | #define SPI_CONTROL_MODE_1 (1 << 28) | ||
72 | #define SPI_CONTROL_MODE_2 (2 << 28) | ||
73 | #define SPI_CONTROL_MODE_3 (3 << 28) | ||
74 | #define SPI_CONTROL_MODE_MASK (3 << 28) | ||
75 | #define SPI_MODE_SEL(x) (((x) & 0x3) << 28) | ||
76 | #define SPI_M_S (1 << 30) | ||
77 | #define SPI_PIO (1 << 31) | ||
78 | |||
79 | #define SPI_COMMAND2 0x004 | ||
80 | #define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6) | ||
81 | #define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0) | ||
82 | |||
83 | #define SPI_CS_TIMING1 0x008 | ||
84 | #define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold)) | ||
85 | #define SPI_CS_SETUP_HOLD(reg, cs, val) \ | ||
86 | ((((val) & 0xFFu) << ((cs) * 8)) | \ | ||
87 | ((reg) & ~(0xFFu << ((cs) * 8)))) | ||
88 | |||
89 | #define SPI_CS_TIMING2 0x00C | ||
90 | #define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0) | ||
91 | #define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5) | ||
92 | #define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8) | ||
93 | #define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13) | ||
94 | #define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16) | ||
95 | #define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21) | ||
96 | #define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24) | ||
97 | #define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29) | ||
98 | #define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \ | ||
99 | (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \ | ||
100 | ((reg) & ~(1 << ((cs) * 8 + 5)))) | ||
101 | #define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \ | ||
102 | (reg = (((val) & 0xF) << ((cs) * 8)) | \ | ||
103 | ((reg) & ~(0xF << ((cs) * 8)))) | ||
104 | |||
105 | #define SPI_TRANS_STATUS 0x010 | ||
106 | #define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF) | ||
107 | #define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF) | ||
108 | #define SPI_RDY (1 << 30) | ||
109 | |||
110 | #define SPI_FIFO_STATUS 0x014 | ||
111 | #define SPI_RX_FIFO_EMPTY (1 << 0) | ||
112 | #define SPI_RX_FIFO_FULL (1 << 1) | ||
113 | #define SPI_TX_FIFO_EMPTY (1 << 2) | ||
114 | #define SPI_TX_FIFO_FULL (1 << 3) | ||
115 | #define SPI_RX_FIFO_UNF (1 << 4) | ||
116 | #define SPI_RX_FIFO_OVF (1 << 5) | ||
117 | #define SPI_TX_FIFO_UNF (1 << 6) | ||
118 | #define SPI_TX_FIFO_OVF (1 << 7) | ||
119 | #define SPI_ERR (1 << 8) | ||
120 | #define SPI_TX_FIFO_FLUSH (1 << 14) | ||
121 | #define SPI_RX_FIFO_FLUSH (1 << 15) | ||
122 | #define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F) | ||
123 | #define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F) | ||
124 | #define SPI_FRAME_END (1 << 30) | ||
125 | #define SPI_CS_INACTIVE (1 << 31) | ||
126 | |||
127 | #define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \ | ||
128 | SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF) | ||
129 | #define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY) | ||
130 | |||
131 | #define SPI_TX_DATA 0x018 | ||
132 | #define SPI_RX_DATA 0x01C | ||
133 | |||
134 | #define SPI_DMA_CTL 0x020 | ||
135 | #define SPI_TX_TRIG_1 (0 << 15) | ||
136 | #define SPI_TX_TRIG_4 (1 << 15) | ||
137 | #define SPI_TX_TRIG_8 (2 << 15) | ||
138 | #define SPI_TX_TRIG_16 (3 << 15) | ||
139 | #define SPI_TX_TRIG_MASK (3 << 15) | ||
140 | #define SPI_RX_TRIG_1 (0 << 19) | ||
141 | #define SPI_RX_TRIG_4 (1 << 19) | ||
142 | #define SPI_RX_TRIG_8 (2 << 19) | ||
143 | #define SPI_RX_TRIG_16 (3 << 19) | ||
144 | #define SPI_RX_TRIG_MASK (3 << 19) | ||
145 | #define SPI_IE_TX (1 << 28) | ||
146 | #define SPI_IE_RX (1 << 29) | ||
147 | #define SPI_CONT (1 << 30) | ||
148 | #define SPI_DMA (1 << 31) | ||
149 | #define SPI_DMA_EN SPI_DMA | ||
150 | |||
151 | #define SPI_DMA_BLK 0x024 | ||
152 | #define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0) | ||
153 | |||
154 | #define SPI_TX_FIFO 0x108 | ||
155 | #define SPI_RX_FIFO 0x188 | ||
156 | #define MAX_CHIP_SELECT 4 | ||
157 | #define SPI_FIFO_DEPTH 64 | ||
158 | #define DATA_DIR_TX (1 << 0) | ||
159 | #define DATA_DIR_RX (1 << 1) | ||
160 | |||
161 | #define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000)) | ||
162 | #define DEFAULT_SPI_DMA_BUF_LEN (16*1024) | ||
163 | #define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40) | ||
164 | #define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0) | ||
165 | #define MAX_HOLD_CYCLES 16 | ||
166 | #define SPI_DEFAULT_SPEED 25000000 | ||
167 | |||
168 | #define MAX_CHIP_SELECT 4 | ||
169 | #define SPI_FIFO_DEPTH 64 | ||
170 | |||
171 | struct tegra_spi_data { | ||
172 | struct device *dev; | ||
173 | struct spi_master *master; | ||
174 | spinlock_t lock; | ||
175 | |||
176 | struct clk *clk; | ||
177 | void __iomem *base; | ||
178 | phys_addr_t phys; | ||
179 | unsigned irq; | ||
180 | int dma_req_sel; | ||
181 | u32 spi_max_frequency; | ||
182 | u32 cur_speed; | ||
183 | |||
184 | struct spi_device *cur_spi; | ||
185 | unsigned cur_pos; | ||
186 | unsigned cur_len; | ||
187 | unsigned words_per_32bit; | ||
188 | unsigned bytes_per_word; | ||
189 | unsigned curr_dma_words; | ||
190 | unsigned cur_direction; | ||
191 | |||
192 | unsigned cur_rx_pos; | ||
193 | unsigned cur_tx_pos; | ||
194 | |||
195 | unsigned dma_buf_size; | ||
196 | unsigned max_buf_size; | ||
197 | bool is_curr_dma_xfer; | ||
198 | |||
199 | struct completion rx_dma_complete; | ||
200 | struct completion tx_dma_complete; | ||
201 | |||
202 | u32 tx_status; | ||
203 | u32 rx_status; | ||
204 | u32 status_reg; | ||
205 | bool is_packed; | ||
206 | unsigned long packed_size; | ||
207 | |||
208 | u32 command1_reg; | ||
209 | u32 dma_control_reg; | ||
210 | u32 def_command1_reg; | ||
211 | u32 spi_cs_timing; | ||
212 | |||
213 | struct completion xfer_completion; | ||
214 | struct spi_transfer *curr_xfer; | ||
215 | struct dma_chan *rx_dma_chan; | ||
216 | u32 *rx_dma_buf; | ||
217 | dma_addr_t rx_dma_phys; | ||
218 | struct dma_async_tx_descriptor *rx_dma_desc; | ||
219 | |||
220 | struct dma_chan *tx_dma_chan; | ||
221 | u32 *tx_dma_buf; | ||
222 | dma_addr_t tx_dma_phys; | ||
223 | struct dma_async_tx_descriptor *tx_dma_desc; | ||
224 | }; | ||
225 | |||
226 | static int tegra_spi_runtime_suspend(struct device *dev); | ||
227 | static int tegra_spi_runtime_resume(struct device *dev); | ||
228 | |||
229 | static inline unsigned long tegra_spi_readl(struct tegra_spi_data *tspi, | ||
230 | unsigned long reg) | ||
231 | { | ||
232 | return readl(tspi->base + reg); | ||
233 | } | ||
234 | |||
235 | static inline void tegra_spi_writel(struct tegra_spi_data *tspi, | ||
236 | unsigned long val, unsigned long reg) | ||
237 | { | ||
238 | writel(val, tspi->base + reg); | ||
239 | |||
240 | /* Read back register to make sure that register writes completed */ | ||
241 | if (reg != SPI_TX_FIFO) | ||
242 | readl(tspi->base + SPI_COMMAND1); | ||
243 | } | ||
244 | |||
245 | static void tegra_spi_clear_status(struct tegra_spi_data *tspi) | ||
246 | { | ||
247 | unsigned long val; | ||
248 | |||
249 | /* Write 1 to clear status register */ | ||
250 | val = tegra_spi_readl(tspi, SPI_TRANS_STATUS); | ||
251 | tegra_spi_writel(tspi, val, SPI_TRANS_STATUS); | ||
252 | |||
253 | /* Clear fifo status error if any */ | ||
254 | val = tegra_spi_readl(tspi, SPI_FIFO_STATUS); | ||
255 | if (val & SPI_ERR) | ||
256 | tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR, | ||
257 | SPI_FIFO_STATUS); | ||
258 | } | ||
259 | |||
260 | static unsigned tegra_spi_calculate_curr_xfer_param( | ||
261 | struct spi_device *spi, struct tegra_spi_data *tspi, | ||
262 | struct spi_transfer *t) | ||
263 | { | ||
264 | unsigned remain_len = t->len - tspi->cur_pos; | ||
265 | unsigned max_word; | ||
266 | unsigned bits_per_word = t->bits_per_word; | ||
267 | unsigned max_len; | ||
268 | unsigned total_fifo_words; | ||
269 | |||
270 | tspi->bytes_per_word = (bits_per_word - 1) / 8 + 1; | ||
271 | |||
272 | if (bits_per_word == 8 || bits_per_word == 16) { | ||
273 | tspi->is_packed = 1; | ||
274 | tspi->words_per_32bit = 32/bits_per_word; | ||
275 | } else { | ||
276 | tspi->is_packed = 0; | ||
277 | tspi->words_per_32bit = 1; | ||
278 | } | ||
279 | |||
280 | if (tspi->is_packed) { | ||
281 | max_len = min(remain_len, tspi->max_buf_size); | ||
282 | tspi->curr_dma_words = max_len/tspi->bytes_per_word; | ||
283 | total_fifo_words = (max_len + 3) / 4; | ||
284 | } else { | ||
285 | max_word = (remain_len - 1) / tspi->bytes_per_word + 1; | ||
286 | max_word = min(max_word, tspi->max_buf_size/4); | ||
287 | tspi->curr_dma_words = max_word; | ||
288 | total_fifo_words = max_word; | ||
289 | } | ||
290 | return total_fifo_words; | ||
291 | } | ||
292 | |||
293 | static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf( | ||
294 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
295 | { | ||
296 | unsigned nbytes; | ||
297 | unsigned tx_empty_count; | ||
298 | unsigned long fifo_status; | ||
299 | unsigned max_n_32bit; | ||
300 | unsigned i, count; | ||
301 | unsigned long x; | ||
302 | unsigned int written_words; | ||
303 | unsigned fifo_words_left; | ||
304 | u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos; | ||
305 | |||
306 | fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS); | ||
307 | tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status); | ||
308 | |||
309 | if (tspi->is_packed) { | ||
310 | fifo_words_left = tx_empty_count * tspi->words_per_32bit; | ||
311 | written_words = min(fifo_words_left, tspi->curr_dma_words); | ||
312 | nbytes = written_words * tspi->bytes_per_word; | ||
313 | max_n_32bit = DIV_ROUND_UP(nbytes, 4); | ||
314 | for (count = 0; count < max_n_32bit; count++) { | ||
315 | x = 0; | ||
316 | for (i = 0; (i < 4) && nbytes; i++, nbytes--) | ||
317 | x |= (*tx_buf++) << (i*8); | ||
318 | tegra_spi_writel(tspi, x, SPI_TX_FIFO); | ||
319 | } | ||
320 | } else { | ||
321 | max_n_32bit = min(tspi->curr_dma_words, tx_empty_count); | ||
322 | written_words = max_n_32bit; | ||
323 | nbytes = written_words * tspi->bytes_per_word; | ||
324 | for (count = 0; count < max_n_32bit; count++) { | ||
325 | x = 0; | ||
326 | for (i = 0; nbytes && (i < tspi->bytes_per_word); | ||
327 | i++, nbytes--) | ||
328 | x |= ((*tx_buf++) << i*8); | ||
329 | tegra_spi_writel(tspi, x, SPI_TX_FIFO); | ||
330 | } | ||
331 | } | ||
332 | tspi->cur_tx_pos += written_words * tspi->bytes_per_word; | ||
333 | return written_words; | ||
334 | } | ||
335 | |||
336 | static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf( | ||
337 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
338 | { | ||
339 | unsigned rx_full_count; | ||
340 | unsigned long fifo_status; | ||
341 | unsigned i, count; | ||
342 | unsigned long x; | ||
343 | unsigned int read_words = 0; | ||
344 | unsigned len; | ||
345 | u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos; | ||
346 | |||
347 | fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS); | ||
348 | rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status); | ||
349 | if (tspi->is_packed) { | ||
350 | len = tspi->curr_dma_words * tspi->bytes_per_word; | ||
351 | for (count = 0; count < rx_full_count; count++) { | ||
352 | x = tegra_spi_readl(tspi, SPI_RX_FIFO); | ||
353 | for (i = 0; len && (i < 4); i++, len--) | ||
354 | *rx_buf++ = (x >> i*8) & 0xFF; | ||
355 | } | ||
356 | tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; | ||
357 | read_words += tspi->curr_dma_words; | ||
358 | } else { | ||
359 | unsigned int rx_mask; | ||
360 | unsigned int bits_per_word = t->bits_per_word; | ||
361 | |||
362 | rx_mask = (1 << bits_per_word) - 1; | ||
363 | for (count = 0; count < rx_full_count; count++) { | ||
364 | x = tegra_spi_readl(tspi, SPI_RX_FIFO); | ||
365 | x &= rx_mask; | ||
366 | for (i = 0; (i < tspi->bytes_per_word); i++) | ||
367 | *rx_buf++ = (x >> (i*8)) & 0xFF; | ||
368 | } | ||
369 | tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word; | ||
370 | read_words += rx_full_count; | ||
371 | } | ||
372 | return read_words; | ||
373 | } | ||
374 | |||
375 | static void tegra_spi_copy_client_txbuf_to_spi_txbuf( | ||
376 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
377 | { | ||
378 | unsigned len; | ||
379 | |||
380 | 	/* Make the DMA buffer readable by the CPU */ | ||

381 | dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys, | ||
382 | tspi->dma_buf_size, DMA_TO_DEVICE); | ||
383 | |||
384 | if (tspi->is_packed) { | ||
385 | len = tspi->curr_dma_words * tspi->bytes_per_word; | ||
386 | memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len); | ||
387 | } else { | ||
388 | unsigned int i; | ||
389 | unsigned int count; | ||
390 | u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos; | ||
391 | unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word; | ||
392 | unsigned int x; | ||
393 | |||
394 | for (count = 0; count < tspi->curr_dma_words; count++) { | ||
395 | x = 0; | ||
396 | for (i = 0; consume && (i < tspi->bytes_per_word); | ||
397 | i++, consume--) | ||
398 | x |= ((*tx_buf++) << i * 8); | ||
399 | tspi->tx_dma_buf[count] = x; | ||
400 | } | ||
401 | } | ||
402 | tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word; | ||
403 | |||
404 | 	/* Make the DMA buffer readable by the DMA engine */ | ||
405 | dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys, | ||
406 | tspi->dma_buf_size, DMA_TO_DEVICE); | ||
407 | } | ||
408 | |||
409 | static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf( | ||
410 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
411 | { | ||
412 | unsigned len; | ||
413 | |||
414 | 	/* Make the DMA buffer readable by the CPU */ | ||
415 | dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys, | ||
416 | tspi->dma_buf_size, DMA_FROM_DEVICE); | ||
417 | |||
418 | if (tspi->is_packed) { | ||
419 | len = tspi->curr_dma_words * tspi->bytes_per_word; | ||
420 | memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len); | ||
421 | } else { | ||
422 | unsigned int i; | ||
423 | unsigned int count; | ||
424 | unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos; | ||
425 | unsigned int x; | ||
426 | unsigned int rx_mask; | ||
427 | unsigned int bits_per_word = t->bits_per_word; | ||
428 | |||
429 | rx_mask = (1 << bits_per_word) - 1; | ||
430 | for (count = 0; count < tspi->curr_dma_words; count++) { | ||
431 | x = tspi->rx_dma_buf[count]; | ||
432 | x &= rx_mask; | ||
433 | for (i = 0; (i < tspi->bytes_per_word); i++) | ||
434 | *rx_buf++ = (x >> (i*8)) & 0xFF; | ||
435 | } | ||
436 | } | ||
437 | tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; | ||
438 | |||
439 | 	/* Make the DMA buffer readable by the DMA engine */ | ||
440 | dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys, | ||
441 | tspi->dma_buf_size, DMA_FROM_DEVICE); | ||
442 | } | ||
443 | |||
444 | static void tegra_spi_dma_complete(void *args) | ||
445 | { | ||
446 | struct completion *dma_complete = args; | ||
447 | |||
448 | complete(dma_complete); | ||
449 | } | ||
450 | |||
451 | static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len) | ||
452 | { | ||
453 | INIT_COMPLETION(tspi->tx_dma_complete); | ||
454 | tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan, | ||
455 | tspi->tx_dma_phys, len, DMA_MEM_TO_DEV, | ||
456 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
457 | if (!tspi->tx_dma_desc) { | ||
458 | dev_err(tspi->dev, "Not able to get desc for Tx\n"); | ||
459 | return -EIO; | ||
460 | } | ||
461 | |||
462 | tspi->tx_dma_desc->callback = tegra_spi_dma_complete; | ||
463 | tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete; | ||
464 | |||
465 | dmaengine_submit(tspi->tx_dma_desc); | ||
466 | dma_async_issue_pending(tspi->tx_dma_chan); | ||
467 | return 0; | ||
468 | } | ||
469 | |||
470 | static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len) | ||
471 | { | ||
472 | INIT_COMPLETION(tspi->rx_dma_complete); | ||
473 | tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan, | ||
474 | tspi->rx_dma_phys, len, DMA_DEV_TO_MEM, | ||
475 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
476 | if (!tspi->rx_dma_desc) { | ||
477 | dev_err(tspi->dev, "Not able to get desc for Rx\n"); | ||
478 | return -EIO; | ||
479 | } | ||
480 | |||
481 | tspi->rx_dma_desc->callback = tegra_spi_dma_complete; | ||
482 | tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete; | ||
483 | |||
484 | dmaengine_submit(tspi->rx_dma_desc); | ||
485 | dma_async_issue_pending(tspi->rx_dma_chan); | ||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | static int tegra_spi_start_dma_based_transfer( | ||
490 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
491 | { | ||
492 | unsigned long val; | ||
493 | unsigned int len; | ||
494 | int ret = 0; | ||
495 | unsigned long status; | ||
496 | |||
497 | /* Make sure that Rx and Tx fifo are empty */ | ||
498 | status = tegra_spi_readl(tspi, SPI_FIFO_STATUS); | ||
499 | if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) { | ||
500 | dev_err(tspi->dev, | ||
501 | "Rx/Tx fifo are not empty status 0x%08lx\n", status); | ||
502 | return -EIO; | ||
503 | } | ||
504 | |||
505 | val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1); | ||
506 | tegra_spi_writel(tspi, val, SPI_DMA_BLK); | ||
507 | |||
508 | if (tspi->is_packed) | ||
509 | len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word, | ||
510 | 4) * 4; | ||
511 | else | ||
512 | len = tspi->curr_dma_words * 4; | ||
513 | |||
514 | /* Set attention level based on length of transfer */ | ||
515 | if (len & 0xF) | ||
516 | val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1; | ||
517 | else if (((len) >> 4) & 0x1) | ||
518 | val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4; | ||
519 | else | ||
520 | val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8; | ||
521 | |||
522 | if (tspi->cur_direction & DATA_DIR_TX) | ||
523 | val |= SPI_IE_TX; | ||
524 | |||
525 | if (tspi->cur_direction & DATA_DIR_RX) | ||
526 | val |= SPI_IE_RX; | ||
527 | |||
528 | tegra_spi_writel(tspi, val, SPI_DMA_CTL); | ||
529 | tspi->dma_control_reg = val; | ||
530 | |||
531 | if (tspi->cur_direction & DATA_DIR_TX) { | ||
532 | tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t); | ||
533 | ret = tegra_spi_start_tx_dma(tspi, len); | ||
534 | if (ret < 0) { | ||
535 | dev_err(tspi->dev, | ||
536 | "Starting tx dma failed, err %d\n", ret); | ||
537 | return ret; | ||
538 | } | ||
539 | } | ||
540 | |||
541 | if (tspi->cur_direction & DATA_DIR_RX) { | ||
542 | 		/* Make the DMA buffer readable by the DMA engine */ | ||
543 | dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys, | ||
544 | tspi->dma_buf_size, DMA_FROM_DEVICE); | ||
545 | |||
546 | ret = tegra_spi_start_rx_dma(tspi, len); | ||
547 | if (ret < 0) { | ||
548 | dev_err(tspi->dev, | ||
549 | "Starting rx dma failed, err %d\n", ret); | ||
550 | if (tspi->cur_direction & DATA_DIR_TX) | ||
551 | dmaengine_terminate_all(tspi->tx_dma_chan); | ||
552 | return ret; | ||
553 | } | ||
554 | } | ||
555 | tspi->is_curr_dma_xfer = true; | ||
556 | tspi->dma_control_reg = val; | ||
557 | |||
558 | val |= SPI_DMA_EN; | ||
559 | tegra_spi_writel(tspi, val, SPI_DMA_CTL); | ||
560 | return ret; | ||
561 | } | ||
562 | |||
563 | static int tegra_spi_start_cpu_based_transfer( | ||
564 | struct tegra_spi_data *tspi, struct spi_transfer *t) | ||
565 | { | ||
566 | unsigned long val; | ||
567 | unsigned cur_words; | ||
568 | |||
569 | if (tspi->cur_direction & DATA_DIR_TX) | ||
570 | cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t); | ||
571 | else | ||
572 | cur_words = tspi->curr_dma_words; | ||
573 | |||
574 | val = SPI_DMA_BLK_SET(cur_words - 1); | ||
575 | tegra_spi_writel(tspi, val, SPI_DMA_BLK); | ||
576 | |||
577 | val = 0; | ||
578 | if (tspi->cur_direction & DATA_DIR_TX) | ||
579 | val |= SPI_IE_TX; | ||
580 | |||
581 | if (tspi->cur_direction & DATA_DIR_RX) | ||
582 | val |= SPI_IE_RX; | ||
583 | |||
584 | tegra_spi_writel(tspi, val, SPI_DMA_CTL); | ||
585 | tspi->dma_control_reg = val; | ||
586 | |||
587 | tspi->is_curr_dma_xfer = false; | ||
588 | |||
589 | val |= SPI_DMA_EN; | ||
590 | tegra_spi_writel(tspi, val, SPI_DMA_CTL); | ||
591 | return 0; | ||
592 | } | ||
593 | |||
594 | static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi, | ||
595 | bool dma_to_memory) | ||
596 | { | ||
597 | struct dma_chan *dma_chan; | ||
598 | u32 *dma_buf; | ||
599 | dma_addr_t dma_phys; | ||
600 | int ret; | ||
601 | struct dma_slave_config dma_sconfig; | ||
602 | dma_cap_mask_t mask; | ||
603 | |||
604 | dma_cap_zero(mask); | ||
605 | dma_cap_set(DMA_SLAVE, mask); | ||
606 | dma_chan = dma_request_channel(mask, NULL, NULL); | ||
607 | if (!dma_chan) { | ||
608 | dev_err(tspi->dev, | ||
609 | "Dma channel is not available, will try later\n"); | ||
610 | return -EPROBE_DEFER; | ||
611 | } | ||
612 | |||
613 | dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size, | ||
614 | &dma_phys, GFP_KERNEL); | ||
615 | if (!dma_buf) { | ||
616 | dev_err(tspi->dev, " Not able to allocate the dma buffer\n"); | ||
617 | dma_release_channel(dma_chan); | ||
618 | return -ENOMEM; | ||
619 | } | ||
620 | |||
621 | dma_sconfig.slave_id = tspi->dma_req_sel; | ||
622 | if (dma_to_memory) { | ||
623 | dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO; | ||
624 | dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
625 | dma_sconfig.src_maxburst = 0; | ||
626 | } else { | ||
627 | dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO; | ||
628 | dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
629 | dma_sconfig.dst_maxburst = 0; | ||
630 | } | ||
631 | |||
632 | ret = dmaengine_slave_config(dma_chan, &dma_sconfig); | ||
633 | if (ret) | ||
634 | goto scrub; | ||
635 | if (dma_to_memory) { | ||
636 | tspi->rx_dma_chan = dma_chan; | ||
637 | tspi->rx_dma_buf = dma_buf; | ||
638 | tspi->rx_dma_phys = dma_phys; | ||
639 | } else { | ||
640 | tspi->tx_dma_chan = dma_chan; | ||
641 | tspi->tx_dma_buf = dma_buf; | ||
642 | tspi->tx_dma_phys = dma_phys; | ||
643 | } | ||
644 | return 0; | ||
645 | |||
646 | scrub: | ||
647 | dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys); | ||
648 | dma_release_channel(dma_chan); | ||
649 | return ret; | ||
650 | } | ||
651 | |||
652 | static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi, | ||
653 | bool dma_to_memory) | ||
654 | { | ||
655 | u32 *dma_buf; | ||
656 | dma_addr_t dma_phys; | ||
657 | struct dma_chan *dma_chan; | ||
658 | |||
659 | if (dma_to_memory) { | ||
660 | dma_buf = tspi->rx_dma_buf; | ||
661 | dma_chan = tspi->rx_dma_chan; | ||
662 | dma_phys = tspi->rx_dma_phys; | ||
663 | tspi->rx_dma_chan = NULL; | ||
664 | tspi->rx_dma_buf = NULL; | ||
665 | } else { | ||
666 | dma_buf = tspi->tx_dma_buf; | ||
667 | dma_chan = tspi->tx_dma_chan; | ||
668 | dma_phys = tspi->tx_dma_phys; | ||
669 | tspi->tx_dma_buf = NULL; | ||
670 | tspi->tx_dma_chan = NULL; | ||
671 | } | ||
672 | if (!dma_chan) | ||
673 | return; | ||
674 | |||
675 | dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys); | ||
676 | dma_release_channel(dma_chan); | ||
677 | } | ||
678 | |||
679 | static int tegra_spi_start_transfer_one(struct spi_device *spi, | ||
680 | struct spi_transfer *t, bool is_first_of_msg, | ||
681 | bool is_single_xfer) | ||
682 | { | ||
683 | struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); | ||
684 | u32 speed = t->speed_hz; | ||
685 | u8 bits_per_word = t->bits_per_word; | ||
686 | unsigned total_fifo_words; | ||
687 | int ret; | ||
688 | unsigned long command1; | ||
689 | int req_mode; | ||
690 | |||
691 | if (speed != tspi->cur_speed) { | ||
692 | clk_set_rate(tspi->clk, speed); | ||
693 | tspi->cur_speed = speed; | ||
694 | } | ||
695 | |||
696 | tspi->cur_spi = spi; | ||
697 | tspi->cur_pos = 0; | ||
698 | tspi->cur_rx_pos = 0; | ||
699 | tspi->cur_tx_pos = 0; | ||
700 | tspi->curr_xfer = t; | ||
701 | total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t); | ||
702 | |||
703 | if (is_first_of_msg) { | ||
704 | tegra_spi_clear_status(tspi); | ||
705 | |||
706 | command1 = tspi->def_command1_reg; | ||
707 | command1 |= SPI_BIT_LENGTH(bits_per_word - 1); | ||
708 | |||
709 | command1 &= ~SPI_CONTROL_MODE_MASK; | ||
710 | req_mode = spi->mode & 0x3; | ||
711 | if (req_mode == SPI_MODE_0) | ||
712 | command1 |= SPI_CONTROL_MODE_0; | ||
713 | else if (req_mode == SPI_MODE_1) | ||
714 | command1 |= SPI_CONTROL_MODE_1; | ||
715 | else if (req_mode == SPI_MODE_2) | ||
716 | command1 |= SPI_CONTROL_MODE_2; | ||
717 | else if (req_mode == SPI_MODE_3) | ||
718 | command1 |= SPI_CONTROL_MODE_3; | ||
719 | |||
720 | tegra_spi_writel(tspi, command1, SPI_COMMAND1); | ||
721 | |||
722 | command1 |= SPI_CS_SW_HW; | ||
723 | if (spi->mode & SPI_CS_HIGH) | ||
724 | command1 |= SPI_CS_SS_VAL; | ||
725 | else | ||
726 | command1 &= ~SPI_CS_SS_VAL; | ||
727 | |||
728 | tegra_spi_writel(tspi, 0, SPI_COMMAND2); | ||
729 | } else { | ||
730 | command1 = tspi->command1_reg; | ||
731 | command1 &= ~SPI_BIT_LENGTH(~0); | ||
732 | command1 |= SPI_BIT_LENGTH(bits_per_word - 1); | ||
733 | } | ||
734 | |||
735 | if (tspi->is_packed) | ||
736 | command1 |= SPI_PACKED; | ||
737 | |||
738 | command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN); | ||
739 | tspi->cur_direction = 0; | ||
740 | if (t->rx_buf) { | ||
741 | command1 |= SPI_RX_EN; | ||
742 | tspi->cur_direction |= DATA_DIR_RX; | ||
743 | } | ||
744 | if (t->tx_buf) { | ||
745 | command1 |= SPI_TX_EN; | ||
746 | tspi->cur_direction |= DATA_DIR_TX; | ||
747 | } | ||
748 | command1 |= SPI_CS_SEL(spi->chip_select); | ||
749 | tegra_spi_writel(tspi, command1, SPI_COMMAND1); | ||
750 | tspi->command1_reg = command1; | ||
751 | |||
752 | dev_dbg(tspi->dev, "The def 0x%x and written 0x%lx\n", | ||
753 | tspi->def_command1_reg, command1); | ||
754 | |||
755 | if (total_fifo_words > SPI_FIFO_DEPTH) | ||
756 | ret = tegra_spi_start_dma_based_transfer(tspi, t); | ||
757 | else | ||
758 | ret = tegra_spi_start_cpu_based_transfer(tspi, t); | ||
759 | return ret; | ||
760 | } | ||
761 | |||
762 | static int tegra_spi_setup(struct spi_device *spi) | ||
763 | { | ||
764 | struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master); | ||
765 | unsigned long val; | ||
766 | unsigned long flags; | ||
767 | int ret; | ||
768 | unsigned int cs_pol_bit[MAX_CHIP_SELECT] = { | ||
769 | SPI_CS_POL_INACTIVE_0, | ||
770 | SPI_CS_POL_INACTIVE_1, | ||
771 | SPI_CS_POL_INACTIVE_2, | ||
772 | SPI_CS_POL_INACTIVE_3, | ||
773 | }; | ||
774 | |||
775 | dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n", | ||
776 | spi->bits_per_word, | ||
777 | spi->mode & SPI_CPOL ? "" : "~", | ||
778 | spi->mode & SPI_CPHA ? "" : "~", | ||
779 | spi->max_speed_hz); | ||
780 | |||
781 | BUG_ON(spi->chip_select >= MAX_CHIP_SELECT); | ||
782 | |||
783 | 	/* Set speed to the SPI max frequency if the device has not set one */ | ||
784 | spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency; | ||
785 | |||
786 | ret = pm_runtime_get_sync(tspi->dev); | ||
787 | if (ret < 0) { | ||
788 | dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret); | ||
789 | return ret; | ||
790 | } | ||
791 | |||
792 | spin_lock_irqsave(&tspi->lock, flags); | ||
793 | val = tspi->def_command1_reg; | ||
794 | if (spi->mode & SPI_CS_HIGH) | ||
795 | val &= ~cs_pol_bit[spi->chip_select]; | ||
796 | else | ||
797 | val |= cs_pol_bit[spi->chip_select]; | ||
798 | tspi->def_command1_reg = val; | ||
799 | tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); | ||
800 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
801 | |||
802 | pm_runtime_put(tspi->dev); | ||
803 | return 0; | ||
804 | } | ||
805 | |||
806 | static int tegra_spi_transfer_one_message(struct spi_master *master, | ||
807 | struct spi_message *msg) | ||
808 | { | ||
809 | bool is_first_msg = true; | ||
810 | int single_xfer; | ||
811 | struct tegra_spi_data *tspi = spi_master_get_devdata(master); | ||
812 | struct spi_transfer *xfer; | ||
813 | struct spi_device *spi = msg->spi; | ||
814 | int ret; | ||
815 | |||
816 | msg->status = 0; | ||
817 | msg->actual_length = 0; | ||
818 | |||
819 | ret = pm_runtime_get_sync(tspi->dev); | ||
820 | if (ret < 0) { | ||
821 | dev_err(tspi->dev, "runtime PM get failed: %d\n", ret); | ||
822 | msg->status = ret; | ||
823 | spi_finalize_current_message(master); | ||
824 | return ret; | ||
825 | } | ||
826 | |||
827 | single_xfer = list_is_singular(&msg->transfers); | ||
828 | list_for_each_entry(xfer, &msg->transfers, transfer_list) { | ||
829 | INIT_COMPLETION(tspi->xfer_completion); | ||
830 | ret = tegra_spi_start_transfer_one(spi, xfer, | ||
831 | is_first_msg, single_xfer); | ||
832 | if (ret < 0) { | ||
833 | dev_err(tspi->dev, | ||
834 | "spi can not start transfer, err %d\n", ret); | ||
835 | goto exit; | ||
836 | } | ||
837 | is_first_msg = false; | ||
838 | ret = wait_for_completion_timeout(&tspi->xfer_completion, | ||
839 | SPI_DMA_TIMEOUT); | ||
840 | if (WARN_ON(ret == 0)) { | ||
841 | dev_err(tspi->dev, | ||
842 | 				"spi transfer timeout, err %d\n", ret); | ||
843 | ret = -EIO; | ||
844 | goto exit; | ||
845 | } | ||
846 | |||
847 | if (tspi->tx_status || tspi->rx_status) { | ||
848 | dev_err(tspi->dev, "Error in Transfer\n"); | ||
849 | ret = -EIO; | ||
850 | goto exit; | ||
851 | } | ||
852 | msg->actual_length += xfer->len; | ||
853 | if (xfer->cs_change && xfer->delay_usecs) { | ||
854 | tegra_spi_writel(tspi, tspi->def_command1_reg, | ||
855 | SPI_COMMAND1); | ||
856 | udelay(xfer->delay_usecs); | ||
857 | } | ||
858 | } | ||
859 | ret = 0; | ||
860 | exit: | ||
861 | tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); | ||
862 | pm_runtime_put(tspi->dev); | ||
863 | msg->status = ret; | ||
864 | spi_finalize_current_message(master); | ||
865 | return ret; | ||
866 | } | ||
867 | |||
868 | static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi) | ||
869 | { | ||
870 | struct spi_transfer *t = tspi->curr_xfer; | ||
871 | unsigned long flags; | ||
872 | |||
873 | spin_lock_irqsave(&tspi->lock, flags); | ||
874 | if (tspi->tx_status || tspi->rx_status) { | ||
875 | dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n", | ||
876 | tspi->status_reg); | ||
877 | dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n", | ||
878 | tspi->command1_reg, tspi->dma_control_reg); | ||
879 | tegra_periph_reset_assert(tspi->clk); | ||
880 | udelay(2); | ||
881 | tegra_periph_reset_deassert(tspi->clk); | ||
882 | complete(&tspi->xfer_completion); | ||
883 | goto exit; | ||
884 | } | ||
885 | |||
886 | if (tspi->cur_direction & DATA_DIR_RX) | ||
887 | tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t); | ||
888 | |||
889 | if (tspi->cur_direction & DATA_DIR_TX) | ||
890 | tspi->cur_pos = tspi->cur_tx_pos; | ||
891 | else | ||
892 | tspi->cur_pos = tspi->cur_rx_pos; | ||
893 | |||
894 | if (tspi->cur_pos == t->len) { | ||
895 | complete(&tspi->xfer_completion); | ||
896 | goto exit; | ||
897 | } | ||
898 | |||
899 | tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t); | ||
900 | tegra_spi_start_cpu_based_transfer(tspi, t); | ||
901 | exit: | ||
902 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
903 | return IRQ_HANDLED; | ||
904 | } | ||
905 | |||
906 | static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi) | ||
907 | { | ||
908 | struct spi_transfer *t = tspi->curr_xfer; | ||
909 | long wait_status; | ||
910 | int err = 0; | ||
911 | unsigned total_fifo_words; | ||
912 | unsigned long flags; | ||
913 | |||
914 | /* Abort dmas if any error */ | ||
915 | if (tspi->cur_direction & DATA_DIR_TX) { | ||
916 | if (tspi->tx_status) { | ||
917 | dmaengine_terminate_all(tspi->tx_dma_chan); | ||
918 | err += 1; | ||
919 | } else { | ||
920 | wait_status = wait_for_completion_interruptible_timeout( | ||
921 | &tspi->tx_dma_complete, SPI_DMA_TIMEOUT); | ||
922 | if (wait_status <= 0) { | ||
923 | dmaengine_terminate_all(tspi->tx_dma_chan); | ||
924 | dev_err(tspi->dev, "TxDma Xfer failed\n"); | ||
925 | err += 1; | ||
926 | } | ||
927 | } | ||
928 | } | ||
929 | |||
930 | if (tspi->cur_direction & DATA_DIR_RX) { | ||
931 | if (tspi->rx_status) { | ||
932 | dmaengine_terminate_all(tspi->rx_dma_chan); | ||
933 | err += 2; | ||
934 | } else { | ||
935 | wait_status = wait_for_completion_interruptible_timeout( | ||
936 | &tspi->rx_dma_complete, SPI_DMA_TIMEOUT); | ||
937 | if (wait_status <= 0) { | ||
938 | dmaengine_terminate_all(tspi->rx_dma_chan); | ||
939 | dev_err(tspi->dev, "RxDma Xfer failed\n"); | ||
940 | err += 2; | ||
941 | } | ||
942 | } | ||
943 | } | ||
944 | |||
945 | spin_lock_irqsave(&tspi->lock, flags); | ||
946 | if (err) { | ||
947 | dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n", | ||
948 | tspi->status_reg); | ||
949 | dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n", | ||
950 | tspi->command1_reg, tspi->dma_control_reg); | ||
951 | tegra_periph_reset_assert(tspi->clk); | ||
952 | udelay(2); | ||
953 | tegra_periph_reset_deassert(tspi->clk); | ||
954 | complete(&tspi->xfer_completion); | ||
955 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
956 | return IRQ_HANDLED; | ||
957 | } | ||
958 | |||
959 | if (tspi->cur_direction & DATA_DIR_RX) | ||
960 | tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t); | ||
961 | |||
962 | if (tspi->cur_direction & DATA_DIR_TX) | ||
963 | tspi->cur_pos = tspi->cur_tx_pos; | ||
964 | else | ||
965 | tspi->cur_pos = tspi->cur_rx_pos; | ||
966 | |||
967 | if (tspi->cur_pos == t->len) { | ||
968 | complete(&tspi->xfer_completion); | ||
969 | goto exit; | ||
970 | } | ||
971 | |||
972 | /* Continue transfer in current message */ | ||
973 | total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, | ||
974 | tspi, t); | ||
975 | if (total_fifo_words > SPI_FIFO_DEPTH) | ||
976 | err = tegra_spi_start_dma_based_transfer(tspi, t); | ||
977 | else | ||
978 | err = tegra_spi_start_cpu_based_transfer(tspi, t); | ||
979 | |||
980 | exit: | ||
981 | spin_unlock_irqrestore(&tspi->lock, flags); | ||
982 | return IRQ_HANDLED; | ||
983 | } | ||
984 | |||
985 | static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data) | ||
986 | { | ||
987 | struct tegra_spi_data *tspi = context_data; | ||
988 | |||
989 | if (!tspi->is_curr_dma_xfer) | ||
990 | return handle_cpu_based_xfer(tspi); | ||
991 | return handle_dma_based_xfer(tspi); | ||
992 | } | ||
993 | |||
994 | static irqreturn_t tegra_spi_isr(int irq, void *context_data) | ||
995 | { | ||
996 | struct tegra_spi_data *tspi = context_data; | ||
997 | |||
998 | tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS); | ||
999 | if (tspi->cur_direction & DATA_DIR_TX) | ||
1000 | tspi->tx_status = tspi->status_reg & | ||
1001 | (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF); | ||
1002 | |||
1003 | if (tspi->cur_direction & DATA_DIR_RX) | ||
1004 | tspi->rx_status = tspi->status_reg & | ||
1005 | (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF); | ||
1006 | tegra_spi_clear_status(tspi); | ||
1007 | |||
1008 | return IRQ_WAKE_THREAD; | ||
1009 | } | ||
1010 | |||
1011 | static void tegra_spi_parse_dt(struct platform_device *pdev, | ||
1012 | struct tegra_spi_data *tspi) | ||
1013 | { | ||
1014 | struct device_node *np = pdev->dev.of_node; | ||
1015 | u32 of_dma[2]; | ||
1016 | |||
1017 | if (of_property_read_u32_array(np, "nvidia,dma-request-selector", | ||
1018 | of_dma, 2) >= 0) | ||
1019 | tspi->dma_req_sel = of_dma[1]; | ||
1020 | |||
1021 | if (of_property_read_u32(np, "spi-max-frequency", | ||
1022 | &tspi->spi_max_frequency)) | ||
1023 | tspi->spi_max_frequency = 25000000; /* 25MHz */ | ||
1024 | } | ||
1025 | |||
1026 | static struct of_device_id tegra_spi_of_match[] = { | ||
1027 | { .compatible = "nvidia,tegra114-spi", }, | ||
1028 | {} | ||
1029 | }; | ||
1030 | MODULE_DEVICE_TABLE(of, tegra_spi_of_match); | ||
1031 | |||
1032 | static int tegra_spi_probe(struct platform_device *pdev) | ||
1033 | { | ||
1034 | struct spi_master *master; | ||
1035 | struct tegra_spi_data *tspi; | ||
1036 | struct resource *r; | ||
1037 | int ret, spi_irq; | ||
1038 | |||
1039 | master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); | ||
1040 | if (!master) { | ||
1041 | dev_err(&pdev->dev, "master allocation failed\n"); | ||
1042 | return -ENOMEM; | ||
1043 | } | ||
1044 | dev_set_drvdata(&pdev->dev, master); | ||
1045 | tspi = spi_master_get_devdata(master); | ||
1046 | |||
1047 | /* Parse DT */ | ||
1048 | tegra_spi_parse_dt(pdev, tspi); | ||
1049 | |||
1050 | /* the spi->mode bits understood by this driver: */ | ||
1051 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
1052 | master->setup = tegra_spi_setup; | ||
1053 | master->transfer_one_message = tegra_spi_transfer_one_message; | ||
1054 | master->num_chipselect = MAX_CHIP_SELECT; | ||
1055 | master->bus_num = -1; | ||
1056 | |||
1057 | tspi->master = master; | ||
1058 | tspi->dev = &pdev->dev; | ||
1059 | spin_lock_init(&tspi->lock); | ||
1060 | |||
1061 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1062 | if (!r) { | ||
1063 | dev_err(&pdev->dev, "No IO memory resource\n"); | ||
1064 | ret = -ENODEV; | ||
1065 | goto exit_free_master; | ||
1066 | } | ||
1067 | tspi->phys = r->start; | ||
1068 | tspi->base = devm_ioremap_resource(&pdev->dev, r); | ||
1069 | if (IS_ERR(tspi->base)) { | ||
1070 | ret = PTR_ERR(tspi->base); | ||
1071 | dev_err(&pdev->dev, "ioremap failed: err = %d\n", ret); | ||
1072 | goto exit_free_master; | ||
1073 | } | ||
1074 | |||
1075 | spi_irq = platform_get_irq(pdev, 0); | ||
1076 | tspi->irq = spi_irq; | ||
1077 | ret = request_threaded_irq(tspi->irq, tegra_spi_isr, | ||
1078 | tegra_spi_isr_thread, IRQF_ONESHOT, | ||
1079 | dev_name(&pdev->dev), tspi); | ||
1080 | if (ret < 0) { | ||
1081 | dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", | ||
1082 | tspi->irq); | ||
1083 | goto exit_free_master; | ||
1084 | } | ||
1085 | |||
1086 | tspi->clk = devm_clk_get(&pdev->dev, "spi"); | ||
1087 | if (IS_ERR(tspi->clk)) { | ||
1088 | dev_err(&pdev->dev, "can not get clock\n"); | ||
1089 | ret = PTR_ERR(tspi->clk); | ||
1090 | goto exit_free_irq; | ||
1091 | } | ||
1092 | |||
1093 | tspi->max_buf_size = SPI_FIFO_DEPTH << 2; | ||
1094 | tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; | ||
1095 | |||
1096 | if (tspi->dma_req_sel) { | ||
1097 | ret = tegra_spi_init_dma_param(tspi, true); | ||
1098 | if (ret < 0) { | ||
1099 | dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret); | ||
1100 | goto exit_free_irq; | ||
1101 | } | ||
1102 | |||
1103 | ret = tegra_spi_init_dma_param(tspi, false); | ||
1104 | if (ret < 0) { | ||
1105 | dev_err(&pdev->dev, "TxDma Init failed, err %d\n", ret); | ||
1106 | goto exit_rx_dma_free; | ||
1107 | } | ||
1108 | tspi->max_buf_size = tspi->dma_buf_size; | ||
1109 | init_completion(&tspi->tx_dma_complete); | ||
1110 | init_completion(&tspi->rx_dma_complete); | ||
1111 | } | ||
1112 | |||
1113 | init_completion(&tspi->xfer_completion); | ||
1114 | |||
1115 | pm_runtime_enable(&pdev->dev); | ||
1116 | if (!pm_runtime_enabled(&pdev->dev)) { | ||
1117 | ret = tegra_spi_runtime_resume(&pdev->dev); | ||
1118 | if (ret) | ||
1119 | goto exit_pm_disable; | ||
1120 | } | ||
1121 | |||
1122 | ret = pm_runtime_get_sync(&pdev->dev); | ||
1123 | if (ret < 0) { | ||
1124 | dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret); | ||
1125 | goto exit_pm_disable; | ||
1126 | } | ||
1127 | tspi->def_command1_reg = SPI_M_S; | ||
1128 | tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1); | ||
1129 | pm_runtime_put(&pdev->dev); | ||
1130 | |||
1131 | master->dev.of_node = pdev->dev.of_node; | ||
1132 | ret = spi_register_master(master); | ||
1133 | if (ret < 0) { | ||
1134 | dev_err(&pdev->dev, "can not register to master err %d\n", ret); | ||
1135 | goto exit_pm_disable; | ||
1136 | } | ||
1137 | return ret; | ||
1138 | |||
1139 | exit_pm_disable: | ||
1140 | pm_runtime_disable(&pdev->dev); | ||
1141 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1142 | tegra_spi_runtime_suspend(&pdev->dev); | ||
1143 | tegra_spi_deinit_dma_param(tspi, false); | ||
1144 | exit_rx_dma_free: | ||
1145 | tegra_spi_deinit_dma_param(tspi, true); | ||
1146 | exit_free_irq: | ||
1147 | free_irq(spi_irq, tspi); | ||
1148 | exit_free_master: | ||
1149 | spi_master_put(master); | ||
1150 | return ret; | ||
1151 | } | ||
1152 | |||
1153 | static int tegra_spi_remove(struct platform_device *pdev) | ||
1154 | { | ||
1155 | struct spi_master *master = dev_get_drvdata(&pdev->dev); | ||
1156 | struct tegra_spi_data *tspi = spi_master_get_devdata(master); | ||
1157 | |||
1158 | free_irq(tspi->irq, tspi); | ||
1159 | spi_unregister_master(master); | ||
1160 | |||
1161 | if (tspi->tx_dma_chan) | ||
1162 | tegra_spi_deinit_dma_param(tspi, false); | ||
1163 | |||
1164 | if (tspi->rx_dma_chan) | ||
1165 | tegra_spi_deinit_dma_param(tspi, true); | ||
1166 | |||
1167 | pm_runtime_disable(&pdev->dev); | ||
1168 | if (!pm_runtime_status_suspended(&pdev->dev)) | ||
1169 | tegra_spi_runtime_suspend(&pdev->dev); | ||
1170 | |||
1171 | return 0; | ||
1172 | } | ||
1173 | |||
1174 | #ifdef CONFIG_PM_SLEEP | ||
1175 | static int tegra_spi_suspend(struct device *dev) | ||
1176 | { | ||
1177 | struct spi_master *master = dev_get_drvdata(dev); | ||
1178 | |||
1179 | return spi_master_suspend(master); | ||
1180 | } | ||
1181 | |||
1182 | static int tegra_spi_resume(struct device *dev) | ||
1183 | { | ||
1184 | struct spi_master *master = dev_get_drvdata(dev); | ||
1185 | struct tegra_spi_data *tspi = spi_master_get_devdata(master); | ||
1186 | int ret; | ||
1187 | |||
1188 | ret = pm_runtime_get_sync(dev); | ||
1189 | if (ret < 0) { | ||
1190 | dev_err(dev, "pm runtime failed, e = %d\n", ret); | ||
1191 | return ret; | ||
1192 | } | ||
1193 | tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1); | ||
1194 | pm_runtime_put(dev); | ||
1195 | |||
1196 | return spi_master_resume(master); | ||
1197 | } | ||
1198 | #endif | ||
1199 | |||
1200 | static int tegra_spi_runtime_suspend(struct device *dev) | ||
1201 | { | ||
1202 | struct spi_master *master = dev_get_drvdata(dev); | ||
1203 | struct tegra_spi_data *tspi = spi_master_get_devdata(master); | ||
1204 | |||
1205 | 	/* Flush all writes still queued in the PPSB by reading them back */ | ||
1206 | tegra_spi_readl(tspi, SPI_COMMAND1); | ||
1207 | |||
1208 | clk_disable_unprepare(tspi->clk); | ||
1209 | return 0; | ||
1210 | } | ||
1211 | |||
1212 | static int tegra_spi_runtime_resume(struct device *dev) | ||
1213 | { | ||
1214 | struct spi_master *master = dev_get_drvdata(dev); | ||
1215 | struct tegra_spi_data *tspi = spi_master_get_devdata(master); | ||
1216 | int ret; | ||
1217 | |||
1218 | ret = clk_prepare_enable(tspi->clk); | ||
1219 | if (ret < 0) { | ||
1220 | dev_err(tspi->dev, "clk_prepare failed: %d\n", ret); | ||
1221 | return ret; | ||
1222 | } | ||
1223 | return 0; | ||
1224 | } | ||
1225 | |||
1226 | static const struct dev_pm_ops tegra_spi_pm_ops = { | ||
1227 | SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend, | ||
1228 | tegra_spi_runtime_resume, NULL) | ||
1229 | SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume) | ||
1230 | }; | ||
1231 | static struct platform_driver tegra_spi_driver = { | ||
1232 | .driver = { | ||
1233 | .name = "spi-tegra114", | ||
1234 | .owner = THIS_MODULE, | ||
1235 | .pm = &tegra_spi_pm_ops, | ||
1236 | .of_match_table = tegra_spi_of_match, | ||
1237 | }, | ||
1238 | .probe = tegra_spi_probe, | ||
1239 | .remove = tegra_spi_remove, | ||
1240 | }; | ||
1241 | module_platform_driver(tegra_spi_driver); | ||
1242 | |||
1243 | MODULE_ALIAS("platform:spi-tegra114"); | ||
1244 | MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver"); | ||
1245 | MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); | ||
1246 | MODULE_LICENSE("GPL v2"); | ||
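In the driver above, tegra_spi_calculate_curr_xfer_param() packs 8- and 16-bit words into 32-bit FIFO entries and gives any other word size a full entry each; the resulting entry count decides between the CPU and DMA paths (more than SPI_FIFO_DEPTH entries selects DMA). A standalone sketch of that arithmetic, with a hypothetical helper name and min() as in <linux/kernel.h>:

    /*
     * Sketch only (not part of the patch): number of 32-bit FIFO entries a
     * transfer chunk needs, mirroring tegra_spi_calculate_curr_xfer_param().
     */
    static unsigned tegra114_fifo_words(unsigned remain_len,
    				    unsigned bits_per_word,
    				    unsigned max_buf_size)
    {
    	unsigned bytes_per_word = (bits_per_word - 1) / 8 + 1;

    	if (bits_per_word == 8 || bits_per_word == 16) {
    		/* packed: round the byte count up to whole 32-bit entries */
    		unsigned max_len = min(remain_len, max_buf_size);

    		return (max_len + 3) / 4;
    	}
    	/* unpacked: one 32-bit FIFO entry per word */
    	return min((remain_len - 1) / bytes_per_word + 1, max_buf_size / 4);
    }

For example, a 100-byte transfer at 8 bits per word needs (100 + 3) / 4 = 25 entries, under the 64-entry SPI_FIFO_DEPTH, so it stays on the interrupt-driven PIO path; larger chunks go through the dmaengine channels set up in tegra_spi_init_dma_param().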
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c index 3d6a12b2af04..d65c000efe35 100644 --- a/drivers/spi/spi-tegra20-sflash.c +++ b/drivers/spi/spi-tegra20-sflash.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/of.h> | 33 | #include <linux/of.h> |
34 | #include <linux/of_device.h> | 34 | #include <linux/of_device.h> |
35 | #include <linux/spi/spi.h> | 35 | #include <linux/spi/spi.h> |
36 | #include <linux/spi/spi-tegra.h> | ||
37 | #include <linux/clk/tegra.h> | 36 | #include <linux/clk/tegra.h> |
38 | 37 | ||
39 | #define SPI_COMMAND 0x000 | 38 | #define SPI_COMMAND 0x000 |
@@ -439,23 +438,13 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data) | |||
439 | return handle_cpu_based_xfer(tsd); | 438 | return handle_cpu_based_xfer(tsd); |
440 | } | 439 | } |
441 | 440 | ||
442 | static struct tegra_spi_platform_data *tegra_sflash_parse_dt( | 441 | static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd) |
443 | struct platform_device *pdev) | ||
444 | { | 442 | { |
445 | struct tegra_spi_platform_data *pdata; | 443 | struct device_node *np = tsd->dev->of_node; |
446 | struct device_node *np = pdev->dev.of_node; | ||
447 | u32 max_freq; | ||
448 | |||
449 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
450 | if (!pdata) { | ||
451 | dev_err(&pdev->dev, "Memory alloc for pdata failed\n"); | ||
452 | return NULL; | ||
453 | } | ||
454 | |||
455 | if (!of_property_read_u32(np, "spi-max-frequency", &max_freq)) | ||
456 | pdata->spi_max_frequency = max_freq; | ||
457 | 444 | ||
458 | return pdata; | 445 | if (of_property_read_u32(np, "spi-max-frequency", |
446 | &tsd->spi_max_frequency)) | ||
447 | tsd->spi_max_frequency = 25000000; /* 25MHz */ | ||
459 | } | 448 | } |
460 | 449 | ||
461 | static struct of_device_id tegra_sflash_of_match[] = { | 450 | static struct of_device_id tegra_sflash_of_match[] = { |
@@ -469,28 +458,15 @@ static int tegra_sflash_probe(struct platform_device *pdev) | |||
469 | struct spi_master *master; | 458 | struct spi_master *master; |
470 | struct tegra_sflash_data *tsd; | 459 | struct tegra_sflash_data *tsd; |
471 | struct resource *r; | 460 | struct resource *r; |
472 | struct tegra_spi_platform_data *pdata = pdev->dev.platform_data; | ||
473 | int ret; | 461 | int ret; |
474 | const struct of_device_id *match; | 462 | const struct of_device_id *match; |
475 | 463 | ||
476 | match = of_match_device(of_match_ptr(tegra_sflash_of_match), | 464 | match = of_match_device(tegra_sflash_of_match, &pdev->dev); |
477 | &pdev->dev); | ||
478 | if (!match) { | 465 | if (!match) { |
479 | dev_err(&pdev->dev, "Error: No device match found\n"); | 466 | dev_err(&pdev->dev, "Error: No device match found\n"); |
480 | return -ENODEV; | 467 | return -ENODEV; |
481 | } | 468 | } |
482 | 469 | ||
483 | if (!pdata && pdev->dev.of_node) | ||
484 | pdata = tegra_sflash_parse_dt(pdev); | ||
485 | |||
486 | if (!pdata) { | ||
487 | dev_err(&pdev->dev, "No platform data, exiting\n"); | ||
488 | return -ENODEV; | ||
489 | } | ||
490 | |||
491 | if (!pdata->spi_max_frequency) | ||
492 | pdata->spi_max_frequency = 25000000; /* 25MHz */ | ||
493 | |||
494 | master = spi_alloc_master(&pdev->dev, sizeof(*tsd)); | 470 | master = spi_alloc_master(&pdev->dev, sizeof(*tsd)); |
495 | if (!master) { | 471 | if (!master) { |
496 | dev_err(&pdev->dev, "master allocation failed\n"); | 472 | dev_err(&pdev->dev, "master allocation failed\n"); |
@@ -510,6 +486,8 @@ static int tegra_sflash_probe(struct platform_device *pdev) | |||
510 | tsd->dev = &pdev->dev; | 486 | tsd->dev = &pdev->dev; |
511 | spin_lock_init(&tsd->lock); | 487 | spin_lock_init(&tsd->lock); |
512 | 488 | ||
489 | tegra_sflash_parse_dt(tsd); | ||
490 | |||
513 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 491 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
514 | if (!r) { | 492 | if (!r) { |
515 | dev_err(&pdev->dev, "No IO memory resource\n"); | 493 | dev_err(&pdev->dev, "No IO memory resource\n"); |
@@ -538,7 +516,6 @@ static int tegra_sflash_probe(struct platform_device *pdev) | |||
538 | goto exit_free_irq; | 516 | goto exit_free_irq; |
539 | } | 517 | } |
540 | 518 | ||
541 | tsd->spi_max_frequency = pdata->spi_max_frequency; | ||
542 | init_completion(&tsd->xfer_completion); | 519 | init_completion(&tsd->xfer_completion); |
543 | pm_runtime_enable(&pdev->dev); | 520 | pm_runtime_enable(&pdev->dev); |
544 | if (!pm_runtime_enabled(&pdev->dev)) { | 521 | if (!pm_runtime_enabled(&pdev->dev)) { |
@@ -658,7 +635,7 @@ static struct platform_driver tegra_sflash_driver = { | |||
658 | .name = "spi-tegra-sflash", | 635 | .name = "spi-tegra-sflash", |
659 | .owner = THIS_MODULE, | 636 | .owner = THIS_MODULE, |
660 | .pm = &slink_pm_ops, | 637 | .pm = &slink_pm_ops, |
661 | .of_match_table = of_match_ptr(tegra_sflash_of_match), | 638 | .of_match_table = tegra_sflash_of_match, |
662 | }, | 639 | }, |
663 | .probe = tegra_sflash_probe, | 640 | .probe = tegra_sflash_probe, |
664 | .remove = tegra_sflash_remove, | 641 | .remove = tegra_sflash_remove, |
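Note on the spi-tegra20-sflash.c hunks above: the driver drops the tegra_spi_platform_data indirection and reads "spi-max-frequency" straight from its device-tree node, falling back to 25 MHz when the property is absent; with the driver now DT-only, the of_match_ptr() wrapper around the match table is gone as well. A minimal sketch of the resulting parse helper, with the error-handling behaviour of of_property_read_u32() spelled out in comments (the struct is abbreviated to the fields the hunk touches):

    #include <linux/device.h>
    #include <linux/of.h>

    struct tegra_sflash_data {
            struct device *dev;
            u32 spi_max_frequency;
            /* ... remaining driver state elided ... */
    };

    static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd)
    {
            struct device_node *np = tsd->dev->of_node;

            /*
             * of_property_read_u32() returns 0 on success and leaves the
             * destination untouched on failure, so a non-zero return is
             * the cue to apply the 25 MHz default.
             */
            if (of_property_read_u32(np, "spi-max-frequency",
                                     &tsd->spi_max_frequency))
                    tsd->spi_max_frequency = 25000000; /* 25 MHz */
    }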
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c index a829563f4713..3faf88d003de 100644 --- a/drivers/spi/spi-tegra20-slink.c +++ b/drivers/spi/spi-tegra20-slink.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/of.h> | 34 | #include <linux/of.h> |
35 | #include <linux/of_device.h> | 35 | #include <linux/of_device.h> |
36 | #include <linux/spi/spi.h> | 36 | #include <linux/spi/spi.h> |
37 | #include <linux/spi/spi-tegra.h> | ||
38 | #include <linux/clk/tegra.h> | 37 | #include <linux/clk/tegra.h> |
39 | 38 | ||
40 | #define SLINK_COMMAND 0x000 | 39 | #define SLINK_COMMAND 0x000 |
@@ -189,7 +188,6 @@ struct tegra_slink_data { | |||
189 | unsigned dma_buf_size; | 188 | unsigned dma_buf_size; |
190 | unsigned max_buf_size; | 189 | unsigned max_buf_size; |
191 | bool is_curr_dma_xfer; | 190 | bool is_curr_dma_xfer; |
192 | bool is_hw_based_cs; | ||
193 | 191 | ||
194 | struct completion rx_dma_complete; | 192 | struct completion rx_dma_complete; |
195 | struct completion tx_dma_complete; | 193 | struct completion tx_dma_complete; |
@@ -375,9 +373,6 @@ static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf( | |||
375 | tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; | 373 | tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word; |
376 | read_words += tspi->curr_dma_words; | 374 | read_words += tspi->curr_dma_words; |
377 | } else { | 375 | } else { |
378 | unsigned int bits_per_word; | ||
379 | |||
380 | bits_per_word = t->bits_per_word; | ||
381 | for (count = 0; count < rx_full_count; count++) { | 376 | for (count = 0; count < rx_full_count; count++) { |
382 | x = tegra_slink_readl(tspi, SLINK_RX_FIFO); | 377 | x = tegra_slink_readl(tspi, SLINK_RX_FIFO); |
383 | for (i = 0; (i < tspi->bytes_per_word); i++) | 378 | for (i = 0; (i < tspi->bytes_per_word); i++) |
@@ -720,7 +715,6 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi, | |||
720 | u8 bits_per_word; | 715 | u8 bits_per_word; |
721 | unsigned total_fifo_words; | 716 | unsigned total_fifo_words; |
722 | int ret; | 717 | int ret; |
723 | struct tegra_spi_device_controller_data *cdata = spi->controller_data; | ||
724 | unsigned long command; | 718 | unsigned long command; |
725 | unsigned long command2; | 719 | unsigned long command2; |
726 | 720 | ||
@@ -743,39 +737,11 @@ static int tegra_slink_start_transfer_one(struct spi_device *spi, | |||
743 | 737 | ||
744 | command = tspi->def_command_reg; | 738 | command = tspi->def_command_reg; |
745 | command |= SLINK_BIT_LENGTH(bits_per_word - 1); | 739 | command |= SLINK_BIT_LENGTH(bits_per_word - 1); |
740 | command |= SLINK_CS_SW | SLINK_CS_VALUE; | ||
746 | 741 | ||
747 | command2 = tspi->def_command2_reg; | 742 | command2 = tspi->def_command2_reg; |
748 | command2 |= SLINK_SS_EN_CS(spi->chip_select); | 743 | command2 |= SLINK_SS_EN_CS(spi->chip_select); |
749 | 744 | ||
750 | /* possibly use the hw based chip select */ | ||
751 | tspi->is_hw_based_cs = false; | ||
752 | if (cdata && cdata->is_hw_based_cs && is_single_xfer && | ||
753 | ((tspi->curr_dma_words * tspi->bytes_per_word) == | ||
754 | (t->len - tspi->cur_pos))) { | ||
755 | int setup_count; | ||
756 | int sts2; | ||
757 | |||
758 | setup_count = cdata->cs_setup_clk_count >> 1; | ||
759 | setup_count = max(setup_count, 3); | ||
760 | command2 |= SLINK_SS_SETUP(setup_count); | ||
761 | if (tspi->chip_data->cs_hold_time) { | ||
762 | int hold_count; | ||
763 | |||
764 | hold_count = cdata->cs_hold_clk_count; | ||
765 | hold_count = max(hold_count, 0xF); | ||
766 | sts2 = tegra_slink_readl(tspi, SLINK_STATUS2); | ||
767 | sts2 &= ~SLINK_SS_HOLD_TIME(0xF); | ||
768 | sts2 |= SLINK_SS_HOLD_TIME(hold_count); | ||
769 | tegra_slink_writel(tspi, sts2, SLINK_STATUS2); | ||
770 | } | ||
771 | tspi->is_hw_based_cs = true; | ||
772 | } | ||
773 | |||
774 | if (tspi->is_hw_based_cs) | ||
775 | command &= ~SLINK_CS_SW; | ||
776 | else | ||
777 | command |= SLINK_CS_SW | SLINK_CS_VALUE; | ||
778 | |||
779 | command &= ~SLINK_MODES; | 745 | command &= ~SLINK_MODES; |
780 | if (spi->mode & SPI_CPHA) | 746 | if (spi->mode & SPI_CPHA) |
781 | command |= SLINK_CK_SDA; | 747 | command |= SLINK_CK_SDA; |
@@ -1065,36 +1031,25 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data) | |||
1065 | return IRQ_WAKE_THREAD; | 1031 | return IRQ_WAKE_THREAD; |
1066 | } | 1032 | } |
1067 | 1033 | ||
1068 | static struct tegra_spi_platform_data *tegra_slink_parse_dt( | 1034 | static void tegra_slink_parse_dt(struct tegra_slink_data *tspi) |
1069 | struct platform_device *pdev) | ||
1070 | { | 1035 | { |
1071 | struct tegra_spi_platform_data *pdata; | 1036 | struct device_node *np = tspi->dev->of_node; |
1072 | const unsigned int *prop; | ||
1073 | struct device_node *np = pdev->dev.of_node; | ||
1074 | u32 of_dma[2]; | 1037 | u32 of_dma[2]; |
1075 | 1038 | ||
1076 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | ||
1077 | if (!pdata) { | ||
1078 | dev_err(&pdev->dev, "Memory alloc for pdata failed\n"); | ||
1079 | return NULL; | ||
1080 | } | ||
1081 | |||
1082 | if (of_property_read_u32_array(np, "nvidia,dma-request-selector", | 1039 | if (of_property_read_u32_array(np, "nvidia,dma-request-selector", |
1083 | of_dma, 2) >= 0) | 1040 | of_dma, 2) >= 0) |
1084 | pdata->dma_req_sel = of_dma[1]; | 1041 | tspi->dma_req_sel = of_dma[1]; |
1085 | |||
1086 | prop = of_get_property(np, "spi-max-frequency", NULL); | ||
1087 | if (prop) | ||
1088 | pdata->spi_max_frequency = be32_to_cpup(prop); | ||
1089 | 1042 | ||
1090 | return pdata; | 1043 | if (of_property_read_u32(np, "spi-max-frequency", |
1044 | &tspi->spi_max_frequency)) | ||
1045 | tspi->spi_max_frequency = 25000000; /* 25MHz */ | ||
1091 | } | 1046 | } |
1092 | 1047 | ||
1093 | const struct tegra_slink_chip_data tegra30_spi_cdata = { | 1048 | static const struct tegra_slink_chip_data tegra30_spi_cdata = { |
1094 | .cs_hold_time = true, | 1049 | .cs_hold_time = true, |
1095 | }; | 1050 | }; |
1096 | 1051 | ||
1097 | const struct tegra_slink_chip_data tegra20_spi_cdata = { | 1052 | static const struct tegra_slink_chip_data tegra20_spi_cdata = { |
1098 | .cs_hold_time = false, | 1053 | .cs_hold_time = false, |
1099 | }; | 1054 | }; |
1100 | 1055 | ||
@@ -1110,27 +1065,16 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1110 | struct spi_master *master; | 1065 | struct spi_master *master; |
1111 | struct tegra_slink_data *tspi; | 1066 | struct tegra_slink_data *tspi; |
1112 | struct resource *r; | 1067 | struct resource *r; |
1113 | struct tegra_spi_platform_data *pdata = pdev->dev.platform_data; | ||
1114 | int ret, spi_irq; | 1068 | int ret, spi_irq; |
1115 | const struct tegra_slink_chip_data *cdata = NULL; | 1069 | const struct tegra_slink_chip_data *cdata = NULL; |
1116 | const struct of_device_id *match; | 1070 | const struct of_device_id *match; |
1117 | 1071 | ||
1118 | match = of_match_device(of_match_ptr(tegra_slink_of_match), &pdev->dev); | 1072 | match = of_match_device(tegra_slink_of_match, &pdev->dev); |
1119 | if (!match) { | 1073 | if (!match) { |
1120 | dev_err(&pdev->dev, "Error: No device match found\n"); | 1074 | dev_err(&pdev->dev, "Error: No device match found\n"); |
1121 | return -ENODEV; | 1075 | return -ENODEV; |
1122 | } | 1076 | } |
1123 | cdata = match->data; | 1077 | cdata = match->data; |
1124 | if (!pdata && pdev->dev.of_node) | ||
1125 | pdata = tegra_slink_parse_dt(pdev); | ||
1126 | |||
1127 | if (!pdata) { | ||
1128 | dev_err(&pdev->dev, "No platform data, exiting\n"); | ||
1129 | return -ENODEV; | ||
1130 | } | ||
1131 | |||
1132 | if (!pdata->spi_max_frequency) | ||
1133 | pdata->spi_max_frequency = 25000000; /* 25MHz */ | ||
1134 | 1078 | ||
1135 | master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); | 1079 | master = spi_alloc_master(&pdev->dev, sizeof(*tspi)); |
1136 | if (!master) { | 1080 | if (!master) { |
@@ -1148,11 +1092,12 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1148 | dev_set_drvdata(&pdev->dev, master); | 1092 | dev_set_drvdata(&pdev->dev, master); |
1149 | tspi = spi_master_get_devdata(master); | 1093 | tspi = spi_master_get_devdata(master); |
1150 | tspi->master = master; | 1094 | tspi->master = master; |
1151 | tspi->dma_req_sel = pdata->dma_req_sel; | ||
1152 | tspi->dev = &pdev->dev; | 1095 | tspi->dev = &pdev->dev; |
1153 | tspi->chip_data = cdata; | 1096 | tspi->chip_data = cdata; |
1154 | spin_lock_init(&tspi->lock); | 1097 | spin_lock_init(&tspi->lock); |
1155 | 1098 | ||
1099 | tegra_slink_parse_dt(tspi); | ||
1100 | |||
1156 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1101 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1157 | if (!r) { | 1102 | if (!r) { |
1158 | dev_err(&pdev->dev, "No IO memory resource\n"); | 1103 | dev_err(&pdev->dev, "No IO memory resource\n"); |
@@ -1186,9 +1131,8 @@ static int tegra_slink_probe(struct platform_device *pdev) | |||
1186 | 1131 | ||
1187 | tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; | 1132 | tspi->max_buf_size = SLINK_FIFO_DEPTH << 2; |
1188 | tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; | 1133 | tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN; |
1189 | tspi->spi_max_frequency = pdata->spi_max_frequency; | ||
1190 | 1134 | ||
1191 | if (pdata->dma_req_sel) { | 1135 | if (tspi->dma_req_sel) { |
1192 | ret = tegra_slink_init_dma_param(tspi, true); | 1136 | ret = tegra_slink_init_dma_param(tspi, true); |
1193 | if (ret < 0) { | 1137 | if (ret < 0) { |
1194 | dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret); | 1138 | dev_err(&pdev->dev, "RxDma Init failed, err %d\n", ret); |
@@ -1331,7 +1275,7 @@ static struct platform_driver tegra_slink_driver = { | |||
1331 | .name = "spi-tegra-slink", | 1275 | .name = "spi-tegra-slink", |
1332 | .owner = THIS_MODULE, | 1276 | .owner = THIS_MODULE, |
1333 | .pm = &slink_pm_ops, | 1277 | .pm = &slink_pm_ops, |
1334 | .of_match_table = of_match_ptr(tegra_slink_of_match), | 1278 | .of_match_table = tegra_slink_of_match, |
1335 | }, | 1279 | }, |
1336 | .probe = tegra_slink_probe, | 1280 | .probe = tegra_slink_probe, |
1337 | .remove = tegra_slink_remove, | 1281 | .remove = tegra_slink_remove, |
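Note on the spi-tegra20-slink.c hunks above: besides the same platform-data removal and DT parsing as the sflash driver, the hardware chip-select path is deleted, so the COMMAND register is now always programmed with SLINK_CS_SW | SLINK_CS_VALUE (software-driven chip select). The new parse helper also pulls the APB DMA request line out of the two-cell "nvidia,dma-request-selector" property before probe decides between DMA and PIO. A sketch of that part, with the struct abbreviated and the helper name illustrative (in the driver this logic sits inside tegra_slink_parse_dt()):

    #include <linux/device.h>
    #include <linux/of.h>

    struct tegra_slink_data {
            struct device *dev;
            u32 dma_req_sel;        /* 0 is treated as "no DMA, use PIO" */
            /* ... remaining driver state elided ... */
    };

    static void tegra_slink_parse_dma(struct tegra_slink_data *tspi)
    {
            struct device_node *np = tspi->dev->of_node;
            u32 of_dma[2];

            /*
             * The property is a <phandle request-number> pair; only the
             * second cell matters here.  of_property_read_u32_array()
             * returns 0 on success, so dma_req_sel stays 0 (PIO) when
             * the property is missing or malformed.
             */
            if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
                                           of_dma, 2) >= 0)
                    tspi->dma_req_sel = of_dma[1];
    }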
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index f756481b0fea..35f60bd252dd 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -615,7 +615,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) | |||
615 | int size; | 615 | int size; |
616 | u32 n_writes; | 616 | u32 n_writes; |
617 | int j; | 617 | int j; |
618 | struct spi_message *pmsg; | 618 | struct spi_message *pmsg, *tmp; |
619 | const u8 *tx_buf; | 619 | const u8 *tx_buf; |
620 | const u16 *tx_sbuf; | 620 | const u16 *tx_sbuf; |
621 | 621 | ||
@@ -656,7 +656,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) | |||
656 | if (!data->pkt_rx_buff) { | 656 | if (!data->pkt_rx_buff) { |
657 | /* flush queue and set status of all transfers to -ENOMEM */ | 657 | /* flush queue and set status of all transfers to -ENOMEM */ |
658 | dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); | 658 | dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__); |
659 | list_for_each_entry(pmsg, data->queue.next, queue) { | 659 | list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { |
660 | pmsg->status = -ENOMEM; | 660 | pmsg->status = -ENOMEM; |
661 | 661 | ||
662 | if (pmsg->complete != 0) | 662 | if (pmsg->complete != 0) |
@@ -703,7 +703,7 @@ static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw) | |||
703 | 703 | ||
704 | static void pch_spi_nomore_transfer(struct pch_spi_data *data) | 704 | static void pch_spi_nomore_transfer(struct pch_spi_data *data) |
705 | { | 705 | { |
706 | struct spi_message *pmsg; | 706 | struct spi_message *pmsg, *tmp; |
707 | dev_dbg(&data->master->dev, "%s called\n", __func__); | 707 | dev_dbg(&data->master->dev, "%s called\n", __func__); |
708 | /* Invoke complete callback | 708 | /* Invoke complete callback |
709 | * [To the spi core..indicating end of transfer] */ | 709 | * [To the spi core..indicating end of transfer] */ |
@@ -740,7 +740,7 @@ static void pch_spi_nomore_transfer(struct pch_spi_data *data) | |||
740 | dev_dbg(&data->master->dev, | 740 | dev_dbg(&data->master->dev, |
741 | "%s suspend/remove initiated, flushing queue\n", | 741 | "%s suspend/remove initiated, flushing queue\n", |
742 | __func__); | 742 | __func__); |
743 | list_for_each_entry(pmsg, data->queue.next, queue) { | 743 | list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { |
744 | pmsg->status = -EIO; | 744 | pmsg->status = -EIO; |
745 | 745 | ||
746 | if (pmsg->complete) | 746 | if (pmsg->complete) |
@@ -1187,7 +1187,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1187 | 1187 | ||
1188 | static void pch_spi_process_messages(struct work_struct *pwork) | 1188 | static void pch_spi_process_messages(struct work_struct *pwork) |
1189 | { | 1189 | { |
1190 | struct spi_message *pmsg; | 1190 | struct spi_message *pmsg, *tmp; |
1191 | struct pch_spi_data *data; | 1191 | struct pch_spi_data *data; |
1192 | int bpw; | 1192 | int bpw; |
1193 | 1193 | ||
@@ -1199,7 +1199,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1199 | if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { | 1199 | if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) { |
1200 | dev_dbg(&data->master->dev, "%s suspend/remove initiated," | 1200 | dev_dbg(&data->master->dev, "%s suspend/remove initiated," |
1201 | "flushing queue\n", __func__); | 1201 | "flushing queue\n", __func__); |
1202 | list_for_each_entry(pmsg, data->queue.next, queue) { | 1202 | list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) { |
1203 | pmsg->status = -EIO; | 1203 | pmsg->status = -EIO; |
1204 | 1204 | ||
1205 | if (pmsg->complete != 0) { | 1205 | if (pmsg->complete != 0) { |
@@ -1789,8 +1789,10 @@ static int __init pch_spi_init(void) | |||
1789 | return ret; | 1789 | return ret; |
1790 | 1790 | ||
1791 | ret = pci_register_driver(&pch_spi_pcidev_driver); | 1791 | ret = pci_register_driver(&pch_spi_pcidev_driver); |
1792 | if (ret) | 1792 | if (ret) { |
1793 | platform_driver_unregister(&pch_spi_pd_driver); | ||
1793 | return ret; | 1794 | return ret; |
1795 | } | ||
1794 | 1796 | ||
1795 | return 0; | 1797 | return 0; |
1796 | } | 1798 | } |
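Note on the spi-topcliff-pch.c hunks above: the queue-flush loops complete (and thereby hand back) each spi_message while walking the list, so the plain list_for_each_entry() iterator could chase a next pointer through a message its owner has already reclaimed; list_for_each_entry_safe() caches the next entry before the loop body runs. The other fix unregisters the platform driver again when pci_register_driver() fails in pch_spi_init(), so a half-finished module init no longer leaks the earlier registration. A generic sketch of the safe-flush pattern (the helper and the list_del() placement are illustrative, not the driver's exact code):

    #include <linux/list.h>
    #include <linux/spi/spi.h>

    /* Flush every queued message with the given error status. */
    static void flush_queue_with_status(struct list_head *queue, int status)
    {
            struct spi_message *pmsg, *tmp;

            /*
             * list_for_each_entry_safe() remembers the next entry in
             * 'tmp' up front, so pmsg may be unlinked (and possibly
             * freed by its owner once completed) without breaking the
             * iteration.
             */
            list_for_each_entry_safe(pmsg, tmp, queue, queue) {
                    list_del(&pmsg->queue);
                    pmsg->status = status;
                    if (pmsg->complete)
                            pmsg->complete(pmsg->context);
            }
    }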
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 004b10f184d4..163fd802b7ac 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1376,6 +1376,14 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message) | |||
1376 | xfer->bits_per_word = spi->bits_per_word; | 1376 | xfer->bits_per_word = spi->bits_per_word; |
1377 | if (!xfer->speed_hz) | 1377 | if (!xfer->speed_hz) |
1378 | xfer->speed_hz = spi->max_speed_hz; | 1378 | xfer->speed_hz = spi->max_speed_hz; |
1379 | if (master->bits_per_word_mask) { | ||
1380 | /* Only 32 bits fit in the mask */ | ||
1381 | if (xfer->bits_per_word > 32) | ||
1382 | return -EINVAL; | ||
1383 | if (!(master->bits_per_word_mask & | ||
1384 | BIT(xfer->bits_per_word - 1))) | ||
1385 | return -EINVAL; | ||
1386 | } | ||
1379 | } | 1387 | } |
1380 | 1388 | ||
1381 | message->spi = spi; | 1389 | message->spi = spi; |
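Note on the spi.c hunk above: the core can now reject a transfer whose word size the controller never advertised. A driver opts in by filling master->bits_per_word_mask, where bit N-1 set means N-bit words are supported, which is why the check tests BIT(xfer->bits_per_word - 1) and refuses anything above 32 bits. A hedged sketch of the driver side, assuming the SPI_BPW_MASK() helper added alongside the field (BIT(n - 1) is equivalent if the macro is unavailable); the function name is illustrative:

    #include <linux/spi/spi.h>

    /* Probe fragment for a controller handling 8-, 16- and 32-bit words. */
    static void example_advertise_word_sizes(struct spi_master *master)
    {
            master->bits_per_word_mask = SPI_BPW_MASK(8) |
                                         SPI_BPW_MASK(16) |
                                         SPI_BPW_MASK(32);
            /*
             * With the mask set, __spi_async() returns -EINVAL for any
             * spi_transfer requesting an unsupported bits_per_word, so
             * the driver no longer needs its own per-transfer check.
             */
    }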
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 2e0655dbe070..911e9e0711d2 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -603,7 +603,7 @@ static int spidev_probe(struct spi_device *spi) | |||
603 | dev = device_create(spidev_class, &spi->dev, spidev->devt, | 603 | dev = device_create(spidev_class, &spi->dev, spidev->devt, |
604 | spidev, "spidev%d.%d", | 604 | spidev, "spidev%d.%d", |
605 | spi->master->bus_num, spi->chip_select); | 605 | spi->master->bus_num, spi->chip_select); |
606 | status = IS_ERR(dev) ? PTR_ERR(dev) : 0; | 606 | status = PTR_RET(dev); |
607 | } else { | 607 | } else { |
608 | dev_dbg(&spi->dev, "no minor number available!\n"); | 608 | dev_dbg(&spi->dev, "no minor number available!\n"); |
609 | status = -ENODEV; | 609 | status = -ENODEV; |
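Note on the spidev.c hunk above: PTR_RET() from <linux/err.h> is shorthand for the "error pointer or zero" idiom it replaces, returning PTR_ERR(ptr) when IS_ERR(ptr) and 0 otherwise (later kernels rename it PTR_ERR_OR_ZERO()). A minimal sketch with a hypothetical wrapper:

    #include <linux/device.h>
    #include <linux/err.h>

    /* Hypothetical helper: create a class device and map the result to an errno. */
    static int example_create_dev(struct class *cls, struct device *parent,
                                  dev_t devt, void *drvdata, const char *name)
    {
            struct device *dev = device_create(cls, parent, devt, drvdata,
                                               "%s", name);

            return PTR_RET(dev);    /* IS_ERR(dev) ? PTR_ERR(dev) : 0 */
    }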