author      Grant Likely <grant.likely@secretlab.ca>    2010-12-29 02:47:29 -0500
committer   Grant Likely <grant.likely@secretlab.ca>    2010-12-29 02:47:38 -0500
commit      496a2e360a34e1f41c336d23947f800216cb9bdf (patch)
tree        697a68cc1da218c295072a349edfc3e3f57a7c5e /drivers
parent      5aa68b85951aec91d6a955d1de861325fc9a3ba1 (diff)
parent      23ce17adb7fc33a4353abe4b57a03f555cced57b (diff)
Merge branch 'for-grant' of git://arago-project.org/git/projects/linux-davinci into spi/next
* 'for-grant' of git://arago-project.org/git/projects/linux-davinci:
spi: davinci: fix checkpatch errors
spi: davinci: whitespace cleanup
spi: davinci: remove unused variable 'pdata'
spi: davinci: set chip-select mode in SPIDEF only once
spi: davinci: enable both activation and deactivation of chip-selects
spi: davinci: remove unnecessary data transmit on CS disable
spi: davinci: enable GPIO lines to be used as chip selects
spi: davinci: simplify prescalar calculation
spi: davinci: remove 'wait_enable' platform data member
spi: davinci: make chip-select specific parameters really chip-select specific
spi: davinci: consolidate setup of SPIFMTn in one function
spi: davinci: setup chip-select timers values only if timer enabled
spi: davinci: add support for wait enable timeouts
spi: davinci: remove unused members of davinci_spi_slave
spi: davinci: eliminate the single member structure davinci_spi_slave
spi: davinci: eliminate unnecessary update of davinci_spi->count
spi: davinci: simplify calculation of edma acount value
spi: davinci: check for NULL buffer pointer before using it
spi: davinci: remove unnecessary disable of SPI
spi: davinci: remove unnecessary 'count' variable in driver private data
spi: davinci: remove unnecessary completion variable initialization
spi: davinci: remove non-useful interrupt mode support
spi: davinci: simplify poll mode transfers
spi: davinci: add support for interrupt mode
spi: davinci: configure the invariable bits in spipc0 only once
spi: davinci: remove unnecessary function davinci_spi_bufs_prep()
spi: davinci: remove unnecessary call to davinci_spi_setup_transfer()
spi: davinci: do not store DMA channel information per chip select
spi: davinci: always start transmit DMA
spi: davinci: do not use temporary buffer if no transmit data provided
spi: davinci: always start receive DMA
spi: davinci: use edma_write_slot() to setup EDMA PaRAM slot
spi: davinci: fix DMA event generation stoppage
spi: davinci: fix EDMA CC errors at end of transfers
spi: davinci: handle DMA completion errors correctly
spi: davinci: remove usage of additional completion variables for DMA
spi: davinci: let DMA operation be specified on per-device basis
spi: davinci: remove non-useful "clk_internal" platform data
spi: davinci: enable and power-up SPI only when required
spi: davinci: setup the driver owner
spi: davinci: add additional comments
spi: davinci: add EF Johnson Technologies copyright
spi: davinci: removed unused #defines
spi: davinci: remove unnecessary typecast
spi: davinci: do not treat Tx interrupt being set as error
spi: davinci: do not allocate DMA channels during SPI device setup
spi: davinci: remove unnecessary private data member 'region_size'
spi: davinci: shorten variable names
spi: davinci: kconfig: add manufacturer name to prompt string
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/spi/Kconfig       |    7
-rw-r--r--   drivers/spi/davinci_spi.c | 1314
2 files changed, 540 insertions, 781 deletions
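
The bulk of the series replaces module-wide platform data knobs with a per-device
struct davinci_spi_config passed through spi_board_info.controller_data; the driver
reads it back via spi->controller_data in davinci_spi_setup_transfer() and
davinci_spi_bufs() below. A minimal board-file sketch of how a device might opt into
DMA transfers is shown here; the slave device name, bus/chip-select numbers, clock
rate and header path are illustrative assumptions, not part of this merge.

    #include <linux/spi/spi.h>
    #include <mach/spi.h>    /* assumed home of struct davinci_spi_config */

    /* Hypothetical per-device configuration; every field shown is consumed by
     * the reworked driver, but the values are made up for illustration. */
    static struct davinci_spi_config my_flash_cfg = {
            .io_type        = SPI_IO_TYPE_DMA,  /* or SPI_IO_TYPE_POLL / SPI_IO_TYPE_INTR */
            .wdelay         = 0,
            .c2tdelay       = 8,
            .t2cdelay       = 8,
    };

    static struct spi_board_info my_spi_info[] __initdata = {
            {
                    .modalias        = "m25p80",         /* example slave driver */
                    .controller_data = &my_flash_cfg,    /* picked up via spi->controller_data */
                    .max_speed_hz    = 30000000,
                    .bus_num         = 0,
                    .chip_select     = 0,
                    .mode            = SPI_MODE_0,
            },
    };
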
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index caa2e84cc0a6..6efac5fd439d 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -111,11 +111,14 @@ config SPI_COLDFIRE_QSPI | |||
111 | will be called coldfire_qspi. | 111 | will be called coldfire_qspi. |
112 | 112 | ||
113 | config SPI_DAVINCI | 113 | config SPI_DAVINCI |
114 | tristate "SPI controller driver for DaVinci/DA8xx SoC's" | 114 | tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" |
115 | depends on SPI_MASTER && ARCH_DAVINCI | 115 | depends on SPI_MASTER && ARCH_DAVINCI |
116 | select SPI_BITBANG | 116 | select SPI_BITBANG |
117 | help | 117 | help |
118 | SPI master controller for DaVinci and DA8xx SPI modules. | 118 | SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. |
119 | |||
120 | This driver can also be built as a module. The module will be called | ||
121 | davinci_spi. | ||
119 | 122 | ||
120 | config SPI_EP93XX | 123 | config SPI_EP93XX |
121 | tristate "Cirrus Logic EP93xx SPI controller" | 124 | tristate "Cirrus Logic EP93xx SPI controller" |
diff --git a/drivers/spi/davinci_spi.c b/drivers/spi/davinci_spi.c
index b85090caf7cf..6beab99bf95b 100644
--- a/drivers/spi/davinci_spi.c
+++ b/drivers/spi/davinci_spi.c
@@ -1,5 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2009 Texas Instruments. | 2 | * Copyright (C) 2009 Texas Instruments. |
3 | * Copyright (C) 2010 EF Johnson Technologies | ||
3 | * | 4 | * |
4 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
@@ -38,11 +39,6 @@ | |||
38 | 39 | ||
39 | #define CS_DEFAULT 0xFF | 40 | #define CS_DEFAULT 0xFF |
40 | 41 | ||
41 | #define SPI_BUFSIZ (SMP_CACHE_BYTES + 1) | ||
42 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | ||
43 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | ||
44 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | ||
45 | |||
46 | #define SPIFMT_PHASE_MASK BIT(16) | 42 | #define SPIFMT_PHASE_MASK BIT(16) |
47 | #define SPIFMT_POLARITY_MASK BIT(17) | 43 | #define SPIFMT_POLARITY_MASK BIT(17) |
48 | #define SPIFMT_DISTIMER_MASK BIT(18) | 44 | #define SPIFMT_DISTIMER_MASK BIT(18) |
@@ -52,34 +48,43 @@ | |||
52 | #define SPIFMT_ODD_PARITY_MASK BIT(23) | 48 | #define SPIFMT_ODD_PARITY_MASK BIT(23) |
53 | #define SPIFMT_WDELAY_MASK 0x3f000000u | 49 | #define SPIFMT_WDELAY_MASK 0x3f000000u |
54 | #define SPIFMT_WDELAY_SHIFT 24 | 50 | #define SPIFMT_WDELAY_SHIFT 24 |
55 | #define SPIFMT_CHARLEN_MASK 0x0000001Fu | 51 | #define SPIFMT_PRESCALE_SHIFT 8 |
56 | |||
57 | /* SPIGCR1 */ | ||
58 | #define SPIGCR1_SPIENA_MASK 0x01000000u | ||
59 | 52 | ||
60 | /* SPIPC0 */ | 53 | /* SPIPC0 */ |
61 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ | 54 | #define SPIPC0_DIFUN_MASK BIT(11) /* MISO */ |
62 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ | 55 | #define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */ |
63 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ | 56 | #define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */ |
64 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ | 57 | #define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */ |
65 | #define SPIPC0_EN1FUN_MASK BIT(1) | ||
66 | #define SPIPC0_EN0FUN_MASK BIT(0) | ||
67 | 58 | ||
68 | #define SPIINT_MASKALL 0x0101035F | 59 | #define SPIINT_MASKALL 0x0101035F |
69 | #define SPI_INTLVL_1 0x000001FFu | 60 | #define SPIINT_MASKINT 0x0000015F |
70 | #define SPI_INTLVL_0 0x00000000u | 61 | #define SPI_INTLVL_1 0x000001FF |
62 | #define SPI_INTLVL_0 0x00000000 | ||
71 | 63 | ||
72 | /* SPIDAT1 */ | 64 | /* SPIDAT1 (upper 16 bit defines) */ |
73 | #define SPIDAT1_CSHOLD_SHIFT 28 | 65 | #define SPIDAT1_CSHOLD_MASK BIT(12) |
74 | #define SPIDAT1_CSNR_SHIFT 16 | 66 | |
67 | /* SPIGCR1 */ | ||
75 | #define SPIGCR1_CLKMOD_MASK BIT(1) | 68 | #define SPIGCR1_CLKMOD_MASK BIT(1) |
76 | #define SPIGCR1_MASTER_MASK BIT(0) | 69 | #define SPIGCR1_MASTER_MASK BIT(0) |
70 | #define SPIGCR1_POWERDOWN_MASK BIT(8) | ||
77 | #define SPIGCR1_LOOPBACK_MASK BIT(16) | 71 | #define SPIGCR1_LOOPBACK_MASK BIT(16) |
72 | #define SPIGCR1_SPIENA_MASK BIT(24) | ||
78 | 73 | ||
79 | /* SPIBUF */ | 74 | /* SPIBUF */ |
80 | #define SPIBUF_TXFULL_MASK BIT(29) | 75 | #define SPIBUF_TXFULL_MASK BIT(29) |
81 | #define SPIBUF_RXEMPTY_MASK BIT(31) | 76 | #define SPIBUF_RXEMPTY_MASK BIT(31) |
82 | 77 | ||
78 | /* SPIDELAY */ | ||
79 | #define SPIDELAY_C2TDELAY_SHIFT 24 | ||
80 | #define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT) | ||
81 | #define SPIDELAY_T2CDELAY_SHIFT 16 | ||
82 | #define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT) | ||
83 | #define SPIDELAY_T2EDELAY_SHIFT 8 | ||
84 | #define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT) | ||
85 | #define SPIDELAY_C2EDELAY_SHIFT 0 | ||
86 | #define SPIDELAY_C2EDELAY_MASK 0xFF | ||
87 | |||
83 | /* Error Masks */ | 88 | /* Error Masks */ |
84 | #define SPIFLG_DLEN_ERR_MASK BIT(0) | 89 | #define SPIFLG_DLEN_ERR_MASK BIT(0) |
85 | #define SPIFLG_TIMEOUT_MASK BIT(1) | 90 | #define SPIFLG_TIMEOUT_MASK BIT(1) |
@@ -87,29 +92,13 @@ | |||
87 | #define SPIFLG_DESYNC_MASK BIT(3) | 92 | #define SPIFLG_DESYNC_MASK BIT(3) |
88 | #define SPIFLG_BITERR_MASK BIT(4) | 93 | #define SPIFLG_BITERR_MASK BIT(4) |
89 | #define SPIFLG_OVRRUN_MASK BIT(6) | 94 | #define SPIFLG_OVRRUN_MASK BIT(6) |
90 | #define SPIFLG_RX_INTR_MASK BIT(8) | ||
91 | #define SPIFLG_TX_INTR_MASK BIT(9) | ||
92 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) | 95 | #define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24) |
93 | #define SPIFLG_MASK (SPIFLG_DLEN_ERR_MASK \ | 96 | #define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \ |
94 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ | 97 | | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \ |
95 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ | 98 | | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \ |
96 | | SPIFLG_OVRRUN_MASK | SPIFLG_RX_INTR_MASK \ | 99 | | SPIFLG_OVRRUN_MASK) |
97 | | SPIFLG_TX_INTR_MASK \ | ||
98 | | SPIFLG_BUF_INIT_ACTIVE_MASK) | ||
99 | |||
100 | #define SPIINT_DLEN_ERR_INTR BIT(0) | ||
101 | #define SPIINT_TIMEOUT_INTR BIT(1) | ||
102 | #define SPIINT_PARERR_INTR BIT(2) | ||
103 | #define SPIINT_DESYNC_INTR BIT(3) | ||
104 | #define SPIINT_BITERR_INTR BIT(4) | ||
105 | #define SPIINT_OVRRUN_INTR BIT(6) | ||
106 | #define SPIINT_RX_INTR BIT(8) | ||
107 | #define SPIINT_TX_INTR BIT(9) | ||
108 | #define SPIINT_DMA_REQ_EN BIT(16) | ||
109 | #define SPIINT_ENABLE_HIGHZ BIT(24) | ||
110 | 100 | ||
111 | #define SPI_T2CDELAY_SHIFT 16 | 101 | #define SPIINT_DMA_REQ_EN BIT(16) |
112 | #define SPI_C2TDELAY_SHIFT 24 | ||
113 | 102 | ||
114 | /* SPI Controller registers */ | 103 | /* SPI Controller registers */ |
115 | #define SPIGCR0 0x00 | 104 | #define SPIGCR0 0x00 |
@@ -118,44 +107,18 @@ | |||
118 | #define SPILVL 0x0c | 107 | #define SPILVL 0x0c |
119 | #define SPIFLG 0x10 | 108 | #define SPIFLG 0x10 |
120 | #define SPIPC0 0x14 | 109 | #define SPIPC0 0x14 |
121 | #define SPIPC1 0x18 | ||
122 | #define SPIPC2 0x1c | ||
123 | #define SPIPC3 0x20 | ||
124 | #define SPIPC4 0x24 | ||
125 | #define SPIPC5 0x28 | ||
126 | #define SPIPC6 0x2c | ||
127 | #define SPIPC7 0x30 | ||
128 | #define SPIPC8 0x34 | ||
129 | #define SPIDAT0 0x38 | ||
130 | #define SPIDAT1 0x3c | 110 | #define SPIDAT1 0x3c |
131 | #define SPIBUF 0x40 | 111 | #define SPIBUF 0x40 |
132 | #define SPIEMU 0x44 | ||
133 | #define SPIDELAY 0x48 | 112 | #define SPIDELAY 0x48 |
134 | #define SPIDEF 0x4c | 113 | #define SPIDEF 0x4c |
135 | #define SPIFMT0 0x50 | 114 | #define SPIFMT0 0x50 |
136 | #define SPIFMT1 0x54 | ||
137 | #define SPIFMT2 0x58 | ||
138 | #define SPIFMT3 0x5c | ||
139 | #define TGINTVEC0 0x60 | ||
140 | #define TGINTVEC1 0x64 | ||
141 | |||
142 | struct davinci_spi_slave { | ||
143 | u32 cmd_to_write; | ||
144 | u32 clk_ctrl_to_write; | ||
145 | u32 bytes_per_word; | ||
146 | u8 active_cs; | ||
147 | }; | ||
148 | 115 | ||
149 | /* We have 2 DMA channels per CS, one for RX and one for TX */ | 116 | /* We have 2 DMA channels per CS, one for RX and one for TX */ |
150 | struct davinci_spi_dma { | 117 | struct davinci_spi_dma { |
151 | int dma_tx_channel; | 118 | int tx_channel; |
152 | int dma_rx_channel; | 119 | int rx_channel; |
153 | int dma_tx_sync_dev; | 120 | int dummy_param_slot; |
154 | int dma_rx_sync_dev; | ||
155 | enum dma_event_q eventq; | 121 | enum dma_event_q eventq; |
156 | |||
157 | struct completion dma_tx_completion; | ||
158 | struct completion dma_rx_completion; | ||
159 | }; | 122 | }; |
160 | 123 | ||
161 | /* SPI Controller driver's private data. */ | 124 | /* SPI Controller driver's private data. */ |
@@ -166,58 +129,63 @@ struct davinci_spi { | |||
166 | u8 version; | 129 | u8 version; |
167 | resource_size_t pbase; | 130 | resource_size_t pbase; |
168 | void __iomem *base; | 131 | void __iomem *base; |
169 | size_t region_size; | ||
170 | u32 irq; | 132 | u32 irq; |
171 | struct completion done; | 133 | struct completion done; |
172 | 134 | ||
173 | const void *tx; | 135 | const void *tx; |
174 | void *rx; | 136 | void *rx; |
175 | u8 *tmp_buf; | 137 | #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) |
176 | int count; | 138 | u8 rx_tmp_buf[SPI_TMP_BUFSZ]; |
177 | struct davinci_spi_dma *dma_channels; | 139 | int rcount; |
178 | struct davinci_spi_platform_data *pdata; | 140 | int wcount; |
141 | struct davinci_spi_dma dma; | ||
142 | struct davinci_spi_platform_data *pdata; | ||
179 | 143 | ||
180 | void (*get_rx)(u32 rx_data, struct davinci_spi *); | 144 | void (*get_rx)(u32 rx_data, struct davinci_spi *); |
181 | u32 (*get_tx)(struct davinci_spi *); | 145 | u32 (*get_tx)(struct davinci_spi *); |
182 | 146 | ||
183 | struct davinci_spi_slave slave[SPI_MAX_CHIPSELECT]; | 147 | u8 bytes_per_word[SPI_MAX_CHIPSELECT]; |
184 | }; | 148 | }; |
185 | 149 | ||
186 | static unsigned use_dma; | 150 | static struct davinci_spi_config davinci_spi_default_cfg; |
187 | 151 | ||
188 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *davinci_spi) | 152 | static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi) |
189 | { | 153 | { |
190 | u8 *rx = davinci_spi->rx; | 154 | if (dspi->rx) { |
191 | 155 | u8 *rx = dspi->rx; | |
192 | *rx++ = (u8)data; | 156 | *rx++ = (u8)data; |
193 | davinci_spi->rx = rx; | 157 | dspi->rx = rx; |
158 | } | ||
194 | } | 159 | } |
195 | 160 | ||
196 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *davinci_spi) | 161 | static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi) |
197 | { | 162 | { |
198 | u16 *rx = davinci_spi->rx; | 163 | if (dspi->rx) { |
199 | 164 | u16 *rx = dspi->rx; | |
200 | *rx++ = (u16)data; | 165 | *rx++ = (u16)data; |
201 | davinci_spi->rx = rx; | 166 | dspi->rx = rx; |
167 | } | ||
202 | } | 168 | } |
203 | 169 | ||
204 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *davinci_spi) | 170 | static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi) |
205 | { | 171 | { |
206 | u32 data; | 172 | u32 data = 0; |
207 | const u8 *tx = davinci_spi->tx; | 173 | if (dspi->tx) { |
208 | 174 | const u8 *tx = dspi->tx; | |
209 | data = *tx++; | 175 | data = *tx++; |
210 | davinci_spi->tx = tx; | 176 | dspi->tx = tx; |
177 | } | ||
211 | return data; | 178 | return data; |
212 | } | 179 | } |
213 | 180 | ||
214 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *davinci_spi) | 181 | static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi) |
215 | { | 182 | { |
216 | u32 data; | 183 | u32 data = 0; |
217 | const u16 *tx = davinci_spi->tx; | 184 | if (dspi->tx) { |
218 | 185 | const u16 *tx = dspi->tx; | |
219 | data = *tx++; | 186 | data = *tx++; |
220 | davinci_spi->tx = tx; | 187 | dspi->tx = tx; |
188 | } | ||
221 | return data; | 189 | return data; |
222 | } | 190 | } |
223 | 191 | ||
@@ -237,55 +205,67 @@ static inline void clear_io_bits(void __iomem *addr, u32 bits) | |||
237 | iowrite32(v, addr); | 205 | iowrite32(v, addr); |
238 | } | 206 | } |
239 | 207 | ||
240 | static inline void set_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
241 | { | ||
242 | set_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
243 | } | ||
244 | |||
245 | static inline void clear_fmt_bits(void __iomem *addr, u32 bits, int cs_num) | ||
246 | { | ||
247 | clear_io_bits(addr + SPIFMT0 + (0x4 * cs_num), bits); | ||
248 | } | ||
249 | |||
250 | static void davinci_spi_set_dma_req(const struct spi_device *spi, int enable) | ||
251 | { | ||
252 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
253 | |||
254 | if (enable) | ||
255 | set_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
256 | else | ||
257 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
258 | } | ||
259 | |||
260 | /* | 208 | /* |
261 | * Interface to control the chip select signal | 209 | * Interface to control the chip select signal |
262 | */ | 210 | */ |
263 | static void davinci_spi_chipselect(struct spi_device *spi, int value) | 211 | static void davinci_spi_chipselect(struct spi_device *spi, int value) |
264 | { | 212 | { |
265 | struct davinci_spi *davinci_spi; | 213 | struct davinci_spi *dspi; |
266 | struct davinci_spi_platform_data *pdata; | 214 | struct davinci_spi_platform_data *pdata; |
267 | u32 data1_reg_val = 0; | 215 | u8 chip_sel = spi->chip_select; |
216 | u16 spidat1 = CS_DEFAULT; | ||
217 | bool gpio_chipsel = false; | ||
268 | 218 | ||
269 | davinci_spi = spi_master_get_devdata(spi->master); | 219 | dspi = spi_master_get_devdata(spi->master); |
270 | pdata = davinci_spi->pdata; | 220 | pdata = dspi->pdata; |
221 | |||
222 | if (pdata->chip_sel && chip_sel < pdata->num_chipselect && | ||
223 | pdata->chip_sel[chip_sel] != SPI_INTERN_CS) | ||
224 | gpio_chipsel = true; | ||
271 | 225 | ||
272 | /* | 226 | /* |
273 | * Board specific chip select logic decides the polarity and cs | 227 | * Board specific chip select logic decides the polarity and cs |
274 | * line for the controller | 228 | * line for the controller |
275 | */ | 229 | */ |
276 | if (value == BITBANG_CS_INACTIVE) { | 230 | if (gpio_chipsel) { |
277 | set_io_bits(davinci_spi->base + SPIDEF, CS_DEFAULT); | 231 | if (value == BITBANG_CS_ACTIVE) |
278 | 232 | gpio_set_value(pdata->chip_sel[chip_sel], 0); | |
279 | data1_reg_val |= CS_DEFAULT << SPIDAT1_CSNR_SHIFT; | 233 | else |
280 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | 234 | gpio_set_value(pdata->chip_sel[chip_sel], 1); |
235 | } else { | ||
236 | if (value == BITBANG_CS_ACTIVE) { | ||
237 | spidat1 |= SPIDAT1_CSHOLD_MASK; | ||
238 | spidat1 &= ~(0x1 << chip_sel); | ||
239 | } | ||
281 | 240 | ||
282 | while ((ioread32(davinci_spi->base + SPIBUF) | 241 | iowrite16(spidat1, dspi->base + SPIDAT1 + 2); |
283 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
284 | cpu_relax(); | ||
285 | } | 242 | } |
286 | } | 243 | } |
287 | 244 | ||
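/*
 * Illustrative sketch (not part of this diff): with the reworked chipselect
 * handling above, a board can mix the controller's native chip selects with
 * GPIO-driven ones through pdata->chip_sel[].  An entry of SPI_INTERN_CS keeps
 * the internal SPI_ENn pin; any other value is treated as a GPIO number and
 * driven with gpio_set_value().  The GPIO number below is hypothetical, and
 * struct davinci_spi_platform_data is assumed to come from <mach/spi.h>.
 */
static u8 my_spi0_chip_sel[] = {
	SPI_INTERN_CS,	/* CS0: controller drives its own chip-select pin */
	42,		/* CS1: hypothetical GPIO used as chip select */
};

static struct davinci_spi_platform_data my_spi0_pdata = {
	.num_chipselect	= 2,
	.chip_sel	= my_spi0_chip_sel,
};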
288 | /** | 245 | /** |
246 | * davinci_spi_get_prescale - Calculates the correct prescale value | ||
247 | * @maxspeed_hz: the maximum rate the SPI clock can run at | ||
248 | * | ||
249 | * This function calculates the prescale value that generates a clock rate | ||
250 | * less than or equal to the specified maximum. | ||
251 | * | ||
252 | * Returns: calculated prescale - 1 for easy programming into SPI registers | ||
253 | * or negative error number if valid prescalar cannot be updated. | ||
254 | */ | ||
255 | static inline int davinci_spi_get_prescale(struct davinci_spi *dspi, | ||
256 | u32 max_speed_hz) | ||
257 | { | ||
258 | int ret; | ||
259 | |||
260 | ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz); | ||
261 | |||
262 | if (ret < 3 || ret > 256) | ||
263 | return -EINVAL; | ||
264 | |||
265 | return ret - 1; | ||
266 | } | ||
267 | |||
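/*
 * Worked example of the calculation above (clock rate assumed, not taken from
 * this patch): with a 150 MHz SPI module clock and max_speed_hz of 10 MHz,
 * DIV_ROUND_UP(150000000, 10000000) = 15, so 14 is returned and programmed
 * into the SPIFMTn PRESCALE field.  The resulting SPI clock is
 * 150 MHz / (14 + 1) = 10 MHz, i.e. never faster than requested.  Divide
 * ratios outside 3..256 are rejected with -EINVAL.
 */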
268 | /** | ||
289 | * davinci_spi_setup_transfer - This functions will determine transfer method | 269 | * davinci_spi_setup_transfer - This functions will determine transfer method |
290 | * @spi: spi device on which data transfer to be done | 270 | * @spi: spi device on which data transfer to be done |
291 | * @t: spi transfer in which transfer info is filled | 271 | * @t: spi transfer in which transfer info is filled |
@@ -298,13 +278,15 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, | |||
298 | struct spi_transfer *t) | 278 | struct spi_transfer *t) |
299 | { | 279 | { |
300 | 280 | ||
301 | struct davinci_spi *davinci_spi; | 281 | struct davinci_spi *dspi; |
302 | struct davinci_spi_platform_data *pdata; | 282 | struct davinci_spi_config *spicfg; |
303 | u8 bits_per_word = 0; | 283 | u8 bits_per_word = 0; |
304 | u32 hz = 0, prescale = 0, clkspeed; | 284 | u32 hz = 0, spifmt = 0, prescale = 0; |
305 | 285 | ||
306 | davinci_spi = spi_master_get_devdata(spi->master); | 286 | dspi = spi_master_get_devdata(spi->master); |
307 | pdata = davinci_spi->pdata; | 287 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
288 | if (!spicfg) | ||
289 | spicfg = &davinci_spi_default_cfg; | ||
308 | 290 | ||
309 | if (t) { | 291 | if (t) { |
310 | bits_per_word = t->bits_per_word; | 292 | bits_per_word = t->bits_per_word; |
@@ -320,111 +302,83 @@ static int davinci_spi_setup_transfer(struct spi_device *spi, | |||
320 | * 8bit, 16bit or 32bit transfer | 302 | * 8bit, 16bit or 32bit transfer |
321 | */ | 303 | */ |
322 | if (bits_per_word <= 8 && bits_per_word >= 2) { | 304 | if (bits_per_word <= 8 && bits_per_word >= 2) { |
323 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | 305 | dspi->get_rx = davinci_spi_rx_buf_u8; |
324 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | 306 | dspi->get_tx = davinci_spi_tx_buf_u8; |
325 | davinci_spi->slave[spi->chip_select].bytes_per_word = 1; | 307 | dspi->bytes_per_word[spi->chip_select] = 1; |
326 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { | 308 | } else if (bits_per_word <= 16 && bits_per_word >= 2) { |
327 | davinci_spi->get_rx = davinci_spi_rx_buf_u16; | 309 | dspi->get_rx = davinci_spi_rx_buf_u16; |
328 | davinci_spi->get_tx = davinci_spi_tx_buf_u16; | 310 | dspi->get_tx = davinci_spi_tx_buf_u16; |
329 | davinci_spi->slave[spi->chip_select].bytes_per_word = 2; | 311 | dspi->bytes_per_word[spi->chip_select] = 2; |
330 | } else | 312 | } else |
331 | return -EINVAL; | 313 | return -EINVAL; |
332 | 314 | ||
333 | if (!hz) | 315 | if (!hz) |
334 | hz = spi->max_speed_hz; | 316 | hz = spi->max_speed_hz; |
335 | 317 | ||
336 | clear_fmt_bits(davinci_spi->base, SPIFMT_CHARLEN_MASK, | 318 | /* Set up SPIFMTn register, unique to this chipselect. */ |
337 | spi->chip_select); | ||
338 | set_fmt_bits(davinci_spi->base, bits_per_word & 0x1f, | ||
339 | spi->chip_select); | ||
340 | 319 | ||
341 | clkspeed = clk_get_rate(davinci_spi->clk); | 320 | prescale = davinci_spi_get_prescale(dspi, hz); |
342 | if (hz > clkspeed / 2) | 321 | if (prescale < 0) |
343 | prescale = 1 << 8; | 322 | return prescale; |
344 | if (hz < clkspeed / 256) | ||
345 | prescale = 255 << 8; | ||
346 | if (!prescale) | ||
347 | prescale = ((clkspeed / hz - 1) << 8) & 0x0000ff00; | ||
348 | 323 | ||
349 | clear_fmt_bits(davinci_spi->base, 0x0000ff00, spi->chip_select); | 324 | spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f); |
350 | set_fmt_bits(davinci_spi->base, prescale, spi->chip_select); | ||
351 | 325 | ||
352 | return 0; | 326 | if (spi->mode & SPI_LSB_FIRST) |
353 | } | 327 | spifmt |= SPIFMT_SHIFTDIR_MASK; |
354 | 328 | ||
355 | static void davinci_spi_dma_rx_callback(unsigned lch, u16 ch_status, void *data) | 329 | if (spi->mode & SPI_CPOL) |
356 | { | 330 | spifmt |= SPIFMT_POLARITY_MASK; |
357 | struct spi_device *spi = (struct spi_device *)data; | ||
358 | struct davinci_spi *davinci_spi; | ||
359 | struct davinci_spi_dma *davinci_spi_dma; | ||
360 | struct davinci_spi_platform_data *pdata; | ||
361 | 331 | ||
362 | davinci_spi = spi_master_get_devdata(spi->master); | 332 | if (!(spi->mode & SPI_CPHA)) |
363 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | 333 | spifmt |= SPIFMT_PHASE_MASK; |
364 | pdata = davinci_spi->pdata; | ||
365 | 334 | ||
366 | if (ch_status == DMA_COMPLETE) | 335 | /* |
367 | edma_stop(davinci_spi_dma->dma_rx_channel); | 336 | * Version 1 hardware supports two basic SPI modes: |
368 | else | 337 | * - Standard SPI mode uses 4 pins, with chipselect |
369 | edma_clean_channel(davinci_spi_dma->dma_rx_channel); | 338 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) |
339 | * (distinct from SPI_3WIRE, with just one data wire; | ||
340 | * or similar variants without MOSI or without MISO) | ||
341 | * | ||
342 | * Version 2 hardware supports an optional handshaking signal, | ||
343 | * so it can support two more modes: | ||
344 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
345 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
346 | */ | ||
370 | 347 | ||
371 | complete(&davinci_spi_dma->dma_rx_completion); | 348 | if (dspi->version == SPI_VERSION_2) { |
372 | /* We must disable the DMA RX request */ | ||
373 | davinci_spi_set_dma_req(spi, 0); | ||
374 | } | ||
375 | 349 | ||
376 | static void davinci_spi_dma_tx_callback(unsigned lch, u16 ch_status, void *data) | 350 | u32 delay = 0; |
377 | { | ||
378 | struct spi_device *spi = (struct spi_device *)data; | ||
379 | struct davinci_spi *davinci_spi; | ||
380 | struct davinci_spi_dma *davinci_spi_dma; | ||
381 | struct davinci_spi_platform_data *pdata; | ||
382 | 351 | ||
383 | davinci_spi = spi_master_get_devdata(spi->master); | 352 | spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT) |
384 | davinci_spi_dma = &(davinci_spi->dma_channels[spi->chip_select]); | 353 | & SPIFMT_WDELAY_MASK); |
385 | pdata = davinci_spi->pdata; | ||
386 | 354 | ||
387 | if (ch_status == DMA_COMPLETE) | 355 | if (spicfg->odd_parity) |
388 | edma_stop(davinci_spi_dma->dma_tx_channel); | 356 | spifmt |= SPIFMT_ODD_PARITY_MASK; |
389 | else | ||
390 | edma_clean_channel(davinci_spi_dma->dma_tx_channel); | ||
391 | 357 | ||
392 | complete(&davinci_spi_dma->dma_tx_completion); | 358 | if (spicfg->parity_enable) |
393 | /* We must disable the DMA TX request */ | 359 | spifmt |= SPIFMT_PARITYENA_MASK; |
394 | davinci_spi_set_dma_req(spi, 0); | ||
395 | } | ||
396 | 360 | ||
397 | static int davinci_spi_request_dma(struct spi_device *spi) | 361 | if (spicfg->timer_disable) { |
398 | { | 362 | spifmt |= SPIFMT_DISTIMER_MASK; |
399 | struct davinci_spi *davinci_spi; | 363 | } else { |
400 | struct davinci_spi_dma *davinci_spi_dma; | 364 | delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT) |
401 | struct davinci_spi_platform_data *pdata; | 365 | & SPIDELAY_C2TDELAY_MASK; |
402 | struct device *sdev; | 366 | delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT) |
403 | int r; | 367 | & SPIDELAY_T2CDELAY_MASK; |
368 | } | ||
404 | 369 | ||
405 | davinci_spi = spi_master_get_devdata(spi->master); | 370 | if (spi->mode & SPI_READY) { |
406 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 371 | spifmt |= SPIFMT_WAITENA_MASK; |
407 | pdata = davinci_spi->pdata; | 372 | delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT) |
408 | sdev = davinci_spi->bitbang.master->dev.parent; | 373 | & SPIDELAY_T2EDELAY_MASK; |
374 | delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT) | ||
375 | & SPIDELAY_C2EDELAY_MASK; | ||
376 | } | ||
409 | 377 | ||
410 | r = edma_alloc_channel(davinci_spi_dma->dma_rx_sync_dev, | 378 | iowrite32(delay, dspi->base + SPIDELAY); |
411 | davinci_spi_dma_rx_callback, spi, | ||
412 | davinci_spi_dma->eventq); | ||
413 | if (r < 0) { | ||
414 | dev_dbg(sdev, "Unable to request DMA channel for SPI RX\n"); | ||
415 | return -EAGAIN; | ||
416 | } | 379 | } |
417 | davinci_spi_dma->dma_rx_channel = r; | 380 | |
418 | r = edma_alloc_channel(davinci_spi_dma->dma_tx_sync_dev, | 381 | iowrite32(spifmt, dspi->base + SPIFMT0); |
419 | davinci_spi_dma_tx_callback, spi, | ||
420 | davinci_spi_dma->eventq); | ||
421 | if (r < 0) { | ||
422 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
423 | davinci_spi_dma->dma_rx_channel = -1; | ||
424 | dev_dbg(sdev, "Unable to request DMA channel for SPI TX\n"); | ||
425 | return -EAGAIN; | ||
426 | } | ||
427 | davinci_spi_dma->dma_tx_channel = r; | ||
428 | 382 | ||
429 | return 0; | 383 | return 0; |
430 | } | 384 | } |
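/*
 * Worked example of the SPIFMTn value composed above (settings assumed for
 * illustration): 8 bits per word, SPI_MODE_0, a prescale register value of 14
 * and wdelay 0 give
 *
 *	spifmt = (14 << SPIFMT_PRESCALE_SHIFT)	prescale, bits 15:8
 *	       | (8 & 0x1f)			character length, bits 4:0
 *	       | SPIFMT_PHASE_MASK;		SPI_CPHA clear sets PHASE
 *
 * i.e. 0x00010e08 is written to SPIFMT0 for this device.
 */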
@@ -435,190 +389,40 @@ static int davinci_spi_request_dma(struct spi_device *spi) | |||
435 | * | 389 | * |
436 | * This functions sets the default transfer method. | 390 | * This functions sets the default transfer method. |
437 | */ | 391 | */ |
438 | |||
439 | static int davinci_spi_setup(struct spi_device *spi) | 392 | static int davinci_spi_setup(struct spi_device *spi) |
440 | { | 393 | { |
441 | int retval; | 394 | int retval = 0; |
442 | struct davinci_spi *davinci_spi; | 395 | struct davinci_spi *dspi; |
443 | struct davinci_spi_dma *davinci_spi_dma; | 396 | struct davinci_spi_platform_data *pdata; |
444 | struct device *sdev; | ||
445 | 397 | ||
446 | davinci_spi = spi_master_get_devdata(spi->master); | 398 | dspi = spi_master_get_devdata(spi->master); |
447 | sdev = davinci_spi->bitbang.master->dev.parent; | 399 | pdata = dspi->pdata; |
448 | 400 | ||
449 | /* if bits per word length is zero then set it default 8 */ | 401 | /* if bits per word length is zero then set it default 8 */ |
450 | if (!spi->bits_per_word) | 402 | if (!spi->bits_per_word) |
451 | spi->bits_per_word = 8; | 403 | spi->bits_per_word = 8; |
452 | 404 | ||
453 | davinci_spi->slave[spi->chip_select].cmd_to_write = 0; | 405 | if (!(spi->mode & SPI_NO_CS)) { |
454 | 406 | if ((pdata->chip_sel == NULL) || | |
455 | if (use_dma && davinci_spi->dma_channels) { | 407 | (pdata->chip_sel[spi->chip_select] == SPI_INTERN_CS)) |
456 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 408 | set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select); |
457 | |||
458 | if ((davinci_spi_dma->dma_rx_channel == -1) | ||
459 | || (davinci_spi_dma->dma_tx_channel == -1)) { | ||
460 | retval = davinci_spi_request_dma(spi); | ||
461 | if (retval < 0) | ||
462 | return retval; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * SPI in DaVinci and DA8xx operate between | ||
468 | * 600 KHz and 50 MHz | ||
469 | */ | ||
470 | if (spi->max_speed_hz < 600000 || spi->max_speed_hz > 50000000) { | ||
471 | dev_dbg(sdev, "Operating frequency is not in acceptable " | ||
472 | "range\n"); | ||
473 | return -EINVAL; | ||
474 | } | ||
475 | |||
476 | /* | ||
477 | * Set up SPIFMTn register, unique to this chipselect. | ||
478 | * | ||
479 | * NOTE: we could do all of these with one write. Also, some | ||
480 | * of the "version 2" features are found in chips that don't | ||
481 | * support all of them... | ||
482 | */ | ||
483 | if (spi->mode & SPI_LSB_FIRST) | ||
484 | set_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
485 | spi->chip_select); | ||
486 | else | ||
487 | clear_fmt_bits(davinci_spi->base, SPIFMT_SHIFTDIR_MASK, | ||
488 | spi->chip_select); | ||
489 | |||
490 | if (spi->mode & SPI_CPOL) | ||
491 | set_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
492 | spi->chip_select); | ||
493 | else | ||
494 | clear_fmt_bits(davinci_spi->base, SPIFMT_POLARITY_MASK, | ||
495 | spi->chip_select); | ||
496 | 409 | ||
497 | if (!(spi->mode & SPI_CPHA)) | ||
498 | set_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
499 | spi->chip_select); | ||
500 | else | ||
501 | clear_fmt_bits(davinci_spi->base, SPIFMT_PHASE_MASK, | ||
502 | spi->chip_select); | ||
503 | |||
504 | /* | ||
505 | * Version 1 hardware supports two basic SPI modes: | ||
506 | * - Standard SPI mode uses 4 pins, with chipselect | ||
507 | * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS) | ||
508 | * (distinct from SPI_3WIRE, with just one data wire; | ||
509 | * or similar variants without MOSI or without MISO) | ||
510 | * | ||
511 | * Version 2 hardware supports an optional handshaking signal, | ||
512 | * so it can support two more modes: | ||
513 | * - 5 pin SPI variant is standard SPI plus SPI_READY | ||
514 | * - 4 pin with enable is (SPI_READY | SPI_NO_CS) | ||
515 | */ | ||
516 | |||
517 | if (davinci_spi->version == SPI_VERSION_2) { | ||
518 | clear_fmt_bits(davinci_spi->base, SPIFMT_WDELAY_MASK, | ||
519 | spi->chip_select); | ||
520 | set_fmt_bits(davinci_spi->base, | ||
521 | (davinci_spi->pdata->wdelay | ||
522 | << SPIFMT_WDELAY_SHIFT) | ||
523 | & SPIFMT_WDELAY_MASK, | ||
524 | spi->chip_select); | ||
525 | |||
526 | if (davinci_spi->pdata->odd_parity) | ||
527 | set_fmt_bits(davinci_spi->base, | ||
528 | SPIFMT_ODD_PARITY_MASK, | ||
529 | spi->chip_select); | ||
530 | else | ||
531 | clear_fmt_bits(davinci_spi->base, | ||
532 | SPIFMT_ODD_PARITY_MASK, | ||
533 | spi->chip_select); | ||
534 | |||
535 | if (davinci_spi->pdata->parity_enable) | ||
536 | set_fmt_bits(davinci_spi->base, | ||
537 | SPIFMT_PARITYENA_MASK, | ||
538 | spi->chip_select); | ||
539 | else | ||
540 | clear_fmt_bits(davinci_spi->base, | ||
541 | SPIFMT_PARITYENA_MASK, | ||
542 | spi->chip_select); | ||
543 | |||
544 | if (davinci_spi->pdata->wait_enable) | ||
545 | set_fmt_bits(davinci_spi->base, | ||
546 | SPIFMT_WAITENA_MASK, | ||
547 | spi->chip_select); | ||
548 | else | ||
549 | clear_fmt_bits(davinci_spi->base, | ||
550 | SPIFMT_WAITENA_MASK, | ||
551 | spi->chip_select); | ||
552 | |||
553 | if (davinci_spi->pdata->timer_disable) | ||
554 | set_fmt_bits(davinci_spi->base, | ||
555 | SPIFMT_DISTIMER_MASK, | ||
556 | spi->chip_select); | ||
557 | else | ||
558 | clear_fmt_bits(davinci_spi->base, | ||
559 | SPIFMT_DISTIMER_MASK, | ||
560 | spi->chip_select); | ||
561 | } | 410 | } |
562 | 411 | ||
563 | retval = davinci_spi_setup_transfer(spi, NULL); | ||
564 | |||
565 | return retval; | ||
566 | } | ||
567 | |||
568 | static void davinci_spi_cleanup(struct spi_device *spi) | ||
569 | { | ||
570 | struct davinci_spi *davinci_spi = spi_master_get_devdata(spi->master); | ||
571 | struct davinci_spi_dma *davinci_spi_dma; | ||
572 | |||
573 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
574 | |||
575 | if (use_dma && davinci_spi->dma_channels) { | ||
576 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | ||
577 | |||
578 | if ((davinci_spi_dma->dma_rx_channel != -1) | ||
579 | && (davinci_spi_dma->dma_tx_channel != -1)) { | ||
580 | edma_free_channel(davinci_spi_dma->dma_tx_channel); | ||
581 | edma_free_channel(davinci_spi_dma->dma_rx_channel); | ||
582 | } | ||
583 | } | ||
584 | } | ||
585 | |||
586 | static int davinci_spi_bufs_prep(struct spi_device *spi, | ||
587 | struct davinci_spi *davinci_spi) | ||
588 | { | ||
589 | int op_mode = 0; | ||
590 | |||
591 | /* | ||
592 | * REVISIT unless devices disagree about SPI_LOOP or | ||
593 | * SPI_READY (SPI_NO_CS only allows one device!), this | ||
594 | * should not need to be done before each message... | ||
595 | * optimize for both flags staying cleared. | ||
596 | */ | ||
597 | |||
598 | op_mode = SPIPC0_DIFUN_MASK | ||
599 | | SPIPC0_DOFUN_MASK | ||
600 | | SPIPC0_CLKFUN_MASK; | ||
601 | if (!(spi->mode & SPI_NO_CS)) | ||
602 | op_mode |= 1 << spi->chip_select; | ||
603 | if (spi->mode & SPI_READY) | 412 | if (spi->mode & SPI_READY) |
604 | op_mode |= SPIPC0_SPIENA_MASK; | 413 | set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK); |
605 | |||
606 | iowrite32(op_mode, davinci_spi->base + SPIPC0); | ||
607 | 414 | ||
608 | if (spi->mode & SPI_LOOP) | 415 | if (spi->mode & SPI_LOOP) |
609 | set_io_bits(davinci_spi->base + SPIGCR1, | 416 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
610 | SPIGCR1_LOOPBACK_MASK); | ||
611 | else | 417 | else |
612 | clear_io_bits(davinci_spi->base + SPIGCR1, | 418 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK); |
613 | SPIGCR1_LOOPBACK_MASK); | ||
614 | 419 | ||
615 | return 0; | 420 | return retval; |
616 | } | 421 | } |
617 | 422 | ||
618 | static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | 423 | static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status) |
619 | int int_status) | ||
620 | { | 424 | { |
621 | struct device *sdev = davinci_spi->bitbang.master->dev.parent; | 425 | struct device *sdev = dspi->bitbang.master->dev.parent; |
622 | 426 | ||
623 | if (int_status & SPIFLG_TIMEOUT_MASK) { | 427 | if (int_status & SPIFLG_TIMEOUT_MASK) { |
624 | dev_dbg(sdev, "SPI Time-out Error\n"); | 428 | dev_dbg(sdev, "SPI Time-out Error\n"); |
@@ -633,7 +437,7 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
633 | return -EIO; | 437 | return -EIO; |
634 | } | 438 | } |
635 | 439 | ||
636 | if (davinci_spi->version == SPI_VERSION_2) { | 440 | if (dspi->version == SPI_VERSION_2) { |
637 | if (int_status & SPIFLG_DLEN_ERR_MASK) { | 441 | if (int_status & SPIFLG_DLEN_ERR_MASK) { |
638 | dev_dbg(sdev, "SPI Data Length Error\n"); | 442 | dev_dbg(sdev, "SPI Data Length Error\n"); |
639 | return -EIO; | 443 | return -EIO; |
@@ -646,10 +450,6 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
646 | dev_dbg(sdev, "SPI Data Overrun error\n"); | 450 | dev_dbg(sdev, "SPI Data Overrun error\n"); |
647 | return -EIO; | 451 | return -EIO; |
648 | } | 452 | } |
649 | if (int_status & SPIFLG_TX_INTR_MASK) { | ||
650 | dev_dbg(sdev, "SPI TX intr bit set\n"); | ||
651 | return -EIO; | ||
652 | } | ||
653 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { | 453 | if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) { |
654 | dev_dbg(sdev, "SPI Buffer Init Active\n"); | 454 | dev_dbg(sdev, "SPI Buffer Init Active\n"); |
655 | return -EBUSY; | 455 | return -EBUSY; |
@@ -660,366 +460,339 @@ static int davinci_spi_check_error(struct davinci_spi *davinci_spi, | |||
660 | } | 460 | } |
661 | 461 | ||
662 | /** | 462 | /** |
663 | * davinci_spi_bufs - functions which will handle transfer data | 463 | * davinci_spi_process_events - check for and handle any SPI controller events |
664 | * @spi: spi device on which data transfer to be done | 464 | * @dspi: the controller data |
665 | * @t: spi transfer in which transfer info is filled | ||
666 | * | 465 | * |
667 | * This function will put data to be transferred into data register | 466 | * This function will check the SPIFLG register and handle any events that are |
668 | * of SPI controller and then wait until the completion will be marked | 467 | * detected there |
669 | * by the IRQ Handler. | ||
670 | */ | 468 | */ |
671 | static int davinci_spi_bufs_pio(struct spi_device *spi, struct spi_transfer *t) | 469 | static int davinci_spi_process_events(struct davinci_spi *dspi) |
672 | { | 470 | { |
673 | struct davinci_spi *davinci_spi; | 471 | u32 buf, status, errors = 0, spidat1; |
674 | int int_status, count, ret; | ||
675 | u8 conv, tmp; | ||
676 | u32 tx_data, data1_reg_val; | ||
677 | u32 buf_val, flg_val; | ||
678 | struct davinci_spi_platform_data *pdata; | ||
679 | |||
680 | davinci_spi = spi_master_get_devdata(spi->master); | ||
681 | pdata = davinci_spi->pdata; | ||
682 | |||
683 | davinci_spi->tx = t->tx_buf; | ||
684 | davinci_spi->rx = t->rx_buf; | ||
685 | |||
686 | /* convert len to words based on bits_per_word */ | ||
687 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | ||
688 | davinci_spi->count = t->len / conv; | ||
689 | |||
690 | INIT_COMPLETION(davinci_spi->done); | ||
691 | |||
692 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
693 | if (ret) | ||
694 | return ret; | ||
695 | |||
696 | /* Enable SPI */ | ||
697 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
698 | |||
699 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | ||
700 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
701 | davinci_spi->base + SPIDELAY); | ||
702 | |||
703 | count = davinci_spi->count; | ||
704 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
705 | tmp = ~(0x1 << spi->chip_select); | ||
706 | |||
707 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | ||
708 | |||
709 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | ||
710 | 472 | ||
711 | while ((ioread32(davinci_spi->base + SPIBUF) | 473 | buf = ioread32(dspi->base + SPIBUF); |
712 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
713 | cpu_relax(); | ||
714 | 474 | ||
715 | /* Determine the command to execute READ or WRITE */ | 475 | if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) { |
716 | if (t->tx_buf) { | 476 | dspi->get_rx(buf & 0xFFFF, dspi); |
717 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | 477 | dspi->rcount--; |
478 | } | ||
718 | 479 | ||
719 | while (1) { | 480 | status = ioread32(dspi->base + SPIFLG); |
720 | tx_data = davinci_spi->get_tx(davinci_spi); | ||
721 | 481 | ||
722 | data1_reg_val &= ~(0xFFFF); | 482 | if (unlikely(status & SPIFLG_ERROR_MASK)) { |
723 | data1_reg_val |= (0xFFFF & tx_data); | 483 | errors = status & SPIFLG_ERROR_MASK; |
484 | goto out; | ||
485 | } | ||
724 | 486 | ||
725 | buf_val = ioread32(davinci_spi->base + SPIBUF); | 487 | if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) { |
726 | if ((buf_val & SPIBUF_TXFULL_MASK) == 0) { | 488 | spidat1 = ioread32(dspi->base + SPIDAT1); |
727 | iowrite32(data1_reg_val, | 489 | dspi->wcount--; |
728 | davinci_spi->base + SPIDAT1); | 490 | spidat1 &= ~0xFFFF; |
491 | spidat1 |= 0xFFFF & dspi->get_tx(dspi); | ||
492 | iowrite32(spidat1, dspi->base + SPIDAT1); | ||
493 | } | ||
729 | 494 | ||
730 | count--; | 495 | out: |
731 | } | 496 | return errors; |
732 | while (ioread32(davinci_spi->base + SPIBUF) | 497 | } |
733 | & SPIBUF_RXEMPTY_MASK) | ||
734 | cpu_relax(); | ||
735 | |||
736 | /* getting the returned byte */ | ||
737 | if (t->rx_buf) { | ||
738 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
739 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
740 | } | ||
741 | if (count <= 0) | ||
742 | break; | ||
743 | } | ||
744 | } else { | ||
745 | if (pdata->poll_mode) { | ||
746 | while (1) { | ||
747 | /* keeps the serial clock going */ | ||
748 | if ((ioread32(davinci_spi->base + SPIBUF) | ||
749 | & SPIBUF_TXFULL_MASK) == 0) | ||
750 | iowrite32(data1_reg_val, | ||
751 | davinci_spi->base + SPIDAT1); | ||
752 | |||
753 | while (ioread32(davinci_spi->base + SPIBUF) & | ||
754 | SPIBUF_RXEMPTY_MASK) | ||
755 | cpu_relax(); | ||
756 | |||
757 | flg_val = ioread32(davinci_spi->base + SPIFLG); | ||
758 | buf_val = ioread32(davinci_spi->base + SPIBUF); | ||
759 | |||
760 | davinci_spi->get_rx(buf_val, davinci_spi); | ||
761 | |||
762 | count--; | ||
763 | if (count <= 0) | ||
764 | break; | ||
765 | } | ||
766 | } else { /* Receive in Interrupt mode */ | ||
767 | int i; | ||
768 | 498 | ||
769 | for (i = 0; i < davinci_spi->count; i++) { | 499 | static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) |
770 | set_io_bits(davinci_spi->base + SPIINT, | 500 | { |
771 | SPIINT_BITERR_INTR | 501 | struct davinci_spi *dspi = data; |
772 | | SPIINT_OVRRUN_INTR | 502 | struct davinci_spi_dma *dma = &dspi->dma; |
773 | | SPIINT_RX_INTR); | ||
774 | 503 | ||
775 | iowrite32(data1_reg_val, | 504 | edma_stop(lch); |
776 | davinci_spi->base + SPIDAT1); | ||
777 | 505 | ||
778 | while (ioread32(davinci_spi->base + SPIINT) & | 506 | if (status == DMA_COMPLETE) { |
779 | SPIINT_RX_INTR) | 507 | if (lch == dma->rx_channel) |
780 | cpu_relax(); | 508 | dspi->rcount = 0; |
781 | } | 509 | if (lch == dma->tx_channel) |
782 | iowrite32((data1_reg_val & 0x0ffcffff), | 510 | dspi->wcount = 0; |
783 | davinci_spi->base + SPIDAT1); | ||
784 | } | ||
785 | } | 511 | } |
786 | 512 | ||
787 | /* | 513 | if ((!dspi->wcount && !dspi->rcount) || (status != DMA_COMPLETE)) |
788 | * Check for bit error, desync error,parity error,timeout error and | 514 | complete(&dspi->done); |
789 | * receive overflow errors | ||
790 | */ | ||
791 | int_status = ioread32(davinci_spi->base + SPIFLG); | ||
792 | |||
793 | ret = davinci_spi_check_error(davinci_spi, int_status); | ||
794 | if (ret != 0) | ||
795 | return ret; | ||
796 | |||
797 | /* SPI Framework maintains the count only in bytes so convert back */ | ||
798 | davinci_spi->count *= conv; | ||
799 | |||
800 | return t->len; | ||
801 | } | 515 | } |
802 | 516 | ||
803 | #define DAVINCI_DMA_DATA_TYPE_S8 0x01 | 517 | /** |
804 | #define DAVINCI_DMA_DATA_TYPE_S16 0x02 | 518 | * davinci_spi_bufs - functions which will handle transfer data |
805 | #define DAVINCI_DMA_DATA_TYPE_S32 0x04 | 519 | * @spi: spi device on which data transfer to be done |
806 | 520 | * @t: spi transfer in which transfer info is filled | |
807 | static int davinci_spi_bufs_dma(struct spi_device *spi, struct spi_transfer *t) | 521 | * |
522 | * This function will put data to be transferred into data register | ||
523 | * of SPI controller and then wait until the completion will be marked | ||
524 | * by the IRQ Handler. | ||
525 | */ | ||
526 | static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
808 | { | 527 | { |
809 | struct davinci_spi *davinci_spi; | 528 | struct davinci_spi *dspi; |
810 | int int_status = 0; | 529 | int data_type, ret; |
811 | int count, temp_count; | 530 | u32 tx_data, spidat1; |
812 | u8 conv = 1; | 531 | u32 errors = 0; |
813 | u8 tmp; | 532 | struct davinci_spi_config *spicfg; |
814 | u32 data1_reg_val; | ||
815 | struct davinci_spi_dma *davinci_spi_dma; | ||
816 | int word_len, data_type, ret; | ||
817 | unsigned long tx_reg, rx_reg; | ||
818 | struct davinci_spi_platform_data *pdata; | 533 | struct davinci_spi_platform_data *pdata; |
534 | unsigned uninitialized_var(rx_buf_count); | ||
819 | struct device *sdev; | 535 | struct device *sdev; |
820 | 536 | ||
821 | davinci_spi = spi_master_get_devdata(spi->master); | 537 | dspi = spi_master_get_devdata(spi->master); |
822 | pdata = davinci_spi->pdata; | 538 | pdata = dspi->pdata; |
823 | sdev = davinci_spi->bitbang.master->dev.parent; | 539 | spicfg = (struct davinci_spi_config *)spi->controller_data; |
824 | 540 | if (!spicfg) | |
825 | davinci_spi_dma = &davinci_spi->dma_channels[spi->chip_select]; | 541 | spicfg = &davinci_spi_default_cfg; |
826 | 542 | sdev = dspi->bitbang.master->dev.parent; | |
827 | tx_reg = (unsigned long)davinci_spi->pbase + SPIDAT1; | ||
828 | rx_reg = (unsigned long)davinci_spi->pbase + SPIBUF; | ||
829 | |||
830 | davinci_spi->tx = t->tx_buf; | ||
831 | davinci_spi->rx = t->rx_buf; | ||
832 | 543 | ||
833 | /* convert len to words based on bits_per_word */ | 544 | /* convert len to words based on bits_per_word */ |
834 | conv = davinci_spi->slave[spi->chip_select].bytes_per_word; | 545 | data_type = dspi->bytes_per_word[spi->chip_select]; |
835 | davinci_spi->count = t->len / conv; | ||
836 | |||
837 | INIT_COMPLETION(davinci_spi->done); | ||
838 | 546 | ||
839 | init_completion(&davinci_spi_dma->dma_rx_completion); | 547 | dspi->tx = t->tx_buf; |
840 | init_completion(&davinci_spi_dma->dma_tx_completion); | 548 | dspi->rx = t->rx_buf; |
549 | dspi->wcount = t->len / data_type; | ||
550 | dspi->rcount = dspi->wcount; | ||
841 | 551 | ||
842 | word_len = conv * 8; | 552 | spidat1 = ioread32(dspi->base + SPIDAT1); |
843 | |||
844 | if (word_len <= 8) | ||
845 | data_type = DAVINCI_DMA_DATA_TYPE_S8; | ||
846 | else if (word_len <= 16) | ||
847 | data_type = DAVINCI_DMA_DATA_TYPE_S16; | ||
848 | else if (word_len <= 32) | ||
849 | data_type = DAVINCI_DMA_DATA_TYPE_S32; | ||
850 | else | ||
851 | return -EINVAL; | ||
852 | |||
853 | ret = davinci_spi_bufs_prep(spi, davinci_spi); | ||
854 | if (ret) | ||
855 | return ret; | ||
856 | 553 | ||
857 | /* Put delay val if required */ | 554 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
858 | iowrite32(0 | (pdata->c2tdelay << SPI_C2TDELAY_SHIFT) | | 555 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
859 | (pdata->t2cdelay << SPI_T2CDELAY_SHIFT), | ||
860 | davinci_spi->base + SPIDELAY); | ||
861 | 556 | ||
862 | count = davinci_spi->count; /* the number of elements */ | 557 | INIT_COMPLETION(dspi->done); |
863 | data1_reg_val = pdata->cs_hold << SPIDAT1_CSHOLD_SHIFT; | ||
864 | 558 | ||
865 | /* CS default = 0xFF */ | 559 | if (spicfg->io_type == SPI_IO_TYPE_INTR) |
866 | tmp = ~(0x1 << spi->chip_select); | 560 | set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); |
867 | 561 | ||
868 | clear_io_bits(davinci_spi->base + SPIDEF, ~tmp); | 562 | if (spicfg->io_type != SPI_IO_TYPE_DMA) { |
869 | 563 | /* start the transfer */ | |
870 | data1_reg_val |= tmp << SPIDAT1_CSNR_SHIFT; | 564 | dspi->wcount--; |
871 | 565 | tx_data = dspi->get_tx(dspi); | |
872 | /* disable all interrupts for dma transfers */ | 566 | spidat1 &= 0xFFFF0000; |
873 | clear_io_bits(davinci_spi->base + SPIINT, SPIINT_MASKALL); | 567 | spidat1 |= tx_data & 0xFFFF; |
874 | /* Disable SPI to write configuration bits in SPIDAT */ | 568 | iowrite32(spidat1, dspi->base + SPIDAT1); |
875 | clear_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
876 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | ||
877 | /* Enable SPI */ | ||
878 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); | ||
879 | |||
880 | while ((ioread32(davinci_spi->base + SPIBUF) | ||
881 | & SPIBUF_RXEMPTY_MASK) == 0) | ||
882 | cpu_relax(); | ||
883 | |||
884 | |||
885 | if (t->tx_buf) { | ||
886 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, count, | ||
887 | DMA_TO_DEVICE); | ||
888 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
889 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | ||
890 | " TX buffer\n", count); | ||
891 | return -ENOMEM; | ||
892 | } | ||
893 | temp_count = count; | ||
894 | } else { | 569 | } else { |
895 | /* We need TX clocking for RX transaction */ | 570 | struct davinci_spi_dma *dma; |
896 | t->tx_dma = dma_map_single(&spi->dev, | 571 | unsigned long tx_reg, rx_reg; |
897 | (void *)davinci_spi->tmp_buf, count + 1, | 572 | struct edmacc_param param; |
898 | DMA_TO_DEVICE); | 573 | void *rx_buf; |
899 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | 574 | |
900 | dev_dbg(sdev, "Unable to DMA map a %d bytes" | 575 | dma = &dspi->dma; |
901 | " TX tmp buffer\n", count); | 576 | |
902 | return -ENOMEM; | 577 | tx_reg = (unsigned long)dspi->pbase + SPIDAT1; |
578 | rx_reg = (unsigned long)dspi->pbase + SPIBUF; | ||
579 | |||
580 | /* | ||
581 | * Transmit DMA setup | ||
582 | * | ||
583 | * If there is transmit data, map the transmit buffer, set it | ||
584 | * as the source of data and set the source B index to data | ||
585 | * size. If there is no transmit data, set the transmit register | ||
586 | * as the source of data, and set the source B index to zero. | ||
587 | * | ||
588 | * The destination is always the transmit register itself. And | ||
589 | * the destination never increments. | ||
590 | */ | ||
591 | |||
592 | if (t->tx_buf) { | ||
593 | t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, | ||
594 | dspi->wcount, DMA_TO_DEVICE); | ||
595 | if (dma_mapping_error(&spi->dev, t->tx_dma)) { | ||
596 | dev_dbg(sdev, "Unable to DMA map %d bytes" | ||
597 | "TX buffer\n", dspi->wcount); | ||
598 | return -ENOMEM; | ||
599 | } | ||
903 | } | 600 | } |
904 | temp_count = count + 1; | ||
905 | } | ||
906 | 601 | ||
907 | edma_set_transfer_params(davinci_spi_dma->dma_tx_channel, | 602 | param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); |
908 | data_type, temp_count, 1, 0, ASYNC); | 603 | param.src = t->tx_buf ? t->tx_dma : tx_reg; |
909 | edma_set_dest(davinci_spi_dma->dma_tx_channel, tx_reg, INCR, W8BIT); | 604 | param.a_b_cnt = dspi->wcount << 16 | data_type; |
910 | edma_set_src(davinci_spi_dma->dma_tx_channel, t->tx_dma, INCR, W8BIT); | 605 | param.dst = tx_reg; |
911 | edma_set_src_index(davinci_spi_dma->dma_tx_channel, data_type, 0); | 606 | param.src_dst_bidx = t->tx_buf ? data_type : 0; |
912 | edma_set_dest_index(davinci_spi_dma->dma_tx_channel, 0, 0); | 607 | param.link_bcntrld = 0xffff; |
913 | 608 | param.src_dst_cidx = 0; | |
914 | if (t->rx_buf) { | 609 | param.ccnt = 1; |
915 | /* initiate transaction */ | 610 | edma_write_slot(dma->tx_channel, ¶m); |
916 | iowrite32(data1_reg_val, davinci_spi->base + SPIDAT1); | 611 | edma_link(dma->tx_channel, dma->dummy_param_slot); |
612 | |||
613 | /* | ||
614 | * Receive DMA setup | ||
615 | * | ||
616 | * If there is receive buffer, use it to receive data. If there | ||
617 | * is none provided, use a temporary receive buffer. Set the | ||
618 | * destination B index to 0 so effectively only one byte is used | ||
619 | * in the temporary buffer (address does not increment). | ||
620 | * | ||
621 | * The source of receive data is the receive data register. The | ||
622 | * source address never increments. | ||
623 | */ | ||
624 | |||
625 | if (t->rx_buf) { | ||
626 | rx_buf = t->rx_buf; | ||
627 | rx_buf_count = dspi->rcount; | ||
628 | } else { | ||
629 | rx_buf = dspi->rx_tmp_buf; | ||
630 | rx_buf_count = sizeof(dspi->rx_tmp_buf); | ||
631 | } | ||
917 | 632 | ||
918 | t->rx_dma = dma_map_single(&spi->dev, (void *)t->rx_buf, count, | 633 | t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, |
919 | DMA_FROM_DEVICE); | 634 | DMA_FROM_DEVICE); |
920 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { | 635 | if (dma_mapping_error(&spi->dev, t->rx_dma)) { |
921 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", | 636 | dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", |
922 | count); | 637 | rx_buf_count); |
923 | if (t->tx_buf != NULL) | 638 | if (t->tx_buf) |
924 | dma_unmap_single(NULL, t->tx_dma, | 639 | dma_unmap_single(NULL, t->tx_dma, dspi->wcount, |
925 | count, DMA_TO_DEVICE); | 640 | DMA_TO_DEVICE); |
926 | return -ENOMEM; | 641 | return -ENOMEM; |
927 | } | 642 | } |
928 | edma_set_transfer_params(davinci_spi_dma->dma_rx_channel, | ||
929 | data_type, count, 1, 0, ASYNC); | ||
930 | edma_set_src(davinci_spi_dma->dma_rx_channel, | ||
931 | rx_reg, INCR, W8BIT); | ||
932 | edma_set_dest(davinci_spi_dma->dma_rx_channel, | ||
933 | t->rx_dma, INCR, W8BIT); | ||
934 | edma_set_src_index(davinci_spi_dma->dma_rx_channel, 0, 0); | ||
935 | edma_set_dest_index(davinci_spi_dma->dma_rx_channel, | ||
936 | data_type, 0); | ||
937 | } | ||
938 | 643 | ||
939 | if ((t->tx_buf) || (t->rx_buf)) | 644 | param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); |
940 | edma_start(davinci_spi_dma->dma_tx_channel); | 645 | param.src = rx_reg; |
646 | param.a_b_cnt = dspi->rcount << 16 | data_type; | ||
647 | param.dst = t->rx_dma; | ||
648 | param.src_dst_bidx = (t->rx_buf ? data_type : 0) << 16; | ||
649 | param.link_bcntrld = 0xffff; | ||
650 | param.src_dst_cidx = 0; | ||
651 | param.ccnt = 1; | ||
652 | edma_write_slot(dma->rx_channel, ¶m); | ||
653 | |||
654 | if (pdata->cshold_bug) | ||
655 | iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); | ||
656 | |||
657 | edma_start(dma->rx_channel); | ||
658 | edma_start(dma->tx_channel); | ||
659 | set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); | ||
660 | } | ||
941 | 661 | ||
942 | if (t->rx_buf) | 662 | /* Wait for the transfer to complete */ |
943 | edma_start(davinci_spi_dma->dma_rx_channel); | 663 | if (spicfg->io_type != SPI_IO_TYPE_POLL) { |
664 | wait_for_completion_interruptible(&(dspi->done)); | ||
665 | } else { | ||
666 | while (dspi->rcount > 0 || dspi->wcount > 0) { | ||
667 | errors = davinci_spi_process_events(dspi); | ||
668 | if (errors) | ||
669 | break; | ||
670 | cpu_relax(); | ||
671 | } | ||
672 | } | ||
944 | 673 | ||
945 | if ((t->rx_buf) || (t->tx_buf)) | 674 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); |
946 | davinci_spi_set_dma_req(spi, 1); | 675 | if (spicfg->io_type == SPI_IO_TYPE_DMA) { |
947 | 676 | ||
948 | if (t->tx_buf) | 677 | if (t->tx_buf) |
949 | wait_for_completion_interruptible( | 678 | dma_unmap_single(NULL, t->tx_dma, dspi->wcount, |
950 | &davinci_spi_dma->dma_tx_completion); | 679 | DMA_TO_DEVICE); |
951 | 680 | ||
952 | if (t->rx_buf) | 681 | dma_unmap_single(NULL, t->rx_dma, rx_buf_count, |
953 | wait_for_completion_interruptible( | 682 | DMA_FROM_DEVICE); |
954 | &davinci_spi_dma->dma_rx_completion); | ||
955 | 683 | ||
956 | dma_unmap_single(NULL, t->tx_dma, temp_count, DMA_TO_DEVICE); | 684 | clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); |
685 | } | ||
957 | 686 | ||
958 | if (t->rx_buf) | 687 | clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); |
959 | dma_unmap_single(NULL, t->rx_dma, count, DMA_FROM_DEVICE); | 688 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); |
960 | 689 | ||
961 | /* | 690 | /* |
962 | * Check for bit error, desync error,parity error,timeout error and | 691 | * Check for bit error, desync error,parity error,timeout error and |
963 | * receive overflow errors | 692 | * receive overflow errors |
964 | */ | 693 | */ |
965 | int_status = ioread32(davinci_spi->base + SPIFLG); | 694 | if (errors) { |
966 | 695 | ret = davinci_spi_check_error(dspi, errors); | |
967 | ret = davinci_spi_check_error(davinci_spi, int_status); | 696 | WARN(!ret, "%s: error reported but no error found!\n", |
968 | if (ret != 0) | 697 | dev_name(&spi->dev)); |
969 | return ret; | 698 | return ret; |
699 | } | ||
970 | 700 | ||
971 | /* SPI Framework maintains the count only in bytes so convert back */ | 701 | if (dspi->rcount != 0 || dspi->wcount != 0) { |
972 | davinci_spi->count *= conv; | 702 | dev_err(sdev, "SPI data transfer error\n"); |
703 | return -EIO; | ||
704 | } | ||
973 | 705 | ||
974 | return t->len; | 706 | return t->len; |
975 | } | 707 | } |
976 | 708 | ||
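/*
 * Worked example of the PaRAM arithmetic used above (transfer length assumed
 * for illustration): a 20-byte transfer at 16 bits per word gives
 * data_type = 2 and wcount = rcount = 10, so
 *
 *	TX slot: a_b_cnt      = 10 << 16 | 2	ACNT 2 bytes, BCNT 10 words
 *		 src_dst_bidx = 2		source walks tx_buf, SPIDAT1 fixed
 *	RX slot: a_b_cnt      = 10 << 16 | 2
 *		 src_dst_bidx = 2 << 16		SPIBUF fixed, destination walks rx_buf
 *
 * With a NULL tx_buf or rx_buf the corresponding index is 0, so the same word
 * is resent or the small rx_tmp_buf is overwritten without incrementing.
 */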
977 | /** | 709 | /** |
978 | * davinci_spi_irq - IRQ handler for DaVinci SPI | 710 | * davinci_spi_irq - Interrupt handler for SPI Master Controller |
979 | * @irq: IRQ number for this SPI Master | 711 | * @irq: IRQ number for this SPI Master |
980 | * @context_data: structure for SPI Master controller davinci_spi | 712 | * @context_data: structure for SPI Master controller davinci_spi |
713 | * | ||
714 | * The ISR determines whether the interrupt arrived for a READ or a WRITE | ||
715 | * command and acts accordingly. It checks the remaining transfer length and, | ||
716 | * if it is not zero, dispatches the next transfer command. Once the transfer | ||
717 | * length reaches zero it signals COMPLETION so that davinci_spi_bufs() | ||
718 | * can proceed. | ||
981 | */ | 719 | */ |
982 | static irqreturn_t davinci_spi_irq(s32 irq, void *context_data) | 720 | static irqreturn_t davinci_spi_irq(s32 irq, void *data) |
983 | { | 721 | { |
984 | struct davinci_spi *davinci_spi = context_data; | 722 | struct davinci_spi *dspi = data; |
985 | u32 int_status, rx_data = 0; | 723 | int status; |
986 | irqreturn_t ret = IRQ_NONE; | ||
987 | 724 | ||
988 | int_status = ioread32(davinci_spi->base + SPIFLG); | 725 | status = davinci_spi_process_events(dspi); |
726 | if (unlikely(status != 0)) | ||
727 | clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT); | ||
989 | 728 | ||
990 | while ((int_status & SPIFLG_RX_INTR_MASK)) { | 729 | if ((!dspi->rcount && !dspi->wcount) || status) |
991 | if (likely(int_status & SPIFLG_RX_INTR_MASK)) { | 730 | complete(&dspi->done); |
992 | ret = IRQ_HANDLED; | ||
993 | 731 | ||
994 | rx_data = ioread32(davinci_spi->base + SPIBUF); | 732 | return IRQ_HANDLED; |
995 | davinci_spi->get_rx(rx_data, davinci_spi); | 733 | } |
996 | 734 | ||
997 | /* Disable Receive Interrupt */ | 735 | static int davinci_spi_request_dma(struct davinci_spi *dspi) |
998 | iowrite32(~(SPIINT_RX_INTR | SPIINT_TX_INTR), | 736 | { |
999 | davinci_spi->base + SPIINT); | 737 | int r; |
1000 | } else | 738 | struct davinci_spi_dma *dma = &dspi->dma; |
1001 | (void)davinci_spi_check_error(davinci_spi, int_status); | ||
1002 | 739 | ||
1003 | int_status = ioread32(davinci_spi->base + SPIFLG); | 740 | r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, |
741 | dma->eventq); | ||
742 | if (r < 0) { | ||
743 | pr_err("Unable to request DMA channel for SPI RX\n"); | ||
744 | r = -EAGAIN; | ||
745 | goto rx_dma_failed; | ||
1004 | } | 746 | } |
1005 | 747 | ||
1006 | return ret; | 748 | r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, |
749 | dma->eventq); | ||
750 | if (r < 0) { | ||
751 | pr_err("Unable to request DMA channel for SPI TX\n"); | ||
752 | r = -EAGAIN; | ||
753 | goto tx_dma_failed; | ||
754 | } | ||
755 | |||
756 | r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); | ||
757 | if (r < 0) { | ||
758 | pr_err("Unable to request SPI TX DMA param slot\n"); | ||
759 | r = -EAGAIN; | ||
760 | goto param_failed; | ||
761 | } | ||
762 | dma->dummy_param_slot = r; | ||
763 | edma_link(dma->dummy_param_slot, dma->dummy_param_slot); | ||
764 | |||
765 | return 0; | ||
766 | param_failed: | ||
767 | edma_free_channel(dma->tx_channel); | ||
768 | tx_dma_failed: | ||
769 | edma_free_channel(dma->rx_channel); | ||
770 | rx_dma_failed: | ||
771 | return r; | ||
1007 | } | 772 | } |
1008 | 773 | ||
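davinci_spi_request_dma() above registers davinci_spi_dma_callback with both EDMA channels; the callback itself appears earlier in this diff. As a rough, non-authoritative sketch of the shape such a callback takes under the mach-davinci EDMA API (the ch_status check and the rcount/wcount bookkeeping mirror how this driver signals dspi->done, but the exact body may differ):

/* Sketch only: a completion callback with the signature expected by
 * edma_alloc_channel(). On successful completion it clears the count for
 * the finished direction; dspi->done is completed once both directions
 * are finished or any error status is reported. */
#include <linux/completion.h>
#include <mach/edma.h>

static void example_spi_dma_callback(unsigned lch, u16 status, void *data)
{
	struct davinci_spi *dspi = data;
	struct davinci_spi_dma *dma = &dspi->dma;

	edma_stop(lch);			/* stop the channel that signalled */

	if (status == DMA_COMPLETE) {
		if (lch == dma->rx_channel)
			dspi->rcount = 0;
		if (lch == dma->tx_channel)
			dspi->wcount = 0;
	}

	if ((!dspi->rcount && !dspi->wcount) || status != DMA_COMPLETE)
		complete(&dspi->done);
}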
1009 | /** | 774 | /** |
1010 | * davinci_spi_probe - probe function for SPI Master Controller | 775 | * davinci_spi_probe - probe function for SPI Master Controller |
1011 | * @pdev: platform_device structure which contains platform-specific data | 776 |
777 | * | ||
778 | * According to the Linux device model, this function is invoked with a | ||
779 | * platform_device struct that carries the device-specific info. It maps the | ||
780 | * SPI controller's memory, registers the IRQ, resets the SPI controller and | ||
781 | * sets its registers to default values. It then invokes spi_bitbang_start to | ||
782 | * create a work queue so that the client driver can register its transfer | ||
783 | * method with the work queue. | ||
1012 | */ | 784 | */ |
1013 | static int davinci_spi_probe(struct platform_device *pdev) | 785 | static int davinci_spi_probe(struct platform_device *pdev) |
1014 | { | 786 | { |
1015 | struct spi_master *master; | 787 | struct spi_master *master; |
1016 | struct davinci_spi *davinci_spi; | 788 | struct davinci_spi *dspi; |
1017 | struct davinci_spi_platform_data *pdata; | 789 | struct davinci_spi_platform_data *pdata; |
1018 | struct resource *r, *mem; | 790 | struct resource *r, *mem; |
1019 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; | 791 | resource_size_t dma_rx_chan = SPI_NO_RESOURCE; |
1020 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; | 792 | resource_size_t dma_tx_chan = SPI_NO_RESOURCE; |
1021 | resource_size_t dma_eventq = SPI_NO_RESOURCE; | 793 | resource_size_t dma_eventq = SPI_NO_RESOURCE; |
1022 | int i = 0, ret = 0; | 794 | int i = 0, ret = 0; |
795 | u32 spipc0; | ||
1023 | 796 | ||
1024 | pdata = pdev->dev.platform_data; | 797 | pdata = pdev->dev.platform_data; |
1025 | if (pdata == NULL) { | 798 | if (pdata == NULL) { |
@@ -1035,8 +808,8 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
1035 | 808 | ||
1036 | dev_set_drvdata(&pdev->dev, master); | 809 | dev_set_drvdata(&pdev->dev, master); |
1037 | 810 | ||
1038 | davinci_spi = spi_master_get_devdata(master); | 811 | dspi = spi_master_get_devdata(master); |
1039 | if (davinci_spi == NULL) { | 812 | if (dspi == NULL) { |
1040 | ret = -ENOENT; | 813 | ret = -ENOENT; |
1041 | goto free_master; | 814 | goto free_master; |
1042 | } | 815 | } |
@@ -1047,164 +820,143 @@ static int davinci_spi_probe(struct platform_device *pdev) | |||
1047 | goto free_master; | 820 | goto free_master; |
1048 | } | 821 | } |
1049 | 822 | ||
1050 | davinci_spi->pbase = r->start; | 823 | dspi->pbase = r->start; |
1051 | davinci_spi->region_size = resource_size(r); | 824 | dspi->pdata = pdata; |
1052 | davinci_spi->pdata = pdata; | ||
1053 | 825 | ||
1054 | mem = request_mem_region(r->start, davinci_spi->region_size, | 826 | mem = request_mem_region(r->start, resource_size(r), pdev->name); |
1055 | pdev->name); | ||
1056 | if (mem == NULL) { | 827 | if (mem == NULL) { |
1057 | ret = -EBUSY; | 828 | ret = -EBUSY; |
1058 | goto free_master; | 829 | goto free_master; |
1059 | } | 830 | } |
1060 | 831 | ||
1061 | davinci_spi->base = (struct davinci_spi_reg __iomem *) | 832 | dspi->base = ioremap(r->start, resource_size(r)); |
1062 | ioremap(r->start, davinci_spi->region_size); | 833 | if (dspi->base == NULL) { |
1063 | if (davinci_spi->base == NULL) { | ||
1064 | ret = -ENOMEM; | 834 | ret = -ENOMEM; |
1065 | goto release_region; | 835 | goto release_region; |
1066 | } | 836 | } |
1067 | 837 | ||
1068 | davinci_spi->irq = platform_get_irq(pdev, 0); | 838 | dspi->irq = platform_get_irq(pdev, 0); |
1069 | if (davinci_spi->irq <= 0) { | 839 | if (dspi->irq <= 0) { |
1070 | ret = -EINVAL; | 840 | ret = -EINVAL; |
1071 | goto unmap_io; | 841 | goto unmap_io; |
1072 | } | 842 | } |
1073 | 843 | ||
1074 | ret = request_irq(davinci_spi->irq, davinci_spi_irq, IRQF_DISABLED, | 844 | ret = request_irq(dspi->irq, davinci_spi_irq, 0, dev_name(&pdev->dev), |
1075 | dev_name(&pdev->dev), davinci_spi); | 845 | dspi); |
1076 | if (ret) | 846 | if (ret) |
1077 | goto unmap_io; | 847 | goto unmap_io; |
1078 | 848 | ||
1079 | /* Allocate tmp_buf for tx_buf */ | 849 | dspi->bitbang.master = spi_master_get(master); |
1080 | davinci_spi->tmp_buf = kzalloc(SPI_BUFSIZ, GFP_KERNEL); | 850 | if (dspi->bitbang.master == NULL) { |
1081 | if (davinci_spi->tmp_buf == NULL) { | ||
1082 | ret = -ENOMEM; | ||
1083 | goto irq_free; | ||
1084 | } | ||
1085 | |||
1086 | davinci_spi->bitbang.master = spi_master_get(master); | ||
1087 | if (davinci_spi->bitbang.master == NULL) { | ||
1088 | ret = -ENODEV; | 851 | ret = -ENODEV; |
1089 | goto free_tmp_buf; | 852 | goto irq_free; |
1090 | } | 853 | } |
1091 | 854 | ||
1092 | davinci_spi->clk = clk_get(&pdev->dev, NULL); | 855 | dspi->clk = clk_get(&pdev->dev, NULL); |
1093 | if (IS_ERR(davinci_spi->clk)) { | 856 | if (IS_ERR(dspi->clk)) { |
1094 | ret = -ENODEV; | 857 | ret = -ENODEV; |
1095 | goto put_master; | 858 | goto put_master; |
1096 | } | 859 | } |
1097 | clk_enable(davinci_spi->clk); | 860 | clk_enable(dspi->clk); |
1098 | |||
1099 | 861 | ||
1100 | master->bus_num = pdev->id; | 862 | master->bus_num = pdev->id; |
1101 | master->num_chipselect = pdata->num_chipselect; | 863 | master->num_chipselect = pdata->num_chipselect; |
1102 | master->setup = davinci_spi_setup; | 864 | master->setup = davinci_spi_setup; |
1103 | master->cleanup = davinci_spi_cleanup; | ||
1104 | |||
1105 | davinci_spi->bitbang.chipselect = davinci_spi_chipselect; | ||
1106 | davinci_spi->bitbang.setup_transfer = davinci_spi_setup_transfer; | ||
1107 | |||
1108 | davinci_spi->version = pdata->version; | ||
1109 | use_dma = pdata->use_dma; | ||
1110 | |||
1111 | davinci_spi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; | ||
1112 | if (davinci_spi->version == SPI_VERSION_2) | ||
1113 | davinci_spi->bitbang.flags |= SPI_READY; | ||
1114 | |||
1115 | if (use_dma) { | ||
1116 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); | ||
1117 | if (r) | ||
1118 | dma_rx_chan = r->start; | ||
1119 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
1120 | if (r) | ||
1121 | dma_tx_chan = r->start; | ||
1122 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
1123 | if (r) | ||
1124 | dma_eventq = r->start; | ||
1125 | } | ||
1126 | 865 | ||
1127 | if (!use_dma || | 866 | dspi->bitbang.chipselect = davinci_spi_chipselect; |
1128 | dma_rx_chan == SPI_NO_RESOURCE || | 867 | dspi->bitbang.setup_transfer = davinci_spi_setup_transfer; |
1129 | dma_tx_chan == SPI_NO_RESOURCE || | 868 | |
1130 | dma_eventq == SPI_NO_RESOURCE) { | 869 | dspi->version = pdata->version; |
1131 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_pio; | 870 | |
1132 | use_dma = 0; | 871 | dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP; |
1133 | } else { | 872 | if (dspi->version == SPI_VERSION_2) |
1134 | davinci_spi->bitbang.txrx_bufs = davinci_spi_bufs_dma; | 873 | dspi->bitbang.flags |= SPI_READY; |
1135 | davinci_spi->dma_channels = kzalloc(master->num_chipselect | 874 | |
1136 | * sizeof(struct davinci_spi_dma), GFP_KERNEL); | 875 | r = platform_get_resource(pdev, IORESOURCE_DMA, 0); |
1137 | if (davinci_spi->dma_channels == NULL) { | 876 | if (r) |
1138 | ret = -ENOMEM; | 877 | dma_rx_chan = r->start; |
878 | r = platform_get_resource(pdev, IORESOURCE_DMA, 1); | ||
879 | if (r) | ||
880 | dma_tx_chan = r->start; | ||
881 | r = platform_get_resource(pdev, IORESOURCE_DMA, 2); | ||
882 | if (r) | ||
883 | dma_eventq = r->start; | ||
884 | |||
885 | dspi->bitbang.txrx_bufs = davinci_spi_bufs; | ||
886 | if (dma_rx_chan != SPI_NO_RESOURCE && | ||
887 | dma_tx_chan != SPI_NO_RESOURCE && | ||
888 | dma_eventq != SPI_NO_RESOURCE) { | ||
889 | dspi->dma.rx_channel = dma_rx_chan; | ||
890 | dspi->dma.tx_channel = dma_tx_chan; | ||
891 | dspi->dma.eventq = dma_eventq; | ||
892 | |||
893 | ret = davinci_spi_request_dma(dspi); | ||
894 | if (ret) | ||
1139 | goto free_clk; | 895 | goto free_clk; |
1140 | } | ||
1141 | 896 | ||
1142 | for (i = 0; i < master->num_chipselect; i++) { | 897 | dev_info(&pdev->dev, "DMA: supported\n"); |
1143 | davinci_spi->dma_channels[i].dma_rx_channel = -1; | 898 | dev_info(&pdev->dev, "DMA: RX channel: %d, TX channel: %d, " |
1144 | davinci_spi->dma_channels[i].dma_rx_sync_dev = | 899 | "event queue: %d\n", dma_rx_chan, dma_tx_chan, |
1145 | dma_rx_chan; | ||
1146 | davinci_spi->dma_channels[i].dma_tx_channel = -1; | ||
1147 | davinci_spi->dma_channels[i].dma_tx_sync_dev = | ||
1148 | dma_tx_chan; | ||
1149 | davinci_spi->dma_channels[i].eventq = dma_eventq; | ||
1150 | } | ||
1151 | dev_info(&pdev->dev, "DaVinci SPI driver in EDMA mode\n" | ||
1152 | "Using RX channel = %d , TX channel = %d and " | ||
1153 | "event queue = %d", dma_rx_chan, dma_tx_chan, | ||
1154 | dma_eventq); | 900 | dma_eventq); |
1155 | } | 901 | } |
1156 | 902 | ||
1157 | davinci_spi->get_rx = davinci_spi_rx_buf_u8; | 903 | dspi->get_rx = davinci_spi_rx_buf_u8; |
1158 | davinci_spi->get_tx = davinci_spi_tx_buf_u8; | 904 | dspi->get_tx = davinci_spi_tx_buf_u8; |
1159 | 905 | ||
1160 | init_completion(&davinci_spi->done); | 906 | init_completion(&dspi->done); |
1161 | 907 | ||
1162 | /* Reset In/OUT SPI module */ | 908 | /* Reset In/OUT SPI module */ |
1163 | iowrite32(0, davinci_spi->base + SPIGCR0); | 909 | iowrite32(0, dspi->base + SPIGCR0); |
1164 | udelay(100); | 910 | udelay(100); |
1165 | iowrite32(1, davinci_spi->base + SPIGCR0); | 911 | iowrite32(1, dspi->base + SPIGCR0); |
1166 | 912 | ||
1167 | /* Clock internal */ | 913 | /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */ |
1168 | if (davinci_spi->pdata->clk_internal) | 914 | spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK; |
1169 | set_io_bits(davinci_spi->base + SPIGCR1, | 915 | iowrite32(spipc0, dspi->base + SPIPC0); |
1170 | SPIGCR1_CLKMOD_MASK); | ||
1171 | else | ||
1172 | clear_io_bits(davinci_spi->base + SPIGCR1, | ||
1173 | SPIGCR1_CLKMOD_MASK); | ||
1174 | 916 | ||
1175 | /* master mode default */ | 917 | /* initialize chip selects */ |
1176 | set_io_bits(davinci_spi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | 918 | if (pdata->chip_sel) { |
919 | for (i = 0; i < pdata->num_chipselect; i++) { | ||
920 | if (pdata->chip_sel[i] != SPI_INTERN_CS) | ||
921 | gpio_direction_output(pdata->chip_sel[i], 1); | ||
922 | } | ||
923 | } | ||
1177 | 924 | ||
1178 | if (davinci_spi->pdata->intr_level) | 925 | if (pdata->intr_line) |
1179 | iowrite32(SPI_INTLVL_1, davinci_spi->base + SPILVL); | 926 | iowrite32(SPI_INTLVL_1, dspi->base + SPILVL); |
1180 | else | 927 | else |
1181 | iowrite32(SPI_INTLVL_0, davinci_spi->base + SPILVL); | 928 | iowrite32(SPI_INTLVL_0, dspi->base + SPILVL); |
1182 | 929 | ||
1183 | ret = spi_bitbang_start(&davinci_spi->bitbang); | 930 | iowrite32(CS_DEFAULT, dspi->base + SPIDEF); |
1184 | if (ret) | ||
1185 | goto free_clk; | ||
1186 | 931 | ||
1187 | dev_info(&pdev->dev, "Controller at 0x%p \n", davinci_spi->base); | 932 | /* master mode default */ |
933 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK); | ||
934 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK); | ||
935 | set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK); | ||
936 | |||
937 | ret = spi_bitbang_start(&dspi->bitbang); | ||
938 | if (ret) | ||
939 | goto free_dma; | ||
1188 | 940 | ||
1189 | if (!pdata->poll_mode) | 941 | dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base); |
1190 | dev_info(&pdev->dev, "Operating in interrupt mode" | ||
1191 | " using IRQ %d\n", davinci_spi->irq); | ||
1192 | 942 | ||
1193 | return ret; | 943 | return ret; |
1194 | 944 | ||
945 | free_dma: | ||
946 | edma_free_channel(dspi->dma.tx_channel); | ||
947 | edma_free_channel(dspi->dma.rx_channel); | ||
948 | edma_free_slot(dspi->dma.dummy_param_slot); | ||
1195 | free_clk: | 949 | free_clk: |
1196 | clk_disable(davinci_spi->clk); | 950 | clk_disable(dspi->clk); |
1197 | clk_put(davinci_spi->clk); | 951 | clk_put(dspi->clk); |
1198 | put_master: | 952 | put_master: |
1199 | spi_master_put(master); | 953 | spi_master_put(master); |
1200 | free_tmp_buf: | ||
1201 | kfree(davinci_spi->tmp_buf); | ||
1202 | irq_free: | 954 | irq_free: |
1203 | free_irq(davinci_spi->irq, davinci_spi); | 955 | free_irq(dspi->irq, dspi); |
1204 | unmap_io: | 956 | unmap_io: |
1205 | iounmap(davinci_spi->base); | 957 | iounmap(dspi->base); |
1206 | release_region: | 958 | release_region: |
1207 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | 959 | release_mem_region(dspi->pbase, resource_size(r)); |
1208 | free_master: | 960 | free_master: |
1209 | kfree(master); | 961 | kfree(master); |
1210 | err: | 962 | err: |
@@ -1222,27 +974,31 @@ err: | |||
1222 | */ | 974 | */ |
1223 | static int __exit davinci_spi_remove(struct platform_device *pdev) | 975 | static int __exit davinci_spi_remove(struct platform_device *pdev) |
1224 | { | 976 | { |
1225 | struct davinci_spi *davinci_spi; | 977 | struct davinci_spi *dspi; |
1226 | struct spi_master *master; | 978 | struct spi_master *master; |
979 | struct resource *r; | ||
1227 | 980 | ||
1228 | master = dev_get_drvdata(&pdev->dev); | 981 | master = dev_get_drvdata(&pdev->dev); |
1229 | davinci_spi = spi_master_get_devdata(master); | 982 | dspi = spi_master_get_devdata(master); |
1230 | 983 | ||
1231 | spi_bitbang_stop(&davinci_spi->bitbang); | 984 | spi_bitbang_stop(&dspi->bitbang); |
1232 | 985 | ||
1233 | clk_disable(davinci_spi->clk); | 986 | clk_disable(dspi->clk); |
1234 | clk_put(davinci_spi->clk); | 987 | clk_put(dspi->clk); |
1235 | spi_master_put(master); | 988 | spi_master_put(master); |
1236 | kfree(davinci_spi->tmp_buf); | 989 | free_irq(dspi->irq, dspi); |
1237 | free_irq(davinci_spi->irq, davinci_spi); | 990 | iounmap(dspi->base); |
1238 | iounmap(davinci_spi->base); | 991 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1239 | release_mem_region(davinci_spi->pbase, davinci_spi->region_size); | 992 | release_mem_region(dspi->pbase, resource_size(r)); |
1240 | 993 | ||
1241 | return 0; | 994 | return 0; |
1242 | } | 995 | } |
1243 | 996 | ||
1244 | static struct platform_driver davinci_spi_driver = { | 997 | static struct platform_driver davinci_spi_driver = { |
1245 | .driver.name = "spi_davinci", | 998 | .driver = { |
999 | .name = "spi_davinci", | ||
1000 | .owner = THIS_MODULE, | ||
1001 | }, | ||
1246 | .remove = __exit_p(davinci_spi_remove), | 1002 | .remove = __exit_p(davinci_spi_remove), |
1247 | }; | 1003 | }; |
1248 | 1004 | ||
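The probe function consumes dev.platform_data as a struct davinci_spi_platform_data; num_chipselect, version, chip_sel, intr_line and cshold_bug are the members visible in this diff. A hedged sketch of how a board file might supply it; the GPIO number and the header path are assumptions for illustration, not taken from this patch.

#include <linux/kernel.h>
#include <mach/spi.h>		/* assumed home of davinci_spi_platform_data */

/* CS0 stays on the SPI module's own chip-select pin, CS1 is driven as a
 * GPIO line (the GPIO number below is made up for illustration). */
static u8 example_spi0_chip_sel[] = { SPI_INTERN_CS, 42 };

static struct davinci_spi_platform_data example_spi0_pdata = {
	.version	= SPI_VERSION_1,
	.num_chipselect	= ARRAY_SIZE(example_spi0_chip_sel),
	.chip_sel	= example_spi0_chip_sel,
	.intr_line	= 0,
};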