Diffstat (limited to 'drivers/spi/spi-pl022.c')
-rw-r--r-- | drivers/spi/spi-pl022.c | 2342 |
1 files changed, 2342 insertions, 0 deletions
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c new file mode 100644 index 000000000000..25417054a456 --- /dev/null +++ b/drivers/spi/spi-pl022.c | |||
@@ -0,0 +1,2342 @@ | |||
1 | /* | ||
2 | * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. | ||
3 | * | ||
4 | * Copyright (C) 2008-2009 ST-Ericsson AB | ||
5 | * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. | ||
6 | * | ||
7 | * Author: Linus Walleij <linus.walleij@stericsson.com> | ||
8 | * | ||
9 | * Initial version inspired by: | ||
10 | * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c | ||
11 | * Initial adaptation to PL022 by: | ||
12 | * Sachin Verma <sachin.verma@st.com> | ||
13 | * | ||
14 | * This program is free software; you can redistribute it and/or modify | ||
15 | * it under the terms of the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2 of the License, or | ||
17 | * (at your option) any later version. | ||
18 | * | ||
19 | * This program is distributed in the hope that it will be useful, | ||
20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | * GNU General Public License for more details. | ||
23 | */ | ||
24 | |||
25 | #include <linux/init.h> | ||
26 | #include <linux/module.h> | ||
27 | #include <linux/device.h> | ||
28 | #include <linux/ioport.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/spi/spi.h> | ||
32 | #include <linux/workqueue.h> | ||
33 | #include <linux/delay.h> | ||
34 | #include <linux/clk.h> | ||
35 | #include <linux/err.h> | ||
36 | #include <linux/amba/bus.h> | ||
37 | #include <linux/amba/pl022.h> | ||
38 | #include <linux/io.h> | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/dmaengine.h> | ||
41 | #include <linux/dma-mapping.h> | ||
42 | #include <linux/scatterlist.h> | ||
43 | |||
44 | /* | ||
45 | * This macro is used to define some register default values. | ||
46 | * reg is masked with mask, then OR:ed with an (again masked) | ||
47 | * val shifted sb steps to the left. | ||
48 | */ | ||
49 | #define SSP_WRITE_BITS(reg, val, mask, sb) \ | ||
50 | ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) | ||
51 | |||
52 | /* | ||
53 | * This macro is also used to define some default values. | ||
54 | * It will just shift val by sb steps to the left and mask | ||
55 | * the result with mask. | ||
56 | */ | ||
57 | #define GEN_MASK_BITS(val, mask, sb) \ | ||
58 | (((val)<<(sb)) & (mask)) | ||
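A usage sketch (editorial, not part of this diff): the two helpers above are typically combined when composing a control register image. The field values come from <linux/amba/pl022.h> and the masks are the SSP_CR0_* definitions further down in this file.

	u32 cr0 = 0;
	/* clear the 4-bit DSS field and OR in the 8-bit word size */
	SSP_WRITE_BITS(cr0, SSP_DATA_BITS_8, SSP_CR0_MASK_DSS, 0);
	/* produce the SPO bit already shifted into position */
	cr0 |= GEN_MASK_BITS(SSP_CLK_POL_IDLE_HIGH, SSP_CR0_MASK_SPO, 6);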
59 | |||
60 | #define DRIVE_TX 0 | ||
61 | #define DO_NOT_DRIVE_TX 1 | ||
62 | |||
63 | #define DO_NOT_QUEUE_DMA 0 | ||
64 | #define QUEUE_DMA 1 | ||
65 | |||
66 | #define RX_TRANSFER 1 | ||
67 | #define TX_TRANSFER 2 | ||
68 | |||
69 | /* | ||
70 | * Macros to access SSP Registers with their offsets | ||
71 | */ | ||
72 | #define SSP_CR0(r) (r + 0x000) | ||
73 | #define SSP_CR1(r) (r + 0x004) | ||
74 | #define SSP_DR(r) (r + 0x008) | ||
75 | #define SSP_SR(r) (r + 0x00C) | ||
76 | #define SSP_CPSR(r) (r + 0x010) | ||
77 | #define SSP_IMSC(r) (r + 0x014) | ||
78 | #define SSP_RIS(r) (r + 0x018) | ||
79 | #define SSP_MIS(r) (r + 0x01C) | ||
80 | #define SSP_ICR(r) (r + 0x020) | ||
81 | #define SSP_DMACR(r) (r + 0x024) | ||
82 | #define SSP_ITCR(r) (r + 0x080) | ||
83 | #define SSP_ITIP(r) (r + 0x084) | ||
84 | #define SSP_ITOP(r) (r + 0x088) | ||
85 | #define SSP_TDR(r) (r + 0x08C) | ||
86 | |||
87 | #define SSP_PID0(r) (r + 0xFE0) | ||
88 | #define SSP_PID1(r) (r + 0xFE4) | ||
89 | #define SSP_PID2(r) (r + 0xFE8) | ||
90 | #define SSP_PID3(r) (r + 0xFEC) | ||
91 | |||
92 | #define SSP_CID0(r) (r + 0xFF0) | ||
93 | #define SSP_CID1(r) (r + 0xFF4) | ||
94 | #define SSP_CID2(r) (r + 0xFF8) | ||
95 | #define SSP_CID3(r) (r + 0xFFC) | ||
96 | |||
97 | /* | ||
98 | * SSP Control Register 0 - SSP_CR0 | ||
99 | */ | ||
100 | #define SSP_CR0_MASK_DSS (0x0FUL << 0) | ||
101 | #define SSP_CR0_MASK_FRF (0x3UL << 4) | ||
102 | #define SSP_CR0_MASK_SPO (0x1UL << 6) | ||
103 | #define SSP_CR0_MASK_SPH (0x1UL << 7) | ||
104 | #define SSP_CR0_MASK_SCR (0xFFUL << 8) | ||
105 | |||
106 | /* | ||
107 | * The ST version of this block moves some bits | ||
108 | * in SSP_CR0 and extends it to 32 bits | ||
109 | */ | ||
110 | #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) | ||
111 | #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) | ||
112 | #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) | ||
113 | #define SSP_CR0_MASK_FRF_ST (0x3UL << 21) | ||
114 | |||
115 | |||
116 | /* | ||
117 | * SSP Control Register 1 - SSP_CR1 | ||
118 | */ | ||
119 | #define SSP_CR1_MASK_LBM (0x1UL << 0) | ||
120 | #define SSP_CR1_MASK_SSE (0x1UL << 1) | ||
121 | #define SSP_CR1_MASK_MS (0x1UL << 2) | ||
122 | #define SSP_CR1_MASK_SOD (0x1UL << 3) | ||
123 | |||
124 | /* | ||
125 | * The ST version of this block adds some bits | ||
126 | * in SSP_CR1 | ||
127 | */ | ||
128 | #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) | ||
129 | #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) | ||
130 | #define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) | ||
131 | #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) | ||
132 | #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) | ||
133 | /* This one is only in the PL023 variant */ | ||
134 | #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) | ||
135 | |||
136 | /* | ||
137 | * SSP Status Register - SSP_SR | ||
138 | */ | ||
139 | #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ | ||
140 | #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ | ||
141 | #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ | ||
142 | #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ | ||
143 | #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ | ||
144 | |||
145 | /* | ||
146 | * SSP Clock Prescale Register - SSP_CPSR | ||
147 | */ | ||
148 | #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) | ||
149 | |||
150 | /* | ||
151 | * SSP Interrupt Mask Set/Clear Register - SSP_IMSC | ||
152 | */ | ||
153 | #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ | ||
154 | #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ | ||
155 | #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ | ||
156 | #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ | ||
157 | |||
158 | /* | ||
159 | * SSP Raw Interrupt Status Register - SSP_RIS | ||
160 | */ | ||
161 | /* Receive Overrun Raw Interrupt status */ | ||
162 | #define SSP_RIS_MASK_RORRIS (0x1UL << 0) | ||
163 | /* Receive Timeout Raw Interrupt status */ | ||
164 | #define SSP_RIS_MASK_RTRIS (0x1UL << 1) | ||
165 | /* Receive FIFO Raw Interrupt status */ | ||
166 | #define SSP_RIS_MASK_RXRIS (0x1UL << 2) | ||
167 | /* Transmit FIFO Raw Interrupt status */ | ||
168 | #define SSP_RIS_MASK_TXRIS (0x1UL << 3) | ||
169 | |||
170 | /* | ||
171 | * SSP Masked Interrupt Status Register - SSP_MIS | ||
172 | */ | ||
173 | /* Receive Overrun Masked Interrupt status */ | ||
174 | #define SSP_MIS_MASK_RORMIS (0x1UL << 0) | ||
175 | /* Receive Timeout Masked Interrupt status */ | ||
176 | #define SSP_MIS_MASK_RTMIS (0x1UL << 1) | ||
177 | /* Receive FIFO Masked Interrupt status */ | ||
178 | #define SSP_MIS_MASK_RXMIS (0x1UL << 2) | ||
179 | /* Transmit FIFO Masked Interrupt status */ | ||
180 | #define SSP_MIS_MASK_TXMIS (0x1UL << 3) | ||
181 | |||
182 | /* | ||
183 | * SSP Interrupt Clear Register - SSP_ICR | ||
184 | */ | ||
185 | /* Receive Overrun Raw Clear Interrupt bit */ | ||
186 | #define SSP_ICR_MASK_RORIC (0x1UL << 0) | ||
187 | /* Receive Timeout Clear Interrupt bit */ | ||
188 | #define SSP_ICR_MASK_RTIC (0x1UL << 1) | ||
189 | |||
190 | /* | ||
191 | * SSP DMA Control Register - SSP_DMACR | ||
192 | */ | ||
193 | /* Receive DMA Enable bit */ | ||
194 | #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) | ||
195 | /* Transmit DMA Enable bit */ | ||
196 | #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) | ||
197 | |||
198 | /* | ||
199 | * SSP Integration Test control Register - SSP_ITCR | ||
200 | */ | ||
201 | #define SSP_ITCR_MASK_ITEN (0x1UL << 0) | ||
202 | #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) | ||
203 | |||
204 | /* | ||
205 | * SSP Integration Test Input Register - SSP_ITIP | ||
206 | */ | ||
207 | #define ITIP_MASK_SSPRXD (0x1UL << 0) | ||
208 | #define ITIP_MASK_SSPFSSIN (0x1UL << 1) | ||
209 | #define ITIP_MASK_SSPCLKIN (0x1UL << 2) | ||
210 | #define ITIP_MASK_RXDMAC (0x1UL << 3) | ||
211 | #define ITIP_MASK_TXDMAC (0x1UL << 4) | ||
212 | #define ITIP_MASK_SSPTXDIN (0x1UL << 5) | ||
213 | |||
214 | /* | ||
215 | * SSP Integration Test output Register - SSP_ITOP | ||
216 | */ | ||
217 | #define ITOP_MASK_SSPTXD (0x1UL << 0) | ||
218 | #define ITOP_MASK_SSPFSSOUT (0x1UL << 1) | ||
219 | #define ITOP_MASK_SSPCLKOUT (0x1UL << 2) | ||
220 | #define ITOP_MASK_SSPOEn (0x1UL << 3) | ||
221 | #define ITOP_MASK_SSPCTLOEn (0x1UL << 4) | ||
222 | #define ITOP_MASK_RORINTR (0x1UL << 5) | ||
223 | #define ITOP_MASK_RTINTR (0x1UL << 6) | ||
224 | #define ITOP_MASK_RXINTR (0x1UL << 7) | ||
225 | #define ITOP_MASK_TXINTR (0x1UL << 8) | ||
226 | #define ITOP_MASK_INTR (0x1UL << 9) | ||
227 | #define ITOP_MASK_RXDMABREQ (0x1UL << 10) | ||
228 | #define ITOP_MASK_RXDMASREQ (0x1UL << 11) | ||
229 | #define ITOP_MASK_TXDMABREQ (0x1UL << 12) | ||
230 | #define ITOP_MASK_TXDMASREQ (0x1UL << 13) | ||
231 | |||
232 | /* | ||
233 | * SSP Test Data Register - SSP_TDR | ||
234 | */ | ||
235 | #define TDR_MASK_TESTDATA (0xFFFFFFFF) | ||
236 | |||
237 | /* | ||
238 | * Message State | ||
239 | * we use the spi_message.state (void *) pointer to | ||
240 | * hold a single state value, that's why all this | ||
241 | * (void *) casting is done here. | ||
242 | */ | ||
243 | #define STATE_START ((void *) 0) | ||
244 | #define STATE_RUNNING ((void *) 1) | ||
245 | #define STATE_DONE ((void *) 2) | ||
246 | #define STATE_ERROR ((void *) -1) | ||
247 | |||
248 | /* | ||
249 | * SSP State - Whether Enabled or Disabled | ||
250 | */ | ||
251 | #define SSP_DISABLED (0) | ||
252 | #define SSP_ENABLED (1) | ||
253 | |||
254 | /* | ||
255 | * SSP DMA State - Whether DMA Enabled or Disabled | ||
256 | */ | ||
257 | #define SSP_DMA_DISABLED (0) | ||
258 | #define SSP_DMA_ENABLED (1) | ||
259 | |||
260 | /* | ||
261 | * SSP Clock Defaults | ||
262 | */ | ||
263 | #define SSP_DEFAULT_CLKRATE 0x2 | ||
264 | #define SSP_DEFAULT_PRESCALE 0x40 | ||
265 | |||
266 | /* | ||
267 | * SSP Clock Parameter ranges | ||
268 | */ | ||
269 | #define CPSDVR_MIN 0x02 | ||
270 | #define CPSDVR_MAX 0xFE | ||
271 | #define SCR_MIN 0x00 | ||
272 | #define SCR_MAX 0xFF | ||
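As a worked example (editorial sketch, not part of the driver): per the PL022 TRM the effective bit rate is spiclk / (CPSDVSR * (1 + SCR)), so with the defaults above (CPSDVSR = 0x40, SCR = 0x2) a 48 MHz SSPCLK gives 48 MHz / (64 * 3) = 250 kHz.

	/* illustrative helper following the TRM relation */
	static u32 pl022_rate_example(u32 spiclk_hz, u8 cpsdvsr, u8 scr)
	{
		return spiclk_hz / (cpsdvsr * (1 + scr));
	}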
273 | |||
274 | /* | ||
275 | * SSP Interrupt related Macros | ||
276 | */ | ||
277 | #define DEFAULT_SSP_REG_IMSC 0x0UL | ||
278 | #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC | ||
279 | #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) | ||
280 | |||
281 | #define CLEAR_ALL_INTERRUPTS 0x3 | ||
282 | |||
283 | #define SPI_POLLING_TIMEOUT 1000 | ||
284 | |||
285 | |||
286 | /* | ||
287 | * The type of reading going on for this chip | ||
288 | */ | ||
289 | enum ssp_reading { | ||
290 | READING_NULL, | ||
291 | READING_U8, | ||
292 | READING_U16, | ||
293 | READING_U32 | ||
294 | }; | ||
295 | |||
296 | /** | ||
297 | * The type of writing going on for this chip | ||
298 | */ | ||
299 | enum ssp_writing { | ||
300 | WRITING_NULL, | ||
301 | WRITING_U8, | ||
302 | WRITING_U16, | ||
303 | WRITING_U32 | ||
304 | }; | ||
305 | |||
306 | /** | ||
307 | * struct vendor_data - vendor-specific config parameters | ||
308 | * for PL022 derivatives | ||
309 | * @fifodepth: depth of FIFOs (both) | ||
310 | * @max_bpw: maximum number of bits per word | ||
311 | * @unidir: supports unidirectional transfers | ||
312 | * @extended_cr: 32 bit wide control register 0 with extra | ||
313 | * features and extra features in CR1 as found in the ST variants | ||
314 | * @pl023: supports a subset of the ST extensions called "PL023" | ||
 | * @loopback: supports loopback mode | ||
315 | */ | ||
316 | struct vendor_data { | ||
317 | int fifodepth; | ||
318 | int max_bpw; | ||
319 | bool unidir; | ||
320 | bool extended_cr; | ||
321 | bool pl023; | ||
322 | bool loopback; | ||
323 | }; | ||
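For orientation, an illustrative entry (not taken from this diff): the plain ARM implementation has 8-entry FIFOs and a 16-bit maximum word size, so its vendor_data could look roughly like this; the loopback value is an assumption.

	static struct vendor_data vendor_arm_example = {
		.fifodepth = 8,
		.max_bpw = 16,
		.unidir = false,
		.extended_cr = false,
		.pl023 = false,
		.loopback = true,	/* assumed: loopback mode works on this variant */
	};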
324 | |||
325 | /** | ||
326 | * struct pl022 - This is the private SSP driver data structure | ||
327 | * @adev: AMBA device model hookup | ||
328 | * @vendor: vendor data for the IP block | ||
329 | * @phybase: the physical memory where the SSP device resides | ||
330 | * @virtbase: the virtual memory where the SSP is mapped | ||
331 | * @clk: outgoing clock "SPICLK" for the SPI bus | ||
332 | * @master: SPI framework hookup | ||
333 | * @master_info: controller-specific data from machine setup | ||
334 | * @workqueue: a workqueue on which any spi_message request is queued | ||
335 | * @pump_messages: work struct for scheduling work to the workqueue | ||
336 | * @queue_lock: spinlock to synchronise access to message queue | ||
337 | * @queue: message queue | ||
338 | * @busy: workqueue is busy | ||
339 | * @running: workqueue is running | ||
340 | * @pump_transfers: Tasklet used in Interrupt Transfer mode | ||
341 | * @cur_msg: Pointer to current spi_message being processed | ||
342 | * @cur_transfer: Pointer to current spi_transfer | ||
343 | * @cur_chip: pointer to current client's chip (assigned from controller_state) | ||
344 | * @tx: current position in TX buffer to be read | ||
345 | * @tx_end: end position in TX buffer to be read | ||
346 | * @rx: current position in RX buffer to be written | ||
347 | * @rx_end: end position in RX buffer to be written | ||
348 | * @read: the type of read currently going on | ||
349 | * @write: the type of write currently going on | ||
350 | * @exp_fifo_level: expected FIFO level | ||
351 | * @dma_rx_channel: optional channel for RX DMA | ||
352 | * @dma_tx_channel: optional channel for TX DMA | ||
353 | * @sgt_rx: scattertable for the RX transfer | ||
354 | * @sgt_tx: scattertable for the TX transfer | ||
355 | * @dummypage: a dummy page used for driving data on the bus with DMA | ||
356 | */ | ||
357 | struct pl022 { | ||
358 | struct amba_device *adev; | ||
359 | struct vendor_data *vendor; | ||
360 | resource_size_t phybase; | ||
361 | void __iomem *virtbase; | ||
362 | struct clk *clk; | ||
363 | struct spi_master *master; | ||
364 | struct pl022_ssp_controller *master_info; | ||
365 | /* Driver message queue */ | ||
366 | struct workqueue_struct *workqueue; | ||
367 | struct work_struct pump_messages; | ||
368 | spinlock_t queue_lock; | ||
369 | struct list_head queue; | ||
370 | bool busy; | ||
371 | bool running; | ||
372 | /* Message transfer pump */ | ||
373 | struct tasklet_struct pump_transfers; | ||
374 | struct spi_message *cur_msg; | ||
375 | struct spi_transfer *cur_transfer; | ||
376 | struct chip_data *cur_chip; | ||
377 | void *tx; | ||
378 | void *tx_end; | ||
379 | void *rx; | ||
380 | void *rx_end; | ||
381 | enum ssp_reading read; | ||
382 | enum ssp_writing write; | ||
383 | u32 exp_fifo_level; | ||
384 | /* DMA settings */ | ||
385 | #ifdef CONFIG_DMA_ENGINE | ||
386 | struct dma_chan *dma_rx_channel; | ||
387 | struct dma_chan *dma_tx_channel; | ||
388 | struct sg_table sgt_rx; | ||
389 | struct sg_table sgt_tx; | ||
390 | char *dummypage; | ||
391 | #endif | ||
392 | }; | ||
393 | |||
394 | /** | ||
395 | * struct chip_data - To maintain runtime state of SSP for each client chip | ||
396 | * @cr0: Value of control register CR0 of SSP - on later ST variants this | ||
397 | * register is 32 bits wide rather than just 16 | ||
398 | * @cr1: Value of control register CR1 of SSP | ||
399 | * @dmacr: Value of DMA control Register of SSP | ||
400 | * @cpsr: Value of Clock prescale register | ||
401 | * @n_bytes: how many bytes (a power of 2) are required for the client's data width | ||
402 | * @enable_dma: Whether to enable DMA or not | ||
403 | * @read: function ptr to be used to read when doing xfer for this chip | ||
404 | * @write: function ptr to be used to write when doing xfer for this chip | ||
405 | * @cs_control: chip select callback provided by chip | ||
406 | * @xfer_type: polling/interrupt/DMA | ||
407 | * | ||
408 | * Runtime state of the SSP controller, maintained per chip. | ||
409 | * This is set up according to the current message being served. | ||
410 | */ | ||
411 | struct chip_data { | ||
412 | u32 cr0; | ||
413 | u16 cr1; | ||
414 | u16 dmacr; | ||
415 | u16 cpsr; | ||
416 | u8 n_bytes; | ||
417 | bool enable_dma; | ||
418 | enum ssp_reading read; | ||
419 | enum ssp_writing write; | ||
420 | void (*cs_control) (u32 command); | ||
421 | int xfer_type; | ||
422 | }; | ||
423 | |||
424 | /** | ||
425 | * null_cs_control - Dummy chip select function | ||
426 | * @command: select/deselect the chip | ||
427 | * | ||
428 | * If no chip select function is provided by the client, this is used as a | ||
429 | * dummy chip select. | ||
430 | */ | ||
431 | static void null_cs_control(u32 command) | ||
432 | { | ||
433 | pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); | ||
434 | } | ||
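A board would normally pass its own callback in the chip configuration instead of relying on null_cs_control(). A hedged sketch follows; the GPIO number is hypothetical, <linux/gpio.h> is assumed, and SSP_CHIP_SELECT/SSP_CHIP_DESELECT come from <linux/amba/pl022.h>.

	#define BOARD_SPI_CS_GPIO 42	/* hypothetical pin */

	static void board_cs_control(u32 command)
	{
		/* active-low chip select driven from a GPIO */
		gpio_set_value(BOARD_SPI_CS_GPIO,
			       command == SSP_CHIP_SELECT ? 0 : 1);
	}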
435 | |||
436 | /** | ||
437 | * giveback - current spi_message is over, schedule next message and call | ||
438 | * callback of this message. Assumes that caller already | ||
439 | * set message->status; dma and pio irqs are blocked | ||
440 | * @pl022: SSP driver private data structure | ||
441 | */ | ||
442 | static void giveback(struct pl022 *pl022) | ||
443 | { | ||
444 | struct spi_transfer *last_transfer; | ||
445 | unsigned long flags; | ||
446 | struct spi_message *msg; | ||
447 | void (*curr_cs_control) (u32 command); | ||
448 | |||
449 | /* | ||
450 | * This local reference to the chip select function | ||
451 | * is needed because we set curr_chip to NULL | ||
452 | * as a step toward terminating the message. | ||
453 | */ | ||
454 | curr_cs_control = pl022->cur_chip->cs_control; | ||
455 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
456 | msg = pl022->cur_msg; | ||
457 | pl022->cur_msg = NULL; | ||
458 | pl022->cur_transfer = NULL; | ||
459 | pl022->cur_chip = NULL; | ||
460 | queue_work(pl022->workqueue, &pl022->pump_messages); | ||
461 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
462 | |||
463 | last_transfer = list_entry(msg->transfers.prev, | ||
464 | struct spi_transfer, | ||
465 | transfer_list); | ||
466 | |||
467 | /* Delay if requested before any change in chip select */ | ||
468 | if (last_transfer->delay_usecs) | ||
469 | /* | ||
470 | * FIXME: This runs in interrupt context. | ||
471 | * Is this really smart? | ||
472 | */ | ||
473 | udelay(last_transfer->delay_usecs); | ||
474 | |||
475 | /* | ||
476 | * Drop chip select UNLESS cs_change is true or we are returning | ||
477 | * a message with an error, or next message is for another chip | ||
478 | */ | ||
479 | if (!last_transfer->cs_change) | ||
480 | curr_cs_control(SSP_CHIP_DESELECT); | ||
481 | else { | ||
482 | struct spi_message *next_msg; | ||
483 | |||
484 | /* Holding of cs was hinted, but we need to make sure | ||
485 | * the next message is for the same chip. Don't waste | ||
486 | * time with the following tests unless this was hinted. | ||
487 | * | ||
488 | * We cannot postpone this until pump_messages, because | ||
489 | * after calling msg->complete (below) the driver that | ||
490 | * sent the current message could be unloaded, which | ||
491 | * could invalidate the cs_control() callback... | ||
492 | */ | ||
493 | |||
494 | /* get a pointer to the next message, if any */ | ||
495 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
496 | if (list_empty(&pl022->queue)) | ||
497 | next_msg = NULL; | ||
498 | else | ||
499 | next_msg = list_entry(pl022->queue.next, | ||
500 | struct spi_message, queue); | ||
501 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
502 | |||
503 | /* see if the next and current messages point | ||
504 | * to the same chip | ||
505 | */ | ||
506 | if (next_msg && next_msg->spi != msg->spi) | ||
507 | next_msg = NULL; | ||
508 | if (!next_msg || msg->state == STATE_ERROR) | ||
509 | curr_cs_control(SSP_CHIP_DESELECT); | ||
510 | } | ||
511 | msg->state = NULL; | ||
512 | if (msg->complete) | ||
513 | msg->complete(msg->context); | ||
514 | /* This message is completed, so let's turn off the clocks & power */ | ||
515 | clk_disable(pl022->clk); | ||
516 | amba_pclk_disable(pl022->adev); | ||
517 | amba_vcore_disable(pl022->adev); | ||
518 | } | ||
519 | |||
520 | /** | ||
521 | * flush - flush the FIFO to reach a clean state | ||
522 | * @pl022: SSP driver private data structure | ||
523 | */ | ||
524 | static int flush(struct pl022 *pl022) | ||
525 | { | ||
526 | unsigned long limit = loops_per_jiffy << 1; | ||
527 | |||
528 | dev_dbg(&pl022->adev->dev, "flush\n"); | ||
529 | do { | ||
530 | while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) | ||
531 | readw(SSP_DR(pl022->virtbase)); | ||
532 | } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); | ||
533 | |||
534 | pl022->exp_fifo_level = 0; | ||
535 | |||
536 | return limit; | ||
537 | } | ||
538 | |||
539 | /** | ||
540 | * restore_state - Load configuration of current chip | ||
541 | * @pl022: SSP driver private data structure | ||
542 | */ | ||
543 | static void restore_state(struct pl022 *pl022) | ||
544 | { | ||
545 | struct chip_data *chip = pl022->cur_chip; | ||
546 | |||
547 | if (pl022->vendor->extended_cr) | ||
548 | writel(chip->cr0, SSP_CR0(pl022->virtbase)); | ||
549 | else | ||
550 | writew(chip->cr0, SSP_CR0(pl022->virtbase)); | ||
551 | writew(chip->cr1, SSP_CR1(pl022->virtbase)); | ||
552 | writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); | ||
553 | writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); | ||
554 | writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | ||
555 | writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * Default SSP Register Values | ||
560 | */ | ||
561 | #define DEFAULT_SSP_REG_CR0 ( \ | ||
562 | GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ | ||
563 | GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ | ||
564 | GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ | ||
565 | GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ | ||
566 | GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ | ||
567 | ) | ||
568 | |||
569 | /* ST versions have slightly different bit layout */ | ||
570 | #define DEFAULT_SSP_REG_CR0_ST ( \ | ||
571 | GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ | ||
572 | GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ | ||
573 | GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ | ||
574 | GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ | ||
575 | GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ | ||
576 | GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ | ||
577 | GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ | ||
578 | ) | ||
579 | |||
580 | /* The PL023 version is slightly different again */ | ||
581 | #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ | ||
582 | GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ | ||
583 | GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ | ||
584 | GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ | ||
585 | GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ | ||
586 | ) | ||
587 | |||
588 | #define DEFAULT_SSP_REG_CR1 ( \ | ||
589 | GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ | ||
590 | GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ | ||
591 | GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ | ||
592 | GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ | ||
593 | ) | ||
594 | |||
595 | /* ST versions extend this register to use all 16 bits */ | ||
596 | #define DEFAULT_SSP_REG_CR1_ST ( \ | ||
597 | DEFAULT_SSP_REG_CR1 | \ | ||
598 | GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ | ||
599 | GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ | ||
600 | GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ | ||
601 | GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ | ||
602 | GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ | ||
603 | ) | ||
604 | |||
605 | /* | ||
606 | * The PL023 variant has further differences: no loopback mode, no microwire | ||
607 | * support, and a new clock feedback delay setting. | ||
608 | */ | ||
609 | #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ | ||
610 | GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ | ||
611 | GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ | ||
612 | GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ | ||
613 | GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ | ||
614 | GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ | ||
615 | GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ | ||
616 | GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ | ||
617 | GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ | ||
618 | ) | ||
619 | |||
620 | #define DEFAULT_SSP_REG_CPSR ( \ | ||
621 | GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ | ||
622 | ) | ||
623 | |||
624 | #define DEFAULT_SSP_REG_DMACR (\ | ||
625 | GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ | ||
626 | GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ | ||
627 | ) | ||
628 | |||
629 | /** | ||
630 | * load_ssp_default_config - Load default configuration for SSP | ||
631 | * @pl022: SSP driver private data structure | ||
632 | */ | ||
633 | static void load_ssp_default_config(struct pl022 *pl022) | ||
634 | { | ||
635 | if (pl022->vendor->pl023) { | ||
636 | writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); | ||
637 | writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); | ||
638 | } else if (pl022->vendor->extended_cr) { | ||
639 | writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); | ||
640 | writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); | ||
641 | } else { | ||
642 | writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); | ||
643 | writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); | ||
644 | } | ||
645 | writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); | ||
646 | writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); | ||
647 | writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | ||
648 | writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); | ||
649 | } | ||
650 | |||
651 | /** | ||
652 | * This will write to TX and read from RX according to the parameters | ||
653 | * set in pl022. | ||
654 | */ | ||
655 | static void readwriter(struct pl022 *pl022) | ||
656 | { | ||
657 | |||
658 | /* | ||
659 | * The FIFO depth is different between primecell variants. | ||
660 | * I believe filling the FIFO too far might cause | ||
661 | * errors in 8-bit wide transfers on ARM variants (just an | ||
662 | * 8-word FIFO means only 8x8 = 64 bits in the FIFO) at least. | ||
663 | * | ||
664 | * To prevent this issue, the TX FIFO is only filled to the | ||
665 | * unused RX FIFO fill length, regardless of what the TX | ||
666 | * FIFO status flag indicates. | ||
667 | */ | ||
668 | dev_dbg(&pl022->adev->dev, | ||
669 | "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", | ||
670 | __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); | ||
671 | |||
672 | /* Read as much as you can */ | ||
673 | while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) | ||
674 | && (pl022->rx < pl022->rx_end)) { | ||
675 | switch (pl022->read) { | ||
676 | case READING_NULL: | ||
677 | readw(SSP_DR(pl022->virtbase)); | ||
678 | break; | ||
679 | case READING_U8: | ||
680 | *(u8 *) (pl022->rx) = | ||
681 | readw(SSP_DR(pl022->virtbase)) & 0xFFU; | ||
682 | break; | ||
683 | case READING_U16: | ||
684 | *(u16 *) (pl022->rx) = | ||
685 | (u16) readw(SSP_DR(pl022->virtbase)); | ||
686 | break; | ||
687 | case READING_U32: | ||
688 | *(u32 *) (pl022->rx) = | ||
689 | readl(SSP_DR(pl022->virtbase)); | ||
690 | break; | ||
691 | } | ||
692 | pl022->rx += (pl022->cur_chip->n_bytes); | ||
693 | pl022->exp_fifo_level--; | ||
694 | } | ||
695 | /* | ||
696 | * Write as much as possible up to the RX FIFO size | ||
697 | */ | ||
698 | while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) | ||
699 | && (pl022->tx < pl022->tx_end)) { | ||
700 | switch (pl022->write) { | ||
701 | case WRITING_NULL: | ||
702 | writew(0x0, SSP_DR(pl022->virtbase)); | ||
703 | break; | ||
704 | case WRITING_U8: | ||
705 | writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); | ||
706 | break; | ||
707 | case WRITING_U16: | ||
708 | writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); | ||
709 | break; | ||
710 | case WRITING_U32: | ||
711 | writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); | ||
712 | break; | ||
713 | } | ||
714 | pl022->tx += (pl022->cur_chip->n_bytes); | ||
715 | pl022->exp_fifo_level++; | ||
716 | /* | ||
717 | * This inner reader takes care of things appearing in the RX | ||
718 | * FIFO as we're transmitting. This will happen a lot since the | ||
719 | * clock starts running when you put things into the TX FIFO, | ||
720 | * and then things are continuously clocked into the RX FIFO. | ||
721 | */ | ||
722 | while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) | ||
723 | && (pl022->rx < pl022->rx_end)) { | ||
724 | switch (pl022->read) { | ||
725 | case READING_NULL: | ||
726 | readw(SSP_DR(pl022->virtbase)); | ||
727 | break; | ||
728 | case READING_U8: | ||
729 | *(u8 *) (pl022->rx) = | ||
730 | readw(SSP_DR(pl022->virtbase)) & 0xFFU; | ||
731 | break; | ||
732 | case READING_U16: | ||
733 | *(u16 *) (pl022->rx) = | ||
734 | (u16) readw(SSP_DR(pl022->virtbase)); | ||
735 | break; | ||
736 | case READING_U32: | ||
737 | *(u32 *) (pl022->rx) = | ||
738 | readl(SSP_DR(pl022->virtbase)); | ||
739 | break; | ||
740 | } | ||
741 | pl022->rx += (pl022->cur_chip->n_bytes); | ||
742 | pl022->exp_fifo_level--; | ||
743 | } | ||
744 | } | ||
745 | /* | ||
746 | * When we exit here the TX FIFO should be full and the RX FIFO | ||
747 | * should be empty | ||
748 | */ | ||
749 | } | ||
750 | |||
751 | |||
752 | /** | ||
753 | * next_transfer - Move to the Next transfer in the current spi message | ||
754 | * @pl022: SSP driver private data structure | ||
755 | * | ||
756 | * This function moves through the linked list of spi transfers in the | ||
757 | * current spi message and returns the state of the current spi | ||
758 | * message, i.e. whether its last transfer is done (STATE_DONE) or the | ||
759 | * next transfer is ready (STATE_RUNNING). | ||
760 | */ | ||
761 | static void *next_transfer(struct pl022 *pl022) | ||
762 | { | ||
763 | struct spi_message *msg = pl022->cur_msg; | ||
764 | struct spi_transfer *trans = pl022->cur_transfer; | ||
765 | |||
766 | /* Move to next transfer */ | ||
767 | if (trans->transfer_list.next != &msg->transfers) { | ||
768 | pl022->cur_transfer = | ||
769 | list_entry(trans->transfer_list.next, | ||
770 | struct spi_transfer, transfer_list); | ||
771 | return STATE_RUNNING; | ||
772 | } | ||
773 | return STATE_DONE; | ||
774 | } | ||
775 | |||
776 | /* | ||
777 | * This DMA functionality is only compiled in if we have | ||
778 | * access to the generic DMA devices/DMA engine. | ||
779 | */ | ||
780 | #ifdef CONFIG_DMA_ENGINE | ||
781 | static void unmap_free_dma_scatter(struct pl022 *pl022) | ||
782 | { | ||
783 | /* Unmap and free the SG tables */ | ||
784 | dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl, | ||
785 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
786 | dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl, | ||
787 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
788 | sg_free_table(&pl022->sgt_rx); | ||
789 | sg_free_table(&pl022->sgt_tx); | ||
790 | } | ||
791 | |||
792 | static void dma_callback(void *data) | ||
793 | { | ||
794 | struct pl022 *pl022 = data; | ||
795 | struct spi_message *msg = pl022->cur_msg; | ||
796 | |||
797 | BUG_ON(!pl022->sgt_rx.sgl); | ||
798 | |||
799 | #ifdef VERBOSE_DEBUG | ||
800 | /* | ||
801 | * Optionally dump out buffers to inspect contents, this is | ||
802 | * good if you want to convince yourself that the loopback | ||
803 | * read/write contents are the same, when adapting to a new | ||
804 | * DMA engine. | ||
805 | */ | ||
806 | { | ||
807 | struct scatterlist *sg; | ||
808 | unsigned int i; | ||
809 | |||
810 | dma_sync_sg_for_cpu(&pl022->adev->dev, | ||
811 | pl022->sgt_rx.sgl, | ||
812 | pl022->sgt_rx.nents, | ||
813 | DMA_FROM_DEVICE); | ||
814 | |||
815 | for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { | ||
816 | dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); | ||
817 | print_hex_dump(KERN_ERR, "SPI RX: ", | ||
818 | DUMP_PREFIX_OFFSET, | ||
819 | 16, | ||
820 | 1, | ||
821 | sg_virt(sg), | ||
822 | sg_dma_len(sg), | ||
823 | 1); | ||
824 | } | ||
825 | for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { | ||
826 | dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); | ||
827 | print_hex_dump(KERN_ERR, "SPI TX: ", | ||
828 | DUMP_PREFIX_OFFSET, | ||
829 | 16, | ||
830 | 1, | ||
831 | sg_virt(sg), | ||
832 | sg_dma_len(sg), | ||
833 | 1); | ||
834 | } | ||
835 | } | ||
836 | #endif | ||
837 | |||
838 | unmap_free_dma_scatter(pl022); | ||
839 | |||
840 | /* Update total bytes transferred */ | ||
841 | msg->actual_length += pl022->cur_transfer->len; | ||
842 | if (pl022->cur_transfer->cs_change) | ||
843 | pl022->cur_chip-> | ||
844 | cs_control(SSP_CHIP_DESELECT); | ||
845 | |||
846 | /* Move to next transfer */ | ||
847 | msg->state = next_transfer(pl022); | ||
848 | tasklet_schedule(&pl022->pump_transfers); | ||
849 | } | ||
850 | |||
851 | static void setup_dma_scatter(struct pl022 *pl022, | ||
852 | void *buffer, | ||
853 | unsigned int length, | ||
854 | struct sg_table *sgtab) | ||
855 | { | ||
856 | struct scatterlist *sg; | ||
857 | int bytesleft = length; | ||
858 | void *bufp = buffer; | ||
859 | int mapbytes; | ||
860 | int i; | ||
861 | |||
862 | if (buffer) { | ||
863 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
864 | /* | ||
865 | * If there are fewer bytes left than what fits | ||
866 | * in the current page (plus page alignment offset) | ||
867 | * we just feed in this, else we stuff in as much | ||
868 | * as we can. | ||
869 | */ | ||
870 | if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) | ||
871 | mapbytes = bytesleft; | ||
872 | else | ||
873 | mapbytes = PAGE_SIZE - offset_in_page(bufp); | ||
874 | sg_set_page(sg, virt_to_page(bufp), | ||
875 | mapbytes, offset_in_page(bufp)); | ||
876 | bufp += mapbytes; | ||
877 | bytesleft -= mapbytes; | ||
878 | dev_dbg(&pl022->adev->dev, | ||
879 | "set RX/TX target page @ %p, %d bytes, %d left\n", | ||
880 | bufp, mapbytes, bytesleft); | ||
881 | } | ||
882 | } else { | ||
883 | /* Map the dummy buffer on every page */ | ||
884 | for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { | ||
885 | if (bytesleft < PAGE_SIZE) | ||
886 | mapbytes = bytesleft; | ||
887 | else | ||
888 | mapbytes = PAGE_SIZE; | ||
889 | sg_set_page(sg, virt_to_page(pl022->dummypage), | ||
890 | mapbytes, 0); | ||
891 | bytesleft -= mapbytes; | ||
892 | dev_dbg(&pl022->adev->dev, | ||
893 | "set RX/TX to dummy page %d bytes, %d left\n", | ||
894 | mapbytes, bytesleft); | ||
895 | |||
896 | } | ||
897 | } | ||
898 | BUG_ON(bytesleft); | ||
899 | } | ||
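The page-splitting logic above can be illustrated numerically (editorial example, 4 KiB pages assumed): a 6000-byte buffer starting 512 bytes into a page is split into one 3584-byte sg entry and one 2416-byte entry. The per-iteration chunk size is simply:

	/* illustrative: the chunk size the loop above picks for one step */
	mapbytes = min_t(int, bytesleft, PAGE_SIZE - offset_in_page(bufp));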
900 | |||
901 | /** | ||
902 | * configure_dma - configures the channels for the next transfer | ||
903 | * @pl022: SSP driver's private data structure | ||
904 | */ | ||
905 | static int configure_dma(struct pl022 *pl022) | ||
906 | { | ||
907 | struct dma_slave_config rx_conf = { | ||
908 | .src_addr = SSP_DR(pl022->phybase), | ||
909 | .direction = DMA_FROM_DEVICE, | ||
910 | .src_maxburst = pl022->vendor->fifodepth >> 1, | ||
911 | }; | ||
912 | struct dma_slave_config tx_conf = { | ||
913 | .dst_addr = SSP_DR(pl022->phybase), | ||
914 | .direction = DMA_TO_DEVICE, | ||
915 | .dst_maxburst = pl022->vendor->fifodepth >> 1, | ||
916 | }; | ||
917 | unsigned int pages; | ||
918 | int ret; | ||
919 | int rx_sglen, tx_sglen; | ||
920 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
921 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
922 | struct dma_async_tx_descriptor *rxdesc; | ||
923 | struct dma_async_tx_descriptor *txdesc; | ||
924 | |||
925 | /* Check that the channels are available */ | ||
926 | if (!rxchan || !txchan) | ||
927 | return -ENODEV; | ||
928 | |||
929 | switch (pl022->read) { | ||
930 | case READING_NULL: | ||
931 | /* Use the same as for writing */ | ||
932 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
933 | break; | ||
934 | case READING_U8: | ||
935 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
936 | break; | ||
937 | case READING_U16: | ||
938 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
939 | break; | ||
940 | case READING_U32: | ||
941 | rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
942 | break; | ||
943 | } | ||
944 | |||
945 | switch (pl022->write) { | ||
946 | case WRITING_NULL: | ||
947 | /* Use the same as for reading */ | ||
948 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
949 | break; | ||
950 | case WRITING_U8: | ||
951 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; | ||
952 | break; | ||
953 | case WRITING_U16: | ||
954 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; | ||
955 | break; | ||
956 | case WRITING_U32: | ||
957 | tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; | ||
958 | break; | ||
959 | } | ||
960 | |||
961 | /* SPI peculiarity: we need to read and write the same width */ | ||
962 | if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
963 | rx_conf.src_addr_width = tx_conf.dst_addr_width; | ||
964 | if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) | ||
965 | tx_conf.dst_addr_width = rx_conf.src_addr_width; | ||
966 | BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); | ||
967 | |||
968 | dmaengine_slave_config(rxchan, &rx_conf); | ||
969 | dmaengine_slave_config(txchan, &tx_conf); | ||
970 | |||
971 | /* Create sglists for the transfers */ | ||
972 | pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; | ||
973 | dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); | ||
974 | |||
975 | ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); | ||
976 | if (ret) | ||
977 | goto err_alloc_rx_sg; | ||
978 | |||
979 | ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); | ||
980 | if (ret) | ||
981 | goto err_alloc_tx_sg; | ||
982 | |||
983 | /* Fill in the scatterlists for the RX+TX buffers */ | ||
984 | setup_dma_scatter(pl022, pl022->rx, | ||
985 | pl022->cur_transfer->len, &pl022->sgt_rx); | ||
986 | setup_dma_scatter(pl022, pl022->tx, | ||
987 | pl022->cur_transfer->len, &pl022->sgt_tx); | ||
988 | |||
989 | /* Map DMA buffers */ | ||
990 | rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl, | ||
991 | pl022->sgt_rx.nents, DMA_FROM_DEVICE); | ||
992 | if (!rx_sglen) | ||
993 | goto err_rx_sgmap; | ||
994 | |||
995 | tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl, | ||
996 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
997 | if (!tx_sglen) | ||
998 | goto err_tx_sgmap; | ||
999 | |||
1000 | /* Send both scatterlists */ | ||
1001 | rxdesc = rxchan->device->device_prep_slave_sg(rxchan, | ||
1002 | pl022->sgt_rx.sgl, | ||
1003 | rx_sglen, | ||
1004 | DMA_FROM_DEVICE, | ||
1005 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1006 | if (!rxdesc) | ||
1007 | goto err_rxdesc; | ||
1008 | |||
1009 | txdesc = txchan->device->device_prep_slave_sg(txchan, | ||
1010 | pl022->sgt_tx.sgl, | ||
1011 | tx_sglen, | ||
1012 | DMA_TO_DEVICE, | ||
1013 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); | ||
1014 | if (!txdesc) | ||
1015 | goto err_txdesc; | ||
1016 | |||
1017 | /* Put the callback on the RX transfer only, that should finish last */ | ||
1018 | rxdesc->callback = dma_callback; | ||
1019 | rxdesc->callback_param = pl022; | ||
1020 | |||
1021 | /* Submit and fire RX and TX with TX last so we're ready to read! */ | ||
1022 | dmaengine_submit(rxdesc); | ||
1023 | dmaengine_submit(txdesc); | ||
1024 | dma_async_issue_pending(rxchan); | ||
1025 | dma_async_issue_pending(txchan); | ||
1026 | |||
1027 | return 0; | ||
1028 | |||
1029 | err_txdesc: | ||
1030 | dmaengine_terminate_all(txchan); | ||
1031 | err_rxdesc: | ||
1032 | dmaengine_terminate_all(rxchan); | ||
1033 | dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl, | ||
1034 | pl022->sgt_tx.nents, DMA_TO_DEVICE); | ||
1035 | err_tx_sgmap: | ||
1036 | dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, | ||
1037 | pl022->sgt_tx.nents, DMA_FROM_DEVICE); | ||
1038 | err_rx_sgmap: | ||
1039 | sg_free_table(&pl022->sgt_tx); | ||
1040 | err_alloc_tx_sg: | ||
1041 | sg_free_table(&pl022->sgt_rx); | ||
1042 | err_alloc_rx_sg: | ||
1043 | return -ENOMEM; | ||
1044 | } | ||
1045 | |||
1046 | static int __init pl022_dma_probe(struct pl022 *pl022) | ||
1047 | { | ||
1048 | dma_cap_mask_t mask; | ||
1049 | |||
1050 | /* Try to acquire a generic DMA engine slave channel */ | ||
1051 | dma_cap_zero(mask); | ||
1052 | dma_cap_set(DMA_SLAVE, mask); | ||
1053 | /* | ||
1054 | * We need both RX and TX channels to do DMA, otherwise | ||
1055 | * use neither of them. | ||
1056 | */ | ||
1057 | pl022->dma_rx_channel = dma_request_channel(mask, | ||
1058 | pl022->master_info->dma_filter, | ||
1059 | pl022->master_info->dma_rx_param); | ||
1060 | if (!pl022->dma_rx_channel) { | ||
1061 | dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); | ||
1062 | goto err_no_rxchan; | ||
1063 | } | ||
1064 | |||
1065 | pl022->dma_tx_channel = dma_request_channel(mask, | ||
1066 | pl022->master_info->dma_filter, | ||
1067 | pl022->master_info->dma_tx_param); | ||
1068 | if (!pl022->dma_tx_channel) { | ||
1069 | dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); | ||
1070 | goto err_no_txchan; | ||
1071 | } | ||
1072 | |||
1073 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
1074 | if (!pl022->dummypage) { | ||
1075 | dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); | ||
1076 | goto err_no_dummypage; | ||
1077 | } | ||
1078 | |||
1079 | dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", | ||
1080 | dma_chan_name(pl022->dma_rx_channel), | ||
1081 | dma_chan_name(pl022->dma_tx_channel)); | ||
1082 | |||
1083 | return 0; | ||
1084 | |||
1085 | err_no_dummypage: | ||
1086 | dma_release_channel(pl022->dma_tx_channel); | ||
1087 | err_no_txchan: | ||
1088 | dma_release_channel(pl022->dma_rx_channel); | ||
1089 | pl022->dma_rx_channel = NULL; | ||
1090 | err_no_rxchan: | ||
1091 | dev_err(&pl022->adev->dev, | ||
1092 | "Failed to work in dma mode, work without dma!\n"); | ||
1093 | return -ENODEV; | ||
1094 | } | ||
1095 | |||
1096 | static void terminate_dma(struct pl022 *pl022) | ||
1097 | { | ||
1098 | struct dma_chan *rxchan = pl022->dma_rx_channel; | ||
1099 | struct dma_chan *txchan = pl022->dma_tx_channel; | ||
1100 | |||
1101 | dmaengine_terminate_all(rxchan); | ||
1102 | dmaengine_terminate_all(txchan); | ||
1103 | unmap_free_dma_scatter(pl022); | ||
1104 | } | ||
1105 | |||
1106 | static void pl022_dma_remove(struct pl022 *pl022) | ||
1107 | { | ||
1108 | if (pl022->busy) | ||
1109 | terminate_dma(pl022); | ||
1110 | if (pl022->dma_tx_channel) | ||
1111 | dma_release_channel(pl022->dma_tx_channel); | ||
1112 | if (pl022->dma_rx_channel) | ||
1113 | dma_release_channel(pl022->dma_rx_channel); | ||
1114 | kfree(pl022->dummypage); | ||
1115 | } | ||
1116 | |||
1117 | #else | ||
1118 | static inline int configure_dma(struct pl022 *pl022) | ||
1119 | { | ||
1120 | return -ENODEV; | ||
1121 | } | ||
1122 | |||
1123 | static inline int pl022_dma_probe(struct pl022 *pl022) | ||
1124 | { | ||
1125 | return 0; | ||
1126 | } | ||
1127 | |||
1128 | static inline void pl022_dma_remove(struct pl022 *pl022) | ||
1129 | { | ||
1130 | } | ||
1131 | #endif | ||
1132 | |||
1133 | /** | ||
1134 | * pl022_interrupt_handler - Interrupt handler for SSP controller | ||
1135 | * | ||
1136 | * This function handles interrupts generated for an interrupt based transfer. | ||
1137 | * If a receive overrun (ROR) interrupt occurs, we disable the SSP, flag the | ||
1138 | * current message's state as STATE_ERROR and schedule the tasklet | ||
1139 | * pump_transfers which will do the postprocessing of the current message by | ||
1140 | * calling giveback(). Otherwise it reads data from the RX FIFO until there is | ||
1141 | * no more data, and writes data to the TX FIFO while it is not full. If we complete | ||
1142 | * the transfer we move to the next transfer and schedule the tasklet. | ||
1143 | */ | ||
1144 | static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) | ||
1145 | { | ||
1146 | struct pl022 *pl022 = dev_id; | ||
1147 | struct spi_message *msg = pl022->cur_msg; | ||
1148 | u16 irq_status = 0; | ||
1149 | u16 flag = 0; | ||
1150 | |||
1151 | if (unlikely(!msg)) { | ||
1152 | dev_err(&pl022->adev->dev, | ||
1153 | "bad message state in interrupt handler"); | ||
1154 | /* Never fail */ | ||
1155 | return IRQ_HANDLED; | ||
1156 | } | ||
1157 | |||
1158 | /* Read the Interrupt Status Register */ | ||
1159 | irq_status = readw(SSP_MIS(pl022->virtbase)); | ||
1160 | |||
1161 | if (unlikely(!irq_status)) | ||
1162 | return IRQ_NONE; | ||
1163 | |||
1164 | /* | ||
1165 | * This handles the FIFO interrupts; the timeout | ||
1166 | * interrupts are flatly ignored since they cannot be | ||
1167 | * trusted. | ||
1168 | */ | ||
1169 | if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { | ||
1170 | /* | ||
1171 | * Overrun interrupt - bail out since our Data has been | ||
1172 | * corrupted | ||
1173 | */ | ||
1174 | dev_err(&pl022->adev->dev, "FIFO overrun\n"); | ||
1175 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) | ||
1176 | dev_err(&pl022->adev->dev, | ||
1177 | "RXFIFO is full\n"); | ||
1178 | if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) | ||
1179 | dev_err(&pl022->adev->dev, | ||
1180 | "TXFIFO is full\n"); | ||
1181 | |||
1182 | /* | ||
1183 | * Disable and clear interrupts, disable SSP, | ||
1184 | * mark message with bad status so it can be | ||
1185 | * retried. | ||
1186 | */ | ||
1187 | writew(DISABLE_ALL_INTERRUPTS, | ||
1188 | SSP_IMSC(pl022->virtbase)); | ||
1189 | writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); | ||
1190 | writew((readw(SSP_CR1(pl022->virtbase)) & | ||
1191 | (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); | ||
1192 | msg->state = STATE_ERROR; | ||
1193 | |||
1194 | /* Schedule message queue handler */ | ||
1195 | tasklet_schedule(&pl022->pump_transfers); | ||
1196 | return IRQ_HANDLED; | ||
1197 | } | ||
1198 | |||
1199 | readwriter(pl022); | ||
1200 | |||
1201 | if ((pl022->tx == pl022->tx_end) && (flag == 0)) { | ||
1202 | flag = 1; | ||
1203 | /* Disable Transmit interrupt */ | ||
1204 | writew(readw(SSP_IMSC(pl022->virtbase)) & | ||
1205 | (~SSP_IMSC_MASK_TXIM), | ||
1206 | SSP_IMSC(pl022->virtbase)); | ||
1207 | } | ||
1208 | |||
1209 | /* | ||
1210 | * Since all transactions must write as much as shall be read, | ||
1211 | * we can conclude the entire transaction once RX is complete. | ||
1212 | * At this point, all TX will always be finished. | ||
1213 | */ | ||
1214 | if (pl022->rx >= pl022->rx_end) { | ||
1215 | writew(DISABLE_ALL_INTERRUPTS, | ||
1216 | SSP_IMSC(pl022->virtbase)); | ||
1217 | writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); | ||
1218 | if (unlikely(pl022->rx > pl022->rx_end)) { | ||
1219 | dev_warn(&pl022->adev->dev, "read %u surplus " | ||
1220 | "bytes (did you request an odd " | ||
1221 | "number of bytes on a 16bit bus?)\n", | ||
1222 | (u32) (pl022->rx - pl022->rx_end)); | ||
1223 | } | ||
1224 | /* Update total bytes transferred */ | ||
1225 | msg->actual_length += pl022->cur_transfer->len; | ||
1226 | if (pl022->cur_transfer->cs_change) | ||
1227 | pl022->cur_chip-> | ||
1228 | cs_control(SSP_CHIP_DESELECT); | ||
1229 | /* Move to next transfer */ | ||
1230 | msg->state = next_transfer(pl022); | ||
1231 | tasklet_schedule(&pl022->pump_transfers); | ||
1232 | return IRQ_HANDLED; | ||
1233 | } | ||
1234 | |||
1235 | return IRQ_HANDLED; | ||
1236 | } | ||
1237 | |||
1238 | /** | ||
1239 | * This sets up the pointers to memory for the next transfer to | ||
1240 | * send out on the SPI bus. | ||
1241 | */ | ||
1242 | static int set_up_next_transfer(struct pl022 *pl022, | ||
1243 | struct spi_transfer *transfer) | ||
1244 | { | ||
1245 | int residue; | ||
1246 | |||
1247 | /* Sanity check the message for this bus width */ | ||
1248 | residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; | ||
1249 | if (unlikely(residue != 0)) { | ||
1250 | dev_err(&pl022->adev->dev, | ||
1251 | "message of %u bytes to transmit but the current " | ||
1252 | "chip bus has a data width of %u bytes!\n", | ||
1253 | pl022->cur_transfer->len, | ||
1254 | pl022->cur_chip->n_bytes); | ||
1255 | dev_err(&pl022->adev->dev, "skipping this message\n"); | ||
1256 | return -EIO; | ||
1257 | } | ||
1258 | pl022->tx = (void *)transfer->tx_buf; | ||
1259 | pl022->tx_end = pl022->tx + pl022->cur_transfer->len; | ||
1260 | pl022->rx = (void *)transfer->rx_buf; | ||
1261 | pl022->rx_end = pl022->rx + pl022->cur_transfer->len; | ||
1262 | pl022->write = | ||
1263 | pl022->tx ? pl022->cur_chip->write : WRITING_NULL; | ||
1264 | pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | /** | ||
1269 | * pump_transfers - Tasklet function which schedules next transfer | ||
1270 | * when running in interrupt or DMA transfer mode. | ||
1271 | * @data: SSP driver private data structure | ||
1272 | * | ||
1273 | */ | ||
1274 | static void pump_transfers(unsigned long data) | ||
1275 | { | ||
1276 | struct pl022 *pl022 = (struct pl022 *) data; | ||
1277 | struct spi_message *message = NULL; | ||
1278 | struct spi_transfer *transfer = NULL; | ||
1279 | struct spi_transfer *previous = NULL; | ||
1280 | |||
1281 | /* Get current state information */ | ||
1282 | message = pl022->cur_msg; | ||
1283 | transfer = pl022->cur_transfer; | ||
1284 | |||
1285 | /* Handle for abort */ | ||
1286 | if (message->state == STATE_ERROR) { | ||
1287 | message->status = -EIO; | ||
1288 | giveback(pl022); | ||
1289 | return; | ||
1290 | } | ||
1291 | |||
1292 | /* Handle end of message */ | ||
1293 | if (message->state == STATE_DONE) { | ||
1294 | message->status = 0; | ||
1295 | giveback(pl022); | ||
1296 | return; | ||
1297 | } | ||
1298 | |||
1299 | /* Delay if requested at end of transfer before CS change */ | ||
1300 | if (message->state == STATE_RUNNING) { | ||
1301 | previous = list_entry(transfer->transfer_list.prev, | ||
1302 | struct spi_transfer, | ||
1303 | transfer_list); | ||
1304 | if (previous->delay_usecs) | ||
1305 | /* | ||
1306 | * FIXME: This runs in interrupt context. | ||
1307 | * Is this really smart? | ||
1308 | */ | ||
1309 | udelay(previous->delay_usecs); | ||
1310 | |||
1311 | /* Drop chip select only if cs_change is requested */ | ||
1312 | if (previous->cs_change) | ||
1313 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | ||
1314 | } else { | ||
1315 | /* STATE_START */ | ||
1316 | message->state = STATE_RUNNING; | ||
1317 | } | ||
1318 | |||
1319 | if (set_up_next_transfer(pl022, transfer)) { | ||
1320 | message->state = STATE_ERROR; | ||
1321 | message->status = -EIO; | ||
1322 | giveback(pl022); | ||
1323 | return; | ||
1324 | } | ||
1325 | /* Flush the FIFOs and let's go! */ | ||
1326 | flush(pl022); | ||
1327 | |||
1328 | if (pl022->cur_chip->enable_dma) { | ||
1329 | if (configure_dma(pl022)) { | ||
1330 | dev_dbg(&pl022->adev->dev, | ||
1331 | "configuration of DMA failed, fall back to interrupt mode\n"); | ||
1332 | goto err_config_dma; | ||
1333 | } | ||
1334 | return; | ||
1335 | } | ||
1336 | |||
1337 | err_config_dma: | ||
1338 | writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); | ||
1339 | } | ||
1340 | |||
1341 | static void do_interrupt_dma_transfer(struct pl022 *pl022) | ||
1342 | { | ||
1343 | u32 irqflags = ENABLE_ALL_INTERRUPTS; | ||
1344 | |||
1345 | /* Enable target chip */ | ||
1346 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | ||
1347 | if (set_up_next_transfer(pl022, pl022->cur_transfer)) { | ||
1348 | /* Error path */ | ||
1349 | pl022->cur_msg->state = STATE_ERROR; | ||
1350 | pl022->cur_msg->status = -EIO; | ||
1351 | giveback(pl022); | ||
1352 | return; | ||
1353 | } | ||
1354 | /* If we're using DMA, set up DMA here */ | ||
1355 | if (pl022->cur_chip->enable_dma) { | ||
1356 | /* Configure DMA transfer */ | ||
1357 | if (configure_dma(pl022)) { | ||
1358 | dev_dbg(&pl022->adev->dev, | ||
1359 | "configuration of DMA failed, fall back to interrupt mode\n"); | ||
1360 | goto err_config_dma; | ||
1361 | } | ||
1362 | /* Disable interrupts in DMA mode, IRQ from DMA controller */ | ||
1363 | irqflags = DISABLE_ALL_INTERRUPTS; | ||
1364 | } | ||
1365 | err_config_dma: | ||
1366 | /* Enable SSP, turn on interrupts */ | ||
1367 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | ||
1368 | SSP_CR1(pl022->virtbase)); | ||
1369 | writew(irqflags, SSP_IMSC(pl022->virtbase)); | ||
1370 | } | ||
1371 | |||
1372 | static void do_polling_transfer(struct pl022 *pl022) | ||
1373 | { | ||
1374 | struct spi_message *message = NULL; | ||
1375 | struct spi_transfer *transfer = NULL; | ||
1376 | struct spi_transfer *previous = NULL; | ||
1377 | struct chip_data *chip; | ||
1378 | unsigned long time, timeout; | ||
1379 | |||
1380 | chip = pl022->cur_chip; | ||
1381 | message = pl022->cur_msg; | ||
1382 | |||
1383 | while (message->state != STATE_DONE) { | ||
1384 | /* Handle for abort */ | ||
1385 | if (message->state == STATE_ERROR) | ||
1386 | break; | ||
1387 | transfer = pl022->cur_transfer; | ||
1388 | |||
1389 | /* Delay if requested at end of transfer */ | ||
1390 | if (message->state == STATE_RUNNING) { | ||
1391 | previous = | ||
1392 | list_entry(transfer->transfer_list.prev, | ||
1393 | struct spi_transfer, transfer_list); | ||
1394 | if (previous->delay_usecs) | ||
1395 | udelay(previous->delay_usecs); | ||
1396 | if (previous->cs_change) | ||
1397 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | ||
1398 | } else { | ||
1399 | /* STATE_START */ | ||
1400 | message->state = STATE_RUNNING; | ||
1401 | pl022->cur_chip->cs_control(SSP_CHIP_SELECT); | ||
1402 | } | ||
1403 | |||
1404 | /* Configuration changes per transfer */ | ||
1405 | if (set_up_next_transfer(pl022, transfer)) { | ||
1406 | /* Error path */ | ||
1407 | message->state = STATE_ERROR; | ||
1408 | break; | ||
1409 | } | ||
1410 | /* Flush FIFOs and enable SSP */ | ||
1411 | flush(pl022); | ||
1412 | writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), | ||
1413 | SSP_CR1(pl022->virtbase)); | ||
1414 | |||
1415 | dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); | ||
1416 | |||
1417 | timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); | ||
1418 | while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { | ||
1419 | time = jiffies; | ||
1420 | readwriter(pl022); | ||
1421 | if (time_after(time, timeout)) { | ||
1422 | dev_warn(&pl022->adev->dev, | ||
1423 | "%s: timeout!\n", __func__); | ||
1424 | message->state = STATE_ERROR; | ||
1425 | goto out; | ||
1426 | } | ||
1427 | cpu_relax(); | ||
1428 | } | ||
1429 | |||
1430 | /* Update total bytes transferred */ | ||
1431 | message->actual_length += pl022->cur_transfer->len; | ||
1432 | if (pl022->cur_transfer->cs_change) | ||
1433 | pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); | ||
1434 | /* Move to next transfer */ | ||
1435 | message->state = next_transfer(pl022); | ||
1436 | } | ||
1437 | out: | ||
1438 | /* Handle end of message */ | ||
1439 | if (message->state == STATE_DONE) | ||
1440 | message->status = 0; | ||
1441 | else | ||
1442 | message->status = -EIO; | ||
1443 | |||
1444 | giveback(pl022); | ||
1445 | return; | ||
1446 | } | ||
1447 | |||
1448 | /** | ||
1449 | * pump_messages - Workqueue function which processes spi message queue | ||
1450 | * @data: pointer to private data of SSP driver | ||
1451 | * | ||
1452 | * This function checks if there is any spi message in the queue that | ||
1453 | * needs processing and delegates control to the appropriate function, | ||
1454 | * do_polling_transfer()/do_interrupt_dma_transfer(), | ||
1455 | * based on the kind of transfer. | ||
1456 | * | ||
1457 | */ | ||
1458 | static void pump_messages(struct work_struct *work) | ||
1459 | { | ||
1460 | struct pl022 *pl022 = | ||
1461 | container_of(work, struct pl022, pump_messages); | ||
1462 | unsigned long flags; | ||
1463 | |||
1464 | /* Lock queue and check for queue work */ | ||
1465 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
1466 | if (list_empty(&pl022->queue) || !pl022->running) { | ||
1467 | pl022->busy = false; | ||
1468 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1469 | return; | ||
1470 | } | ||
1471 | /* Make sure we are not already running a message */ | ||
1472 | if (pl022->cur_msg) { | ||
1473 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1474 | return; | ||
1475 | } | ||
1476 | /* Extract head of queue */ | ||
1477 | pl022->cur_msg = | ||
1478 | list_entry(pl022->queue.next, struct spi_message, queue); | ||
1479 | |||
1480 | list_del_init(&pl022->cur_msg->queue); | ||
1481 | pl022->busy = true; | ||
1482 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1483 | |||
1484 | /* Initial message state */ | ||
1485 | pl022->cur_msg->state = STATE_START; | ||
1486 | pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, | ||
1487 | struct spi_transfer, | ||
1488 | transfer_list); | ||
1489 | |||
1490 | /* Setup the SPI using the per chip configuration */ | ||
1491 | pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); | ||
1492 | /* | ||
1493 | * We enable the core voltage and clocks here, then the clocks | ||
1494 | * and core will be disabled when giveback() is called in each method | ||
1495 | * (poll/interrupt/DMA) | ||
1496 | */ | ||
1497 | amba_vcore_enable(pl022->adev); | ||
1498 | amba_pclk_enable(pl022->adev); | ||
1499 | clk_enable(pl022->clk); | ||
1500 | restore_state(pl022); | ||
1501 | flush(pl022); | ||
1502 | |||
1503 | if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) | ||
1504 | do_polling_transfer(pl022); | ||
1505 | else | ||
1506 | do_interrupt_dma_transfer(pl022); | ||
1507 | } | ||
1508 | |||
1509 | |||
1510 | static int __init init_queue(struct pl022 *pl022) | ||
1511 | { | ||
1512 | INIT_LIST_HEAD(&pl022->queue); | ||
1513 | spin_lock_init(&pl022->queue_lock); | ||
1514 | |||
1515 | pl022->running = false; | ||
1516 | pl022->busy = false; | ||
1517 | |||
1518 | tasklet_init(&pl022->pump_transfers, | ||
1519 | pump_transfers, (unsigned long)pl022); | ||
1520 | |||
1521 | INIT_WORK(&pl022->pump_messages, pump_messages); | ||
1522 | pl022->workqueue = create_singlethread_workqueue( | ||
1523 | dev_name(pl022->master->dev.parent)); | ||
1524 | if (pl022->workqueue == NULL) | ||
1525 | return -EBUSY; | ||
1526 | |||
1527 | return 0; | ||
1528 | } | ||
1529 | |||
1530 | |||
1531 | static int start_queue(struct pl022 *pl022) | ||
1532 | { | ||
1533 | unsigned long flags; | ||
1534 | |||
1535 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
1536 | |||
1537 | if (pl022->running || pl022->busy) { | ||
1538 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1539 | return -EBUSY; | ||
1540 | } | ||
1541 | |||
1542 | pl022->running = true; | ||
1543 | pl022->cur_msg = NULL; | ||
1544 | pl022->cur_transfer = NULL; | ||
1545 | pl022->cur_chip = NULL; | ||
1546 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1547 | |||
1548 | queue_work(pl022->workqueue, &pl022->pump_messages); | ||
1549 | |||
1550 | return 0; | ||
1551 | } | ||
1552 | |||
1553 | |||
1554 | static int stop_queue(struct pl022 *pl022) | ||
1555 | { | ||
1556 | unsigned long flags; | ||
1557 | unsigned limit = 500; | ||
1558 | int status = 0; | ||
1559 | |||
1560 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
1561 | |||
1562 | /* This is a bit lame, but is optimized for the common execution path. | ||
1563 | * A wait_queue on the pl022->busy could be used, but then the common | ||
1564 | * execution path (pump_messages) would be required to call wake_up or | ||
1565 | * friends on every SPI message. Do this instead */ | ||
1566 | while ((!list_empty(&pl022->queue) || pl022->busy) && limit--) { | ||
1567 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1568 | msleep(10); | ||
1569 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
1570 | } | ||
1571 | |||
1572 | if (!list_empty(&pl022->queue) || pl022->busy) | ||
1573 | status = -EBUSY; | ||
1574 | else | ||
1575 | pl022->running = false; | ||
1576 | |||
1577 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1578 | |||
1579 | return status; | ||
1580 | } | ||
1581 | |||
1582 | static int destroy_queue(struct pl022 *pl022) | ||
1583 | { | ||
1584 | int status; | ||
1585 | |||
1586 | status = stop_queue(pl022); | ||
1587 | /* we are unloading the module or failing to load (only two calls | ||
1588 | * to this routine), and neither call can handle a return value. | ||
1589 | * However, destroy_workqueue calls flush_workqueue, and that will | ||
1590 | * block until all work is done. If the reason that stop_queue | ||
1591 | * timed out is that the work will never finish, then it does no | ||
1592 | * good to call destroy_workqueue, so return anyway. */ | ||
1593 | if (status != 0) | ||
1594 | return status; | ||
1595 | |||
1596 | destroy_workqueue(pl022->workqueue); | ||
1597 | |||
1598 | return 0; | ||
1599 | } | ||
1600 | |||
1601 | static int verify_controller_parameters(struct pl022 *pl022, | ||
1602 | struct pl022_config_chip const *chip_info) | ||
1603 | { | ||
1604 | if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) | ||
1605 | || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { | ||
1606 | dev_err(&pl022->adev->dev, | ||
1607 | "interface is configured incorrectly\n"); | ||
1608 | return -EINVAL; | ||
1609 | } | ||
1610 | if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && | ||
1611 | (!pl022->vendor->unidir)) { | ||
1612 | dev_err(&pl022->adev->dev, | ||
1613 | "unidirectional mode not supported in this " | ||
1614 | "hardware version\n"); | ||
1615 | return -EINVAL; | ||
1616 | } | ||
1617 | if ((chip_info->hierarchy != SSP_MASTER) | ||
1618 | && (chip_info->hierarchy != SSP_SLAVE)) { | ||
1619 | dev_err(&pl022->adev->dev, | ||
1620 | "hierarchy is configured incorrectly\n"); | ||
1621 | return -EINVAL; | ||
1622 | } | ||
1623 | if ((chip_info->com_mode != INTERRUPT_TRANSFER) | ||
1624 | && (chip_info->com_mode != DMA_TRANSFER) | ||
1625 | && (chip_info->com_mode != POLLING_TRANSFER)) { | ||
1626 | dev_err(&pl022->adev->dev, | ||
1627 | "Communication mode is configured incorrectly\n"); | ||
1628 | return -EINVAL; | ||
1629 | } | ||
1630 | if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) | ||
1631 | || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { | ||
1632 | dev_err(&pl022->adev->dev, | ||
1633 | "RX FIFO Trigger Level is configured incorrectly\n"); | ||
1634 | return -EINVAL; | ||
1635 | } | ||
1636 | if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) | ||
1637 | || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { | ||
1638 | dev_err(&pl022->adev->dev, | ||
1639 | "TX FIFO Trigger Level is configured incorrectly\n"); | ||
1640 | return -EINVAL; | ||
1641 | } | ||
1642 | if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { | ||
1643 | if ((chip_info->ctrl_len < SSP_BITS_4) | ||
1644 | || (chip_info->ctrl_len > SSP_BITS_32)) { | ||
1645 | dev_err(&pl022->adev->dev, | ||
1646 | "CTRL LEN is configured incorrectly\n"); | ||
1647 | return -EINVAL; | ||
1648 | } | ||
1649 | if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) | ||
1650 | && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { | ||
1651 | dev_err(&pl022->adev->dev, | ||
1652 | "Wait State is configured incorrectly\n"); | ||
1653 | return -EINVAL; | ||
1654 | } | ||
1655 | /* Half duplex is only available in the ST Micro version */ | ||
1656 | if (pl022->vendor->extended_cr) { | ||
1657 | if ((chip_info->duplex != | ||
1658 | SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) | ||
1659 | && (chip_info->duplex != | ||
1660 | SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { | ||
1661 | dev_err(&pl022->adev->dev, | ||
1662 | "Microwire duplex mode is configured incorrectly\n"); | ||
1663 | return -EINVAL; | ||
1664 | } | ||
1665 | } else { | ||
1666 | if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) { | ||
1667 | dev_err(&pl022->adev->dev, | ||
1668 | "Microwire half duplex mode requested, but this is" | ||
1669 | " only available in the ST version of PL022\n"); | ||
1670 | return -EINVAL; | ||
1671 | } | ||
1672 | } | ||
1673 | } | ||
1674 | return 0; | ||
1675 | } | ||
1676 | |||
1677 | /** | ||
1678 | * pl022_transfer - transfer function registered to SPI master framework | ||
1679 | * @spi: spi device which is requesting transfer | ||
1680 | * @msg: spi message which is to be handled; it is queued on the driver queue | ||
1681 | * | ||
1682 | * This function is registered to the SPI framework for this SPI master | ||
1683 | * controller. It queues the spi_message on the driver queue if the | ||
1684 | * queue is not stopped, and then returns. | ||
1685 | */ | ||
1686 | static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) | ||
1687 | { | ||
1688 | struct pl022 *pl022 = spi_master_get_devdata(spi->master); | ||
1689 | unsigned long flags; | ||
1690 | |||
1691 | spin_lock_irqsave(&pl022->queue_lock, flags); | ||
1692 | |||
1693 | if (!pl022->running) { | ||
1694 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1695 | return -ESHUTDOWN; | ||
1696 | } | ||
1697 | msg->actual_length = 0; | ||
1698 | msg->status = -EINPROGRESS; | ||
1699 | msg->state = STATE_START; | ||
1700 | |||
1701 | list_add_tail(&msg->queue, &pl022->queue); | ||
1702 | if (pl022->running && !pl022->busy) | ||
1703 | queue_work(pl022->workqueue, &pl022->pump_messages); | ||
1704 | |||
1705 | spin_unlock_irqrestore(&pl022->queue_lock, flags); | ||
1706 | return 0; | ||
1707 | } | ||
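/*
 * Editorial sketch, not part of the driver: roughly how a protocol
 * driver hands a message to this controller. spi_sync() wraps
 * spi_async(), which calls master->transfer, i.e. pl022_transfer()
 * above. The function name and buffer contents are hypothetical, and
 * stack buffers are only suitable for PIO; DMA transfers want
 * kmalloc'd (DMA-safe) buffers.
 */
static int pl022_example_message(struct spi_device *spi)
{
	u8 tx[2] = { 0x9f, 0x00 };	/* arbitrary example bytes */
	u8 rx[2];
	struct spi_transfer t = {
		.tx_buf = tx,
		.rx_buf = rx,
		.len = sizeof(tx),
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t, &m);
	/* blocks until giveback() completes the message */
	return spi_sync(spi, &m);
}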
1708 | |||
1709 | static int calculate_effective_freq(struct pl022 *pl022, | ||
1710 | int freq, | ||
1711 | struct ssp_clock_params *clk_freq) | ||
1712 | { | ||
1713 | /* Let's calculate the frequency parameters */ | ||
1714 | u16 cpsdvsr = 2; | ||
1715 | u16 scr = 0; | ||
1716 | bool freq_found = false; | ||
1717 | u32 rate; | ||
1718 | u32 max_tclk; | ||
1719 | u32 min_tclk; | ||
1720 | |||
1721 | rate = clk_get_rate(pl022->clk); | ||
1722 | /* cpsdvsr = 2 & scr = 0 */ | ||
1723 | max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); | ||
1724 | /* cpsdvsr = 254 & scr = 255 */ | ||
1725 | min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); | ||
1726 | |||
1727 | if ((freq <= max_tclk) && (freq >= min_tclk)) { | ||
1728 | while (cpsdvsr <= CPSDVR_MAX && !freq_found) { | ||
1729 | while (scr <= SCR_MAX && !freq_found) { | ||
1730 | if ((rate / | ||
1731 | (cpsdvsr * (1 + scr))) > freq) | ||
1732 | scr += 1; | ||
1733 | else { | ||
1734 | /* | ||
1735 | * This bool is made true when | ||
1736 | * effective frequency >= | ||
1737 | * target frequency is found | ||
1738 | */ | ||
1739 | freq_found = true; | ||
1740 | if ((rate / | ||
1741 | (cpsdvsr * (1 + scr))) != freq) { | ||
1742 | if (scr == SCR_MIN) { | ||
1743 | cpsdvsr -= 2; | ||
1744 | scr = SCR_MAX; | ||
1745 | } else | ||
1746 | scr -= 1; | ||
1747 | } | ||
1748 | } | ||
1749 | } | ||
1750 | if (!freq_found) { | ||
1751 | cpsdvsr += 2; | ||
1752 | scr = SCR_MIN; | ||
1753 | } | ||
1754 | } | ||
1755 | if (cpsdvsr != 0) { | ||
1756 | dev_dbg(&pl022->adev->dev, | ||
1757 | "SSP Effective Frequency is %u\n", | ||
1758 | (rate / (cpsdvsr * (1 + scr)))); | ||
1759 | clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); | ||
1760 | clk_freq->scr = (u8) (scr & 0xFF); | ||
1761 | dev_dbg(&pl022->adev->dev, | ||
1762 | "SSP cpsdvsr = %d, scr = %d\n", | ||
1763 | clk_freq->cpsdvsr, clk_freq->scr); | ||
1764 | } | ||
1765 | } else { | ||
1766 | dev_err(&pl022->adev->dev, | ||
1767 | "controller data is incorrect: out of range frequency"); | ||
1768 | return -EINVAL; | ||
1769 | } | ||
1770 | return 0; | ||
1771 | } | ||
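/*
 * Editorial note, a worked example of the search above assuming a
 * hypothetical 48 MHz SSP input clock. The effective bit rate is
 * rate / (cpsdvsr * (1 + scr)), so the reachable window is
 *   max_tclk = 48000000 / (2 * (1 + 0))     = 24 MHz
 *   min_tclk = 48000000 / (254 * (1 + 255)) ~= 738 Hz
 * and a request for 4 MHz settles on cpsdvsr = 2, scr = 5, because
 * 48000000 / (2 * (1 + 5)) = 4000000 exactly.
 */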
1772 | |||
1773 | |||
1774 | /* | ||
1775 | * A piece of default chip info unless the platform | ||
1776 | * supplies it. | ||
1777 | */ | ||
1778 | static const struct pl022_config_chip pl022_default_chip_info = { | ||
1779 | .com_mode = POLLING_TRANSFER, | ||
1780 | .iface = SSP_INTERFACE_MOTOROLA_SPI, | ||
1781 | .hierarchy = SSP_SLAVE, | ||
1782 | .slave_tx_disable = DO_NOT_DRIVE_TX, | ||
1783 | .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, | ||
1784 | .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, | ||
1785 | .ctrl_len = SSP_BITS_8, | ||
1786 | .wait_state = SSP_MWIRE_WAIT_ZERO, | ||
1787 | .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, | ||
1788 | .cs_control = null_cs_control, | ||
1789 | }; | ||
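/*
 * Editorial sketch, not part of the driver: a board file overrides the
 * defaults above by pointing spi_board_info.controller_data at its own
 * pl022_config_chip. Every name below (board_cs_control,
 * board_chip_info, the "spidev" slave) is a hypothetical example.
 */
#if 0	/* illustration only */
static void board_cs_control(u32 control)
{
	/* toggle the chip select GPIO on SSP_CHIP_SELECT/SSP_CHIP_DESELECT */
}

static struct pl022_config_chip board_chip_info = {
	.com_mode = INTERRUPT_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.cs_control = board_cs_control,
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias = "spidev",
		.controller_data = &board_chip_info,
		.max_speed_hz = 1000000,
		.bus_num = 0,
		.chip_select = 0,
		.mode = SPI_MODE_0,
	},
};
#endif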
1790 | |||
1791 | |||
1792 | /** | ||
1793 | * pl022_setup - setup function registered to SPI master framework | ||
1794 | * @spi: spi device which is requesting setup | ||
1795 | * | ||
1796 | * This function is registered to the SPI framework for this SPI master | ||
1797 | * controller. The first time setup is called by a device, this function | ||
1798 | * will initialize the runtime state for this chip and save it in the | ||
1799 | * device structure. On subsequent calls it will update the runtime info | ||
1800 | * with the new chip info. Nothing is actually written to the | ||
1801 | * controller hardware here; that is not done until the actual transfer | ||
1802 | * commences. | ||
1803 | */ | ||
1804 | static int pl022_setup(struct spi_device *spi) | ||
1805 | { | ||
1806 | struct pl022_config_chip const *chip_info; | ||
1807 | struct chip_data *chip; | ||
1808 | struct ssp_clock_params clk_freq = {0, }; | ||
1809 | int status = 0; | ||
1810 | struct pl022 *pl022 = spi_master_get_devdata(spi->master); | ||
1811 | unsigned int bits = spi->bits_per_word; | ||
1812 | u32 tmp; | ||
1813 | |||
1814 | if (!spi->max_speed_hz) | ||
1815 | return -EINVAL; | ||
1816 | |||
1817 | /* Get controller_state if one is supplied */ | ||
1818 | chip = spi_get_ctldata(spi); | ||
1819 | |||
1820 | if (chip == NULL) { | ||
1821 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | ||
1822 | if (!chip) { | ||
1823 | dev_err(&spi->dev, | ||
1824 | "cannot allocate controller state\n"); | ||
1825 | return -ENOMEM; | ||
1826 | } | ||
1827 | dev_dbg(&spi->dev, | ||
1828 | "allocated memory for controller's runtime state\n"); | ||
1829 | } | ||
1830 | |||
1831 | /* Get controller data if one is supplied */ | ||
1832 | chip_info = spi->controller_data; | ||
1833 | |||
1834 | if (chip_info == NULL) { | ||
1835 | chip_info = &pl022_default_chip_info; | ||
1836 | /* spi_board_info.controller_data is not supplied */ | ||
1837 | dev_dbg(&spi->dev, | ||
1838 | "using default controller_data settings\n"); | ||
1839 | } else | ||
1840 | dev_dbg(&spi->dev, | ||
1841 | "using user supplied controller_data settings\n"); | ||
1842 | |||
1843 | /* | ||
1844 | * We can override with custom divisors, else we use the board | ||
1845 | * frequency setting | ||
1846 | */ | ||
1847 | if ((0 == chip_info->clk_freq.cpsdvsr) | ||
1848 | && (0 == chip_info->clk_freq.scr)) { | ||
1849 | status = calculate_effective_freq(pl022, | ||
1850 | spi->max_speed_hz, | ||
1851 | &clk_freq); | ||
1852 | if (status < 0) | ||
1853 | goto err_config_params; | ||
1854 | } else { | ||
1855 | memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); | ||
1856 | if ((clk_freq.cpsdvsr % 2) != 0) | ||
1857 | clk_freq.cpsdvsr = | ||
1858 | clk_freq.cpsdvsr - 1; | ||
1859 | } | ||
1860 | if ((clk_freq.cpsdvsr < CPSDVR_MIN) | ||
1861 | || (clk_freq.cpsdvsr > CPSDVR_MAX)) { | ||
1862 | status = -EINVAL; | ||
1863 | dev_err(&spi->dev, "cpsdvsr is configured incorrectly\n"); | ||
1864 | goto err_config_params; | ||
1865 | } | ||
1866 | |||
1867 | |||
1868 | status = verify_controller_parameters(pl022, chip_info); | ||
1869 | if (status) { | ||
1870 | dev_err(&spi->dev, "controller data is incorrect"); | ||
1871 | goto err_config_params; | ||
1872 | } | ||
1873 | |||
1874 | /* Now set controller state based on controller data */ | ||
1875 | chip->xfer_type = chip_info->com_mode; | ||
1876 | if (!chip_info->cs_control) { | ||
1877 | chip->cs_control = null_cs_control; | ||
1878 | dev_warn(&spi->dev, | ||
1879 | "chip select function is NULL for this chip\n"); | ||
1880 | } else | ||
1881 | chip->cs_control = chip_info->cs_control; | ||
1882 | |||
1883 | if (bits <= 3) { | ||
1884 | /* PL022 doesn't support less than 4-bits */ | ||
1885 | status = -ENOTSUPP; | ||
1886 | goto err_config_params; | ||
1887 | } else if (bits <= 8) { | ||
1888 | dev_dbg(&spi->dev, "4 <= n <= 8 bits per word\n"); | ||
1889 | chip->n_bytes = 1; | ||
1890 | chip->read = READING_U8; | ||
1891 | chip->write = WRITING_U8; | ||
1892 | } else if (bits <= 16) { | ||
1893 | dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); | ||
1894 | chip->n_bytes = 2; | ||
1895 | chip->read = READING_U16; | ||
1896 | chip->write = WRITING_U16; | ||
1897 | } else { | ||
1898 | if (pl022->vendor->max_bpw >= 32) { | ||
1899 | dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); | ||
1900 | chip->n_bytes = 4; | ||
1901 | chip->read = READING_U32; | ||
1902 | chip->write = WRITING_U32; | ||
1903 | } else { | ||
1904 | dev_err(&spi->dev, | ||
1905 | "illegal data size for this controller!\n"); | ||
1906 | dev_err(&spi->dev, | ||
1907 | "a standard pl022 can only handle " | ||
1908 | "1 <= n <= 16 bit words\n"); | ||
1909 | status = -ENOTSUPP; | ||
1910 | goto err_config_params; | ||
1911 | } | ||
1912 | } | ||
1913 | |||
1914 | /* Now Initialize all register settings required for this chip */ | ||
1915 | chip->cr0 = 0; | ||
1916 | chip->cr1 = 0; | ||
1917 | chip->dmacr = 0; | ||
1918 | chip->cpsr = 0; | ||
1919 | if ((chip_info->com_mode == DMA_TRANSFER) | ||
1920 | && ((pl022->master_info)->enable_dma)) { | ||
1921 | chip->enable_dma = true; | ||
1922 | dev_dbg(&spi->dev, "DMA mode set in controller state\n"); | ||
1923 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | ||
1924 | SSP_DMACR_MASK_RXDMAE, 0); | ||
1925 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, | ||
1926 | SSP_DMACR_MASK_TXDMAE, 1); | ||
1927 | } else { | ||
1928 | chip->enable_dma = false; | ||
1929 | dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); | ||
1930 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, | ||
1931 | SSP_DMACR_MASK_RXDMAE, 0); | ||
1932 | SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, | ||
1933 | SSP_DMACR_MASK_TXDMAE, 1); | ||
1934 | } | ||
1935 | |||
1936 | chip->cpsr = clk_freq.cpsdvsr; | ||
1937 | |||
1938 | /* Special setup for the ST micro extended control registers */ | ||
1939 | if (pl022->vendor->extended_cr) { | ||
1940 | u32 etx; | ||
1941 | |||
1942 | if (pl022->vendor->pl023) { | ||
1943 | /* These bits are only in the PL023 */ | ||
1944 | SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, | ||
1945 | SSP_CR1_MASK_FBCLKDEL_ST, 13); | ||
1946 | } else { | ||
1947 | /* These bits are in the PL022 but not PL023 */ | ||
1948 | SSP_WRITE_BITS(chip->cr0, chip_info->duplex, | ||
1949 | SSP_CR0_MASK_HALFDUP_ST, 5); | ||
1950 | SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, | ||
1951 | SSP_CR0_MASK_CSS_ST, 16); | ||
1952 | SSP_WRITE_BITS(chip->cr0, chip_info->iface, | ||
1953 | SSP_CR0_MASK_FRF_ST, 21); | ||
1954 | SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, | ||
1955 | SSP_CR1_MASK_MWAIT_ST, 6); | ||
1956 | } | ||
1957 | SSP_WRITE_BITS(chip->cr0, bits - 1, | ||
1958 | SSP_CR0_MASK_DSS_ST, 0); | ||
1959 | |||
1960 | if (spi->mode & SPI_LSB_FIRST) { | ||
1961 | tmp = SSP_RX_LSB; | ||
1962 | etx = SSP_TX_LSB; | ||
1963 | } else { | ||
1964 | tmp = SSP_RX_MSB; | ||
1965 | etx = SSP_TX_MSB; | ||
1966 | } | ||
1967 | SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); | ||
1968 | SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); | ||
1969 | SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, | ||
1970 | SSP_CR1_MASK_RXIFLSEL_ST, 7); | ||
1971 | SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, | ||
1972 | SSP_CR1_MASK_TXIFLSEL_ST, 10); | ||
1973 | } else { | ||
1974 | SSP_WRITE_BITS(chip->cr0, bits - 1, | ||
1975 | SSP_CR0_MASK_DSS, 0); | ||
1976 | SSP_WRITE_BITS(chip->cr0, chip_info->iface, | ||
1977 | SSP_CR0_MASK_FRF, 4); | ||
1978 | } | ||
1979 | |||
1980 | /* Stuff that is common for all versions */ | ||
1981 | if (spi->mode & SPI_CPOL) | ||
1982 | tmp = SSP_CLK_POL_IDLE_HIGH; | ||
1983 | else | ||
1984 | tmp = SSP_CLK_POL_IDLE_LOW; | ||
1985 | SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); | ||
1986 | |||
1987 | if (spi->mode & SPI_CPHA) | ||
1988 | tmp = SSP_CLK_SECOND_EDGE; | ||
1989 | else | ||
1990 | tmp = SSP_CLK_FIRST_EDGE; | ||
1991 | SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); | ||
1992 | |||
1993 | SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); | ||
1994 | /* Loopback is available on all versions except PL023 */ | ||
1995 | if (pl022->vendor->loopback) { | ||
1996 | if (spi->mode & SPI_LOOP) | ||
1997 | tmp = LOOPBACK_ENABLED; | ||
1998 | else | ||
1999 | tmp = LOOPBACK_DISABLED; | ||
2000 | SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); | ||
2001 | } | ||
2002 | SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); | ||
2003 | SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); | ||
2004 | SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); | ||
2005 | |||
2006 | /* Save controller_state */ | ||
2007 | spi_set_ctldata(spi, chip); | ||
2008 | return status; | ||
2009 | err_config_params: | ||
2010 | spi_set_ctldata(spi, NULL); | ||
2011 | kfree(chip); | ||
2012 | return status; | ||
2013 | } | ||
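/*
 * Editorial sketch, not part of the driver: the mode, word size and
 * speed consumed by pl022_setup() above are whatever the client driver
 * put in its spi_device before calling spi_setup(). The function name
 * is hypothetical.
 */
static int pl022_example_setup(struct spi_device *spi)
{
	spi->mode = SPI_MODE_3;		/* SPI_CPOL | SPI_CPHA */
	spi->bits_per_word = 16;	/* chip->n_bytes becomes 2 */
	spi->max_speed_hz = 2000000;	/* input to calculate_effective_freq() */
	/* spi_setup() lands in master->setup, i.e. pl022_setup() */
	return spi_setup(spi);
}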
2014 | |||
2015 | /** | ||
2016 | * pl022_cleanup - cleanup function registered to SPI master framework | ||
2017 | * @spi: spi device which is requesting cleanup | ||
2018 | * | ||
2019 | * This function is registered to the SPI framework for this SPI master | ||
2020 | * controller. It will free the runtime state of chip. | ||
2021 | */ | ||
2022 | static void pl022_cleanup(struct spi_device *spi) | ||
2023 | { | ||
2024 | struct chip_data *chip = spi_get_ctldata(spi); | ||
2025 | |||
2026 | spi_set_ctldata(spi, NULL); | ||
2027 | kfree(chip); | ||
2028 | } | ||
2029 | |||
2030 | |||
2031 | static int __devinit | ||
2032 | pl022_probe(struct amba_device *adev, const struct amba_id *id) | ||
2033 | { | ||
2034 | struct device *dev = &adev->dev; | ||
2035 | struct pl022_ssp_controller *platform_info = adev->dev.platform_data; | ||
2036 | struct spi_master *master; | ||
2037 | struct pl022 *pl022 = NULL; /*Data for this driver */ | ||
2038 | int status = 0; | ||
2039 | |||
2040 | dev_info(&adev->dev, | ||
2041 | "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); | ||
2042 | if (platform_info == NULL) { | ||
2043 | dev_err(&adev->dev, "probe - no platform data supplied\n"); | ||
2044 | status = -ENODEV; | ||
2045 | goto err_no_pdata; | ||
2046 | } | ||
2047 | |||
2048 | /* Allocate master with space for data */ | ||
2049 | master = spi_alloc_master(dev, sizeof(struct pl022)); | ||
2050 | if (master == NULL) { | ||
2051 | dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); | ||
2052 | status = -ENOMEM; | ||
2053 | goto err_no_master; | ||
2054 | } | ||
2055 | |||
2056 | pl022 = spi_master_get_devdata(master); | ||
2057 | pl022->master = master; | ||
2058 | pl022->master_info = platform_info; | ||
2059 | pl022->adev = adev; | ||
2060 | pl022->vendor = id->data; | ||
2061 | |||
2062 | /* | ||
2063 | * Bus number which has been assigned to this SSP controller | ||
2064 | * on this board | ||
2065 | */ | ||
2066 | master->bus_num = platform_info->bus_id; | ||
2067 | master->num_chipselect = platform_info->num_chipselect; | ||
2068 | master->cleanup = pl022_cleanup; | ||
2069 | master->setup = pl022_setup; | ||
2070 | master->transfer = pl022_transfer; | ||
2071 | |||
2072 | /* | ||
2073 | * Supports mode 0-3, loopback, and active low CS. Transfers are | ||
2074 | * always MS bit first on the original pl022. | ||
2075 | */ | ||
2076 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; | ||
2077 | if (pl022->vendor->extended_cr) | ||
2078 | master->mode_bits |= SPI_LSB_FIRST; | ||
2079 | |||
2080 | dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); | ||
2081 | |||
2082 | status = amba_request_regions(adev, NULL); | ||
2083 | if (status) | ||
2084 | goto err_no_ioregion; | ||
2085 | |||
2086 | pl022->phybase = adev->res.start; | ||
2087 | pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); | ||
2088 | if (pl022->virtbase == NULL) { | ||
2089 | status = -ENOMEM; | ||
2090 | goto err_no_ioremap; | ||
2091 | } | ||
2092 | printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", | ||
2093 | adev->res.start, pl022->virtbase); | ||
2094 | |||
2095 | pl022->clk = clk_get(&adev->dev, NULL); | ||
2096 | if (IS_ERR(pl022->clk)) { | ||
2097 | status = PTR_ERR(pl022->clk); | ||
2098 | dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); | ||
2099 | goto err_no_clk; | ||
2100 | } | ||
2101 | |||
2102 | /* Disable SSP */ | ||
2103 | writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), | ||
2104 | SSP_CR1(pl022->virtbase)); | ||
2105 | load_ssp_default_config(pl022); | ||
2106 | |||
2107 | status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", | ||
2108 | pl022); | ||
2109 | if (status < 0) { | ||
2110 | dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); | ||
2111 | goto err_no_irq; | ||
2112 | } | ||
2113 | |||
2114 | /* Get DMA channels */ | ||
2115 | if (platform_info->enable_dma) { | ||
2116 | status = pl022_dma_probe(pl022); | ||
2117 | if (status != 0) | ||
2118 | platform_info->enable_dma = 0; | ||
2119 | } | ||
2120 | |||
2121 | /* Initialize and start queue */ | ||
2122 | status = init_queue(pl022); | ||
2123 | if (status != 0) { | ||
2124 | dev_err(&adev->dev, "probe - problem initializing queue\n"); | ||
2125 | goto err_init_queue; | ||
2126 | } | ||
2127 | status = start_queue(pl022); | ||
2128 | if (status != 0) { | ||
2129 | dev_err(&adev->dev, "probe - problem starting queue\n"); | ||
2130 | goto err_start_queue; | ||
2131 | } | ||
2132 | /* Register with the SPI framework */ | ||
2133 | amba_set_drvdata(adev, pl022); | ||
2134 | status = spi_register_master(master); | ||
2135 | if (status != 0) { | ||
2136 | dev_err(&adev->dev, | ||
2137 | "probe - problem registering spi master\n"); | ||
2138 | goto err_spi_register; | ||
2139 | } | ||
2140 | dev_dbg(dev, "probe succeeded\n"); | ||
2141 | /* | ||
2142 | * Disable the silicon block pclk and any voltage domain and just | ||
2143 | * power it up and clock it when it's needed | ||
2144 | */ | ||
2145 | amba_pclk_disable(adev); | ||
2146 | amba_vcore_disable(adev); | ||
2147 | return 0; | ||
2148 | |||
2149 | err_spi_register: | ||
2150 | err_start_queue: | ||
2151 | err_init_queue: | ||
2152 | destroy_queue(pl022); | ||
2153 | pl022_dma_remove(pl022); | ||
2154 | free_irq(adev->irq[0], pl022); | ||
2155 | err_no_irq: | ||
2156 | clk_put(pl022->clk); | ||
2157 | err_no_clk: | ||
2158 | iounmap(pl022->virtbase); | ||
2159 | err_no_ioremap: | ||
2160 | amba_release_regions(adev); | ||
2161 | err_no_ioregion: | ||
2162 | spi_master_put(master); | ||
2163 | err_no_master: | ||
2164 | err_no_pdata: | ||
2165 | return status; | ||
2166 | } | ||
2167 | |||
2168 | static int __devexit | ||
2169 | pl022_remove(struct amba_device *adev) | ||
2170 | { | ||
2171 | struct pl022 *pl022 = amba_get_drvdata(adev); | ||
2172 | int status = 0; | ||
2173 | if (!pl022) | ||
2174 | return 0; | ||
2175 | |||
2176 | /* Remove the queue */ | ||
2177 | status = destroy_queue(pl022); | ||
2178 | if (status != 0) { | ||
2179 | dev_err(&adev->dev, | ||
2180 | "queue remove failed (%d)\n", status); | ||
2181 | return status; | ||
2182 | } | ||
2183 | load_ssp_default_config(pl022); | ||
2184 | pl022_dma_remove(pl022); | ||
2185 | free_irq(adev->irq[0], pl022); | ||
2186 | clk_disable(pl022->clk); | ||
2187 | clk_put(pl022->clk); | ||
2188 | iounmap(pl022->virtbase); | ||
2189 | amba_release_regions(adev); | ||
2190 | tasklet_disable(&pl022->pump_transfers); | ||
2191 | spi_unregister_master(pl022->master); | ||
2192 | spi_master_put(pl022->master); | ||
2193 | amba_set_drvdata(adev, NULL); | ||
2194 | dev_dbg(&adev->dev, "remove succeeded\n"); | ||
2195 | return 0; | ||
2196 | } | ||
2197 | |||
2198 | #ifdef CONFIG_PM | ||
2199 | static int pl022_suspend(struct amba_device *adev, pm_message_t state) | ||
2200 | { | ||
2201 | struct pl022 *pl022 = amba_get_drvdata(adev); | ||
2202 | int status = 0; | ||
2203 | |||
2204 | status = stop_queue(pl022); | ||
2205 | if (status) { | ||
2206 | dev_warn(&adev->dev, "suspend cannot stop queue\n"); | ||
2207 | return status; | ||
2208 | } | ||
2209 | |||
2210 | amba_vcore_enable(adev); | ||
2211 | amba_pclk_enable(adev); | ||
2212 | load_ssp_default_config(pl022); | ||
2213 | amba_pclk_disable(adev); | ||
2214 | amba_vcore_disable(adev); | ||
2215 | dev_dbg(&adev->dev, "suspended\n"); | ||
2216 | return 0; | ||
2217 | } | ||
2218 | |||
2219 | static int pl022_resume(struct amba_device *adev) | ||
2220 | { | ||
2221 | struct pl022 *pl022 = amba_get_drvdata(adev); | ||
2222 | int status = 0; | ||
2223 | |||
2224 | /* Start the queue running */ | ||
2225 | status = start_queue(pl022); | ||
2226 | if (status) | ||
2227 | dev_err(&adev->dev, "problem starting queue (%d)\n", status); | ||
2228 | else | ||
2229 | dev_dbg(&adev->dev, "resumed\n"); | ||
2230 | |||
2231 | return status; | ||
2232 | } | ||
2233 | #else | ||
2234 | #define pl022_suspend NULL | ||
2235 | #define pl022_resume NULL | ||
2236 | #endif /* CONFIG_PM */ | ||
2237 | |||
2238 | static struct vendor_data vendor_arm = { | ||
2239 | .fifodepth = 8, | ||
2240 | .max_bpw = 16, | ||
2241 | .unidir = false, | ||
2242 | .extended_cr = false, | ||
2243 | .pl023 = false, | ||
2244 | .loopback = true, | ||
2245 | }; | ||
2246 | |||
2247 | |||
2248 | static struct vendor_data vendor_st = { | ||
2249 | .fifodepth = 32, | ||
2250 | .max_bpw = 32, | ||
2251 | .unidir = false, | ||
2252 | .extended_cr = true, | ||
2253 | .pl023 = false, | ||
2254 | .loopback = true, | ||
2255 | }; | ||
2256 | |||
2257 | static struct vendor_data vendor_st_pl023 = { | ||
2258 | .fifodepth = 32, | ||
2259 | .max_bpw = 32, | ||
2260 | .unidir = false, | ||
2261 | .extended_cr = true, | ||
2262 | .pl023 = true, | ||
2263 | .loopback = false, | ||
2264 | }; | ||
2265 | |||
2266 | static struct vendor_data vendor_db5500_pl023 = { | ||
2267 | .fifodepth = 32, | ||
2268 | .max_bpw = 32, | ||
2269 | .unidir = false, | ||
2270 | .extended_cr = true, | ||
2271 | .pl023 = true, | ||
2272 | .loopback = true, | ||
2273 | }; | ||
2274 | |||
2275 | static struct amba_id pl022_ids[] = { | ||
2276 | { | ||
2277 | /* | ||
2278 | * ARM PL022 variant, this has a 16bit wide | ||
2279 | * and 8 locations deep TX/RX FIFO | ||
2280 | */ | ||
2281 | .id = 0x00041022, | ||
2282 | .mask = 0x000fffff, | ||
2283 | .data = &vendor_arm, | ||
2284 | }, | ||
2285 | { | ||
2286 | /* | ||
2287 | * ST Micro derivative, this has 32bit wide | ||
2288 | * and 32 locations deep TX/RX FIFO | ||
2289 | */ | ||
2290 | .id = 0x01080022, | ||
2291 | .mask = 0xffffffff, | ||
2292 | .data = &vendor_st, | ||
2293 | }, | ||
2294 | { | ||
2295 | /* | ||
2296 | * ST-Ericsson derivative "PL023" (this is not | ||
2297 | * an official ARM number), this is a PL022 SSP block | ||
2298 | * stripped to SPI mode only, it has 32bit wide | ||
2299 | * and 32 locations deep TX/RX FIFO but no extended | ||
2300 | * CR0/CR1 register | ||
2301 | */ | ||
2302 | .id = 0x00080023, | ||
2303 | .mask = 0xffffffff, | ||
2304 | .data = &vendor_st_pl023, | ||
2305 | }, | ||
2306 | { | ||
2307 | .id = 0x10080023, | ||
2308 | .mask = 0xffffffff, | ||
2309 | .data = &vendor_db5500_pl023, | ||
2310 | }, | ||
2311 | { 0, 0 }, | ||
2312 | }; | ||
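/*
 * Editorial note: the AMBA bus core matches adev->periphid against the
 * .id/.mask pairs above. In the standard PrimeCell ID layout the part
 * number occupies bits [11:0] (0x022 or 0x023 here), the designer ID
 * bits [19:12] (0x41 for the ARM part, 0x80 for the ST derivatives),
 * the revision bits [23:20] and the configuration bits [31:24]; the
 * ARM entry's 0x000fffff mask deliberately ignores revision and
 * configuration so any silicon revision matches.
 */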
2313 | |||
2314 | static struct amba_driver pl022_driver = { | ||
2315 | .drv = { | ||
2316 | .name = "ssp-pl022", | ||
2317 | }, | ||
2318 | .id_table = pl022_ids, | ||
2319 | .probe = pl022_probe, | ||
2320 | .remove = __devexit_p(pl022_remove), | ||
2321 | .suspend = pl022_suspend, | ||
2322 | .resume = pl022_resume, | ||
2323 | }; | ||
2324 | |||
2325 | |||
2326 | static int __init pl022_init(void) | ||
2327 | { | ||
2328 | return amba_driver_register(&pl022_driver); | ||
2329 | } | ||
2330 | |||
2331 | subsys_initcall(pl022_init); | ||
2332 | |||
2333 | static void __exit pl022_exit(void) | ||
2334 | { | ||
2335 | amba_driver_unregister(&pl022_driver); | ||
2336 | } | ||
2337 | |||
2338 | module_exit(pl022_exit); | ||
2339 | |||
2340 | MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); | ||
2341 | MODULE_DESCRIPTION("PL022 SSP Controller Driver"); | ||
2342 | MODULE_LICENSE("GPL"); | ||