Diffstat (limited to 'drivers/spi/spi-pxa2xx.c')
-rw-r--r-- | drivers/spi/spi-pxa2xx.c | 1816 |
1 file changed, 1816 insertions, 0 deletions
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
new file mode 100644
index 000000000000..dc25bee8d33f
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.c
@@ -0,0 +1,1816 @@
1 | /* | ||
2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/spi/pxa2xx_spi.h> | ||
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | ||
29 | #include <linux/workqueue.h> | ||
30 | #include <linux/delay.h> | ||
31 | #include <linux/gpio.h> | ||
32 | #include <linux/slab.h> | ||
33 | |||
34 | #include <asm/io.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/delay.h> | ||
37 | |||
38 | |||
39 | MODULE_AUTHOR("Stephen Street"); | ||
40 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | ||
41 | MODULE_LICENSE("GPL"); | ||
42 | MODULE_ALIAS("platform:pxa2xx-spi"); | ||
43 | |||
44 | #define MAX_BUSES 3 | ||
45 | |||
46 | #define TIMOUT_DFLT 1000 | ||
47 | |||
48 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
49 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
50 | #define IS_DMA_ALIGNED(x) ((((u32)(x)) & 0x07) == 0) | ||
51 | #define MAX_DMA_LEN 8191 | ||
52 | #define DMA_ALIGNMENT 8 | ||
53 | |||
54 | /* | ||
55 | * used to test for SSCR1 changes that require an SSP restart: basically | ||
56 | * everything except the service and interrupt enables.  The PXA270 developer | ||
57 | * manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to be in this | ||
58 | * list, but the PXA255 developer manual says all bits, without really | ||
59 | * meaning the service and interrupt enables. | ||
60 | */ | ||
61 | #define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \ | ||
62 | | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \ | ||
63 | | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \ | ||
64 | | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \ | ||
65 | | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | ||
66 | | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) | ||
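The mask is what pump_transfers() later uses to decide whether the SSP has to be stopped and reprogrammed before a transfer; a minimal sketch of that check (illustration only, mirroring the code further down in this file):

	/* Illustration: restart the SSP only if cr0 or a "restart" bit of cr1 changed */
	if ((read_SSCR0(reg) != cr0)
	    || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
	       (cr1 & SSCR1_CHANGE_MASK)) {
		/* stop the SSP, rewrite the registers, then restart it */
	}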
67 | |||
68 | #define DEFINE_SSP_REG(reg, off) \ | ||
69 | static inline u32 read_##reg(void const __iomem *p) \ | ||
70 | { return __raw_readl(p + (off)); } \ | ||
71 | \ | ||
72 | static inline void write_##reg(u32 v, void __iomem *p) \ | ||
73 | { __raw_writel(v, p + (off)); } | ||
74 | |||
75 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
76 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
77 | DEFINE_SSP_REG(SSSR, 0x08) | ||
78 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
79 | DEFINE_SSP_REG(SSDR, 0x10) | ||
80 | DEFINE_SSP_REG(SSTO, 0x28) | ||
81 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
82 | |||
83 | #define START_STATE ((void*)0) | ||
84 | #define RUNNING_STATE ((void*)1) | ||
85 | #define DONE_STATE ((void*)2) | ||
86 | #define ERROR_STATE ((void*)-1) | ||
87 | |||
88 | #define QUEUE_RUNNING 0 | ||
89 | #define QUEUE_STOPPED 1 | ||
90 | |||
91 | struct driver_data { | ||
92 | /* Driver model hookup */ | ||
93 | struct platform_device *pdev; | ||
94 | |||
95 | /* SSP Info */ | ||
96 | struct ssp_device *ssp; | ||
97 | |||
98 | /* SPI framework hookup */ | ||
99 | enum pxa_ssp_type ssp_type; | ||
100 | struct spi_master *master; | ||
101 | |||
102 | /* PXA hookup */ | ||
103 | struct pxa2xx_spi_master *master_info; | ||
104 | |||
105 | /* DMA setup stuff */ | ||
106 | int rx_channel; | ||
107 | int tx_channel; | ||
108 | u32 *null_dma_buf; | ||
109 | |||
110 | /* SSP register addresses */ | ||
111 | void __iomem *ioaddr; | ||
112 | u32 ssdr_physical; | ||
113 | |||
114 | /* SSP masks */ | ||
115 | u32 dma_cr1; | ||
116 | u32 int_cr1; | ||
117 | u32 clear_sr; | ||
118 | u32 mask_sr; | ||
119 | |||
120 | /* Driver message queue */ | ||
121 | struct workqueue_struct *workqueue; | ||
122 | struct work_struct pump_messages; | ||
123 | spinlock_t lock; | ||
124 | struct list_head queue; | ||
125 | int busy; | ||
126 | int run; | ||
127 | |||
128 | /* Message Transfer pump */ | ||
129 | struct tasklet_struct pump_transfers; | ||
130 | |||
131 | /* Current message transfer state info */ | ||
132 | struct spi_message* cur_msg; | ||
133 | struct spi_transfer* cur_transfer; | ||
134 | struct chip_data *cur_chip; | ||
135 | size_t len; | ||
136 | void *tx; | ||
137 | void *tx_end; | ||
138 | void *rx; | ||
139 | void *rx_end; | ||
140 | int dma_mapped; | ||
141 | dma_addr_t rx_dma; | ||
142 | dma_addr_t tx_dma; | ||
143 | size_t rx_map_len; | ||
144 | size_t tx_map_len; | ||
145 | u8 n_bytes; | ||
146 | u32 dma_width; | ||
147 | int (*write)(struct driver_data *drv_data); | ||
148 | int (*read)(struct driver_data *drv_data); | ||
149 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
150 | void (*cs_control)(u32 command); | ||
151 | }; | ||
152 | |||
153 | struct chip_data { | ||
154 | u32 cr0; | ||
155 | u32 cr1; | ||
156 | u32 psp; | ||
157 | u32 timeout; | ||
158 | u8 n_bytes; | ||
159 | u32 dma_width; | ||
160 | u32 dma_burst_size; | ||
161 | u32 threshold; | ||
162 | u32 dma_threshold; | ||
163 | u8 enable_dma; | ||
164 | u8 bits_per_word; | ||
165 | u32 speed_hz; | ||
166 | union { | ||
167 | int gpio_cs; | ||
168 | unsigned int frm; | ||
169 | }; | ||
170 | int gpio_cs_inverted; | ||
171 | int (*write)(struct driver_data *drv_data); | ||
172 | int (*read)(struct driver_data *drv_data); | ||
173 | void (*cs_control)(u32 command); | ||
174 | }; | ||
175 | |||
176 | static void pump_messages(struct work_struct *work); | ||
177 | |||
178 | static void cs_assert(struct driver_data *drv_data) | ||
179 | { | ||
180 | struct chip_data *chip = drv_data->cur_chip; | ||
181 | |||
182 | if (drv_data->ssp_type == CE4100_SSP) { | ||
183 | write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr); | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | if (chip->cs_control) { | ||
188 | chip->cs_control(PXA2XX_CS_ASSERT); | ||
189 | return; | ||
190 | } | ||
191 | |||
192 | if (gpio_is_valid(chip->gpio_cs)) | ||
193 | gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted); | ||
194 | } | ||
195 | |||
196 | static void cs_deassert(struct driver_data *drv_data) | ||
197 | { | ||
198 | struct chip_data *chip = drv_data->cur_chip; | ||
199 | |||
200 | if (drv_data->ssp_type == CE4100_SSP) | ||
201 | return; | ||
202 | |||
203 | if (chip->cs_control) { | ||
204 | chip->cs_control(PXA2XX_CS_DEASSERT); | ||
205 | return; | ||
206 | } | ||
207 | |||
208 | if (gpio_is_valid(chip->gpio_cs)) | ||
209 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); | ||
210 | } | ||
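A board-supplied cs_control() callback, if provided instead of a GPIO chip select, only has to react to the two commands passed in by cs_assert()/cs_deassert() above. A hypothetical example (the function and GPIO names are invented for illustration):

	/* Hypothetical board code: drive an active-low chip select line. */
	static void mymachine_cs_control(u32 command)
	{
		/* MYMACHINE_CS_GPIO is an assumed, board-specific GPIO number */
		gpio_set_value(MYMACHINE_CS_GPIO,
			       command == PXA2XX_CS_ASSERT ? 0 : 1);
	}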
211 | |||
212 | static void write_SSSR_CS(struct driver_data *drv_data, u32 val) | ||
213 | { | ||
214 | void __iomem *reg = drv_data->ioaddr; | ||
215 | |||
216 | if (drv_data->ssp_type == CE4100_SSP) | ||
217 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | ||
218 | |||
219 | write_SSSR(val, reg); | ||
220 | } | ||
221 | |||
222 | static int pxa25x_ssp_comp(struct driver_data *drv_data) | ||
223 | { | ||
224 | if (drv_data->ssp_type == PXA25x_SSP) | ||
225 | return 1; | ||
226 | if (drv_data->ssp_type == CE4100_SSP) | ||
227 | return 1; | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | static int flush(struct driver_data *drv_data) | ||
232 | { | ||
233 | unsigned long limit = loops_per_jiffy << 1; | ||
234 | |||
235 | void __iomem *reg = drv_data->ioaddr; | ||
236 | |||
237 | do { | ||
238 | while (read_SSSR(reg) & SSSR_RNE) { | ||
239 | read_SSDR(reg); | ||
240 | } | ||
241 | } while ((read_SSSR(reg) & SSSR_BSY) && --limit); | ||
242 | write_SSSR_CS(drv_data, SSSR_ROR); | ||
243 | |||
244 | return limit; | ||
245 | } | ||
246 | |||
247 | static int null_writer(struct driver_data *drv_data) | ||
248 | { | ||
249 | void __iomem *reg = drv_data->ioaddr; | ||
250 | u8 n_bytes = drv_data->n_bytes; | ||
251 | |||
252 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) | ||
253 | || (drv_data->tx == drv_data->tx_end)) | ||
254 | return 0; | ||
255 | |||
256 | write_SSDR(0, reg); | ||
257 | drv_data->tx += n_bytes; | ||
258 | |||
259 | return 1; | ||
260 | } | ||
261 | |||
262 | static int null_reader(struct driver_data *drv_data) | ||
263 | { | ||
264 | void __iomem *reg = drv_data->ioaddr; | ||
265 | u8 n_bytes = drv_data->n_bytes; | ||
266 | |||
267 | while ((read_SSSR(reg) & SSSR_RNE) | ||
268 | && (drv_data->rx < drv_data->rx_end)) { | ||
269 | read_SSDR(reg); | ||
270 | drv_data->rx += n_bytes; | ||
271 | } | ||
272 | |||
273 | return drv_data->rx == drv_data->rx_end; | ||
274 | } | ||
275 | |||
276 | static int u8_writer(struct driver_data *drv_data) | ||
277 | { | ||
278 | void __iomem *reg = drv_data->ioaddr; | ||
279 | |||
280 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) | ||
281 | || (drv_data->tx == drv_data->tx_end)) | ||
282 | return 0; | ||
283 | |||
284 | write_SSDR(*(u8 *)(drv_data->tx), reg); | ||
285 | ++drv_data->tx; | ||
286 | |||
287 | return 1; | ||
288 | } | ||
289 | |||
290 | static int u8_reader(struct driver_data *drv_data) | ||
291 | { | ||
292 | void __iomem *reg = drv_data->ioaddr; | ||
293 | |||
294 | while ((read_SSSR(reg) & SSSR_RNE) | ||
295 | && (drv_data->rx < drv_data->rx_end)) { | ||
296 | *(u8 *)(drv_data->rx) = read_SSDR(reg); | ||
297 | ++drv_data->rx; | ||
298 | } | ||
299 | |||
300 | return drv_data->rx == drv_data->rx_end; | ||
301 | } | ||
302 | |||
303 | static int u16_writer(struct driver_data *drv_data) | ||
304 | { | ||
305 | void __iomem *reg = drv_data->ioaddr; | ||
306 | |||
307 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) | ||
308 | || (drv_data->tx == drv_data->tx_end)) | ||
309 | return 0; | ||
310 | |||
311 | write_SSDR(*(u16 *)(drv_data->tx), reg); | ||
312 | drv_data->tx += 2; | ||
313 | |||
314 | return 1; | ||
315 | } | ||
316 | |||
317 | static int u16_reader(struct driver_data *drv_data) | ||
318 | { | ||
319 | void __iomem *reg = drv_data->ioaddr; | ||
320 | |||
321 | while ((read_SSSR(reg) & SSSR_RNE) | ||
322 | && (drv_data->rx < drv_data->rx_end)) { | ||
323 | *(u16 *)(drv_data->rx) = read_SSDR(reg); | ||
324 | drv_data->rx += 2; | ||
325 | } | ||
326 | |||
327 | return drv_data->rx == drv_data->rx_end; | ||
328 | } | ||
329 | |||
330 | static int u32_writer(struct driver_data *drv_data) | ||
331 | { | ||
332 | void __iomem *reg = drv_data->ioaddr; | ||
333 | |||
334 | if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK) | ||
335 | || (drv_data->tx == drv_data->tx_end)) | ||
336 | return 0; | ||
337 | |||
338 | write_SSDR(*(u32 *)(drv_data->tx), reg); | ||
339 | drv_data->tx += 4; | ||
340 | |||
341 | return 1; | ||
342 | } | ||
343 | |||
344 | static int u32_reader(struct driver_data *drv_data) | ||
345 | { | ||
346 | void __iomem *reg = drv_data->ioaddr; | ||
347 | |||
348 | while ((read_SSSR(reg) & SSSR_RNE) | ||
349 | && (drv_data->rx < drv_data->rx_end)) { | ||
350 | *(u32 *)(drv_data->rx) = read_SSDR(reg); | ||
351 | drv_data->rx += 4; | ||
352 | } | ||
353 | |||
354 | return drv_data->rx == drv_data->rx_end; | ||
355 | } | ||
356 | |||
357 | static void *next_transfer(struct driver_data *drv_data) | ||
358 | { | ||
359 | struct spi_message *msg = drv_data->cur_msg; | ||
360 | struct spi_transfer *trans = drv_data->cur_transfer; | ||
361 | |||
362 | /* Move to next transfer */ | ||
363 | if (trans->transfer_list.next != &msg->transfers) { | ||
364 | drv_data->cur_transfer = | ||
365 | list_entry(trans->transfer_list.next, | ||
366 | struct spi_transfer, | ||
367 | transfer_list); | ||
368 | return RUNNING_STATE; | ||
369 | } else | ||
370 | return DONE_STATE; | ||
371 | } | ||
372 | |||
373 | static int map_dma_buffers(struct driver_data *drv_data) | ||
374 | { | ||
375 | struct spi_message *msg = drv_data->cur_msg; | ||
376 | struct device *dev = &msg->spi->dev; | ||
377 | |||
378 | if (!drv_data->cur_chip->enable_dma) | ||
379 | return 0; | ||
380 | |||
381 | if (msg->is_dma_mapped) | ||
382 | return drv_data->rx_dma && drv_data->tx_dma; | ||
383 | |||
384 | if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) | ||
385 | return 0; | ||
386 | |||
387 | /* Modify setup if rx buffer is null */ | ||
388 | if (drv_data->rx == NULL) { | ||
389 | *drv_data->null_dma_buf = 0; | ||
390 | drv_data->rx = drv_data->null_dma_buf; | ||
391 | drv_data->rx_map_len = 4; | ||
392 | } else | ||
393 | drv_data->rx_map_len = drv_data->len; | ||
394 | |||
395 | |||
396 | /* Modify setup if tx buffer is null */ | ||
397 | if (drv_data->tx == NULL) { | ||
398 | *drv_data->null_dma_buf = 0; | ||
399 | drv_data->tx = drv_data->null_dma_buf; | ||
400 | drv_data->tx_map_len = 4; | ||
401 | } else | ||
402 | drv_data->tx_map_len = drv_data->len; | ||
403 | |||
404 | /* Stream map the tx buffer. Always do DMA_TO_DEVICE first | ||
405 | * so we flush the cache *before* invalidating it, in case | ||
406 | * the tx and rx buffers overlap. | ||
407 | */ | ||
408 | drv_data->tx_dma = dma_map_single(dev, drv_data->tx, | ||
409 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
410 | if (dma_mapping_error(dev, drv_data->tx_dma)) | ||
411 | return 0; | ||
412 | |||
413 | /* Stream map the rx buffer */ | ||
414 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | ||
415 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
416 | if (dma_mapping_error(dev, drv_data->rx_dma)) { | ||
417 | dma_unmap_single(dev, drv_data->tx_dma, | ||
418 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | return 1; | ||
423 | } | ||
424 | |||
425 | static void unmap_dma_buffers(struct driver_data *drv_data) | ||
426 | { | ||
427 | struct device *dev; | ||
428 | |||
429 | if (!drv_data->dma_mapped) | ||
430 | return; | ||
431 | |||
432 | if (!drv_data->cur_msg->is_dma_mapped) { | ||
433 | dev = &drv_data->cur_msg->spi->dev; | ||
434 | dma_unmap_single(dev, drv_data->rx_dma, | ||
435 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
436 | dma_unmap_single(dev, drv_data->tx_dma, | ||
437 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
438 | } | ||
439 | |||
440 | drv_data->dma_mapped = 0; | ||
441 | } | ||
442 | |||
443 | /* caller already set message->status; dma and pio irqs are blocked */ | ||
444 | static void giveback(struct driver_data *drv_data) | ||
445 | { | ||
446 | struct spi_transfer* last_transfer; | ||
447 | unsigned long flags; | ||
448 | struct spi_message *msg; | ||
449 | |||
450 | spin_lock_irqsave(&drv_data->lock, flags); | ||
451 | msg = drv_data->cur_msg; | ||
452 | drv_data->cur_msg = NULL; | ||
453 | drv_data->cur_transfer = NULL; | ||
454 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
455 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
456 | |||
457 | last_transfer = list_entry(msg->transfers.prev, | ||
458 | struct spi_transfer, | ||
459 | transfer_list); | ||
460 | |||
461 | /* Delay if requested before any change in chip select */ | ||
462 | if (last_transfer->delay_usecs) | ||
463 | udelay(last_transfer->delay_usecs); | ||
464 | |||
465 | /* Drop chip select UNLESS cs_change is true or we are returning | ||
466 | * a message with an error, or next message is for another chip | ||
467 | */ | ||
468 | if (!last_transfer->cs_change) | ||
469 | cs_deassert(drv_data); | ||
470 | else { | ||
471 | struct spi_message *next_msg; | ||
472 | |||
473 | /* Holding of cs was hinted, but we need to make sure | ||
474 | * the next message is for the same chip. Don't waste | ||
475 | * time with the following tests unless this was hinted. | ||
476 | * | ||
477 | * We cannot postpone this until pump_messages, because | ||
478 | * after calling msg->complete (below) the driver that | ||
479 | * sent the current message could be unloaded, which | ||
480 | * could invalidate the cs_control() callback... | ||
481 | */ | ||
482 | |||
483 | /* get a pointer to the next message, if any */ | ||
484 | spin_lock_irqsave(&drv_data->lock, flags); | ||
485 | if (list_empty(&drv_data->queue)) | ||
486 | next_msg = NULL; | ||
487 | else | ||
488 | next_msg = list_entry(drv_data->queue.next, | ||
489 | struct spi_message, queue); | ||
490 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
491 | |||
492 | /* see if the next and current messages point | ||
493 | * to the same chip | ||
494 | */ | ||
495 | if (next_msg && next_msg->spi != msg->spi) | ||
496 | next_msg = NULL; | ||
497 | if (!next_msg || msg->state == ERROR_STATE) | ||
498 | cs_deassert(drv_data); | ||
499 | } | ||
500 | |||
501 | msg->state = NULL; | ||
502 | if (msg->complete) | ||
503 | msg->complete(msg->context); | ||
504 | |||
505 | drv_data->cur_chip = NULL; | ||
506 | } | ||
507 | |||
508 | static int wait_ssp_rx_stall(void const __iomem *ioaddr) | ||
509 | { | ||
510 | unsigned long limit = loops_per_jiffy << 1; | ||
511 | |||
512 | while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) | ||
513 | cpu_relax(); | ||
514 | |||
515 | return limit; | ||
516 | } | ||
517 | |||
518 | static int wait_dma_channel_stop(int channel) | ||
519 | { | ||
520 | unsigned long limit = loops_per_jiffy << 1; | ||
521 | |||
522 | while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) | ||
523 | cpu_relax(); | ||
524 | |||
525 | return limit; | ||
526 | } | ||
527 | |||
528 | static void dma_error_stop(struct driver_data *drv_data, const char *msg) | ||
529 | { | ||
530 | void __iomem *reg = drv_data->ioaddr; | ||
531 | |||
532 | /* Stop and reset */ | ||
533 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
534 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
535 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
536 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
537 | if (!pxa25x_ssp_comp(drv_data)) | ||
538 | write_SSTO(0, reg); | ||
539 | flush(drv_data); | ||
540 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
541 | |||
542 | unmap_dma_buffers(drv_data); | ||
543 | |||
544 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | ||
545 | |||
546 | drv_data->cur_msg->state = ERROR_STATE; | ||
547 | tasklet_schedule(&drv_data->pump_transfers); | ||
548 | } | ||
549 | |||
550 | static void dma_transfer_complete(struct driver_data *drv_data) | ||
551 | { | ||
552 | void __iomem *reg = drv_data->ioaddr; | ||
553 | struct spi_message *msg = drv_data->cur_msg; | ||
554 | |||
555 | /* Clear and disable interrupts on SSP and DMA channels */ | ||
556 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
557 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
558 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
559 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
560 | |||
561 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
562 | dev_err(&drv_data->pdev->dev, | ||
563 | "dma_handler: dma rx channel stop failed\n"); | ||
564 | |||
565 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
566 | dev_err(&drv_data->pdev->dev, | ||
567 | "dma_transfer: ssp rx stall failed\n"); | ||
568 | |||
569 | unmap_dma_buffers(drv_data); | ||
570 | |||
571 | /* update the buffer pointer for the amount completed in dma */ | ||
572 | drv_data->rx += drv_data->len - | ||
573 | (DCMD(drv_data->rx_channel) & DCMD_LENGTH); | ||
574 | |||
575 | /* Read trailing data from the fifo.  It does not matter how many | ||
576 | * bytes are in the fifo; just read until the buffer is full or the | ||
577 | * fifo is empty, whichever occurs first. */ | ||
578 | drv_data->read(drv_data); | ||
579 | |||
580 | /* return count of what was actually read */ | ||
581 | msg->actual_length += drv_data->len - | ||
582 | (drv_data->rx_end - drv_data->rx); | ||
583 | |||
584 | /* Transfer delays and chip select release are | ||
585 | * handled in pump_transfers or giveback | ||
586 | */ | ||
587 | |||
588 | /* Move to next transfer */ | ||
589 | msg->state = next_transfer(drv_data); | ||
590 | |||
591 | /* Schedule transfer tasklet */ | ||
592 | tasklet_schedule(&drv_data->pump_transfers); | ||
593 | } | ||
594 | |||
595 | static void dma_handler(int channel, void *data) | ||
596 | { | ||
597 | struct driver_data *drv_data = data; | ||
598 | u32 irq_status = DCSR(channel) & DMA_INT_MASK; | ||
599 | |||
600 | if (irq_status & DCSR_BUSERR) { | ||
601 | |||
602 | if (channel == drv_data->tx_channel) | ||
603 | dma_error_stop(drv_data, | ||
604 | "dma_handler: " | ||
605 | "bad bus address on tx channel"); | ||
606 | else | ||
607 | dma_error_stop(drv_data, | ||
608 | "dma_handler: " | ||
609 | "bad bus address on rx channel"); | ||
610 | return; | ||
611 | } | ||
612 | |||
613 | /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */ | ||
614 | if ((channel == drv_data->tx_channel) | ||
615 | && (irq_status & DCSR_ENDINTR) | ||
616 | && (drv_data->ssp_type == PXA25x_SSP)) { | ||
617 | |||
618 | /* Wait for rx to stall */ | ||
619 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
620 | dev_err(&drv_data->pdev->dev, | ||
621 | "dma_handler: ssp rx stall failed\n"); | ||
622 | |||
623 | /* finish this transfer, start the next */ | ||
624 | dma_transfer_complete(drv_data); | ||
625 | } | ||
626 | } | ||
627 | |||
628 | static irqreturn_t dma_transfer(struct driver_data *drv_data) | ||
629 | { | ||
630 | u32 irq_status; | ||
631 | void __iomem *reg = drv_data->ioaddr; | ||
632 | |||
633 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | ||
634 | if (irq_status & SSSR_ROR) { | ||
635 | dma_error_stop(drv_data, "dma_transfer: fifo overrun"); | ||
636 | return IRQ_HANDLED; | ||
637 | } | ||
638 | |||
639 | /* Check for false positive timeout */ | ||
640 | if ((irq_status & SSSR_TINT) | ||
641 | && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { | ||
642 | write_SSSR(SSSR_TINT, reg); | ||
643 | return IRQ_HANDLED; | ||
644 | } | ||
645 | |||
646 | if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { | ||
647 | |||
648 | /* Clear and disable timeout interrupt, do the rest in | ||
649 | * dma_transfer_complete */ | ||
650 | if (!pxa25x_ssp_comp(drv_data)) | ||
651 | write_SSTO(0, reg); | ||
652 | |||
653 | /* finish this transfer, start the next */ | ||
654 | dma_transfer_complete(drv_data); | ||
655 | |||
656 | return IRQ_HANDLED; | ||
657 | } | ||
658 | |||
659 | /* Oops, problem detected */ | ||
660 | return IRQ_NONE; | ||
661 | } | ||
662 | |||
663 | static void reset_sccr1(struct driver_data *drv_data) | ||
664 | { | ||
665 | void __iomem *reg = drv_data->ioaddr; | ||
666 | struct chip_data *chip = drv_data->cur_chip; | ||
667 | u32 sccr1_reg; | ||
668 | |||
669 | sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1; | ||
670 | sccr1_reg &= ~SSCR1_RFT; | ||
671 | sccr1_reg |= chip->threshold; | ||
672 | write_SSCR1(sccr1_reg, reg); | ||
673 | } | ||
674 | |||
675 | static void int_error_stop(struct driver_data *drv_data, const char* msg) | ||
676 | { | ||
677 | void __iomem *reg = drv_data->ioaddr; | ||
678 | |||
679 | /* Stop and reset SSP */ | ||
680 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
681 | reset_sccr1(drv_data); | ||
682 | if (!pxa25x_ssp_comp(drv_data)) | ||
683 | write_SSTO(0, reg); | ||
684 | flush(drv_data); | ||
685 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
686 | |||
687 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | ||
688 | |||
689 | drv_data->cur_msg->state = ERROR_STATE; | ||
690 | tasklet_schedule(&drv_data->pump_transfers); | ||
691 | } | ||
692 | |||
693 | static void int_transfer_complete(struct driver_data *drv_data) | ||
694 | { | ||
695 | void __iomem *reg = drv_data->ioaddr; | ||
696 | |||
697 | /* Stop SSP */ | ||
698 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
699 | reset_sccr1(drv_data); | ||
700 | if (!pxa25x_ssp_comp(drv_data)) | ||
701 | write_SSTO(0, reg); | ||
702 | |||
703 | /* Update total bytes transferred; the count reflects the bytes actually read */ | ||
704 | drv_data->cur_msg->actual_length += drv_data->len - | ||
705 | (drv_data->rx_end - drv_data->rx); | ||
706 | |||
707 | /* Transfer delays and chip select release are | ||
708 | * handled in pump_transfers or giveback | ||
709 | */ | ||
710 | |||
711 | /* Move to next transfer */ | ||
712 | drv_data->cur_msg->state = next_transfer(drv_data); | ||
713 | |||
714 | /* Schedule transfer tasklet */ | ||
715 | tasklet_schedule(&drv_data->pump_transfers); | ||
716 | } | ||
717 | |||
718 | static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | ||
719 | { | ||
720 | void __iomem *reg = drv_data->ioaddr; | ||
721 | |||
722 | u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? | ||
723 | drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; | ||
724 | |||
725 | u32 irq_status = read_SSSR(reg) & irq_mask; | ||
726 | |||
727 | if (irq_status & SSSR_ROR) { | ||
728 | int_error_stop(drv_data, "interrupt_transfer: fifo overrun"); | ||
729 | return IRQ_HANDLED; | ||
730 | } | ||
731 | |||
732 | if (irq_status & SSSR_TINT) { | ||
733 | write_SSSR(SSSR_TINT, reg); | ||
734 | if (drv_data->read(drv_data)) { | ||
735 | int_transfer_complete(drv_data); | ||
736 | return IRQ_HANDLED; | ||
737 | } | ||
738 | } | ||
739 | |||
740 | /* Drain rx fifo, Fill tx fifo and prevent overruns */ | ||
741 | do { | ||
742 | if (drv_data->read(drv_data)) { | ||
743 | int_transfer_complete(drv_data); | ||
744 | return IRQ_HANDLED; | ||
745 | } | ||
746 | } while (drv_data->write(drv_data)); | ||
747 | |||
748 | if (drv_data->read(drv_data)) { | ||
749 | int_transfer_complete(drv_data); | ||
750 | return IRQ_HANDLED; | ||
751 | } | ||
752 | |||
753 | if (drv_data->tx == drv_data->tx_end) { | ||
754 | u32 bytes_left; | ||
755 | u32 sccr1_reg; | ||
756 | |||
757 | sccr1_reg = read_SSCR1(reg); | ||
758 | sccr1_reg &= ~SSCR1_TIE; | ||
759 | |||
760 | /* | ||
761 | * PXA25x_SSP has no timeout, set up rx threshold for the | ||
762 | * remaining RX bytes. | ||
763 | */ | ||
764 | if (pxa25x_ssp_comp(drv_data)) { | ||
765 | |||
766 | sccr1_reg &= ~SSCR1_RFT; | ||
767 | |||
768 | bytes_left = drv_data->rx_end - drv_data->rx; | ||
769 | switch (drv_data->n_bytes) { | ||
770 | case 4: | ||
771 | bytes_left >>= 1; | ||
772 | case 2: | ||
773 | bytes_left >>= 1; | ||
774 | } | ||
775 | |||
776 | if (bytes_left > RX_THRESH_DFLT) | ||
777 | bytes_left = RX_THRESH_DFLT; | ||
778 | |||
779 | sccr1_reg |= SSCR1_RxTresh(bytes_left); | ||
780 | } | ||
781 | write_SSCR1(sccr1_reg, reg); | ||
782 | } | ||
783 | |||
784 | /* We did something */ | ||
785 | return IRQ_HANDLED; | ||
786 | } | ||
787 | |||
788 | static irqreturn_t ssp_int(int irq, void *dev_id) | ||
789 | { | ||
790 | struct driver_data *drv_data = dev_id; | ||
791 | void __iomem *reg = drv_data->ioaddr; | ||
792 | u32 sccr1_reg = read_SSCR1(reg); | ||
793 | u32 mask = drv_data->mask_sr; | ||
794 | u32 status; | ||
795 | |||
796 | status = read_SSSR(reg); | ||
797 | |||
798 | /* Ignore possible writes if we don't need to write */ | ||
799 | if (!(sccr1_reg & SSCR1_TIE)) | ||
800 | mask &= ~SSSR_TFS; | ||
801 | |||
802 | if (!(status & mask)) | ||
803 | return IRQ_NONE; | ||
804 | |||
805 | if (!drv_data->cur_msg) { | ||
806 | |||
807 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
808 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
809 | if (!pxa25x_ssp_comp(drv_data)) | ||
810 | write_SSTO(0, reg); | ||
811 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
812 | |||
813 | dev_err(&drv_data->pdev->dev, "bad message state " | ||
814 | "in interrupt handler\n"); | ||
815 | |||
816 | /* Never fail */ | ||
817 | return IRQ_HANDLED; | ||
818 | } | ||
819 | |||
820 | return drv_data->transfer_handler(drv_data); | ||
821 | } | ||
822 | |||
823 | static int set_dma_burst_and_threshold(struct chip_data *chip, | ||
824 | struct spi_device *spi, | ||
825 | u8 bits_per_word, u32 *burst_code, | ||
826 | u32 *threshold) | ||
827 | { | ||
828 | struct pxa2xx_spi_chip *chip_info = | ||
829 | (struct pxa2xx_spi_chip *)spi->controller_data; | ||
830 | int bytes_per_word; | ||
831 | int burst_bytes; | ||
832 | int thresh_words; | ||
833 | int req_burst_size; | ||
834 | int retval = 0; | ||
835 | |||
836 | /* Set the threshold (in register-width units) to represent the same | ||
837 | * amount of data as the burst size (in bytes). The computation below | ||
838 | * is (burst_size rounded up to the nearest 8 bytes, words or long words) | ||
839 | * divided by (bytes/register); the tx threshold is the inverse of | ||
840 | * the rx threshold, so that there will always be enough data in the rx | ||
841 | * fifo to satisfy a burst, and there will always be enough space in the | ||
842 | * tx fifo to accept a burst (a tx burst will overwrite the fifo if | ||
843 | * there is not enough space); there must always remain enough empty | ||
844 | * space in the rx fifo for any data loaded into the tx fifo. | ||
845 | * Whenever burst_size (in bytes) equals bits/word, the fifo threshold | ||
846 | * will be 8, or half the fifo. | ||
847 | * The threshold can only be set to 2, 4 or 8, but not 16, because | ||
848 | * to burst 16 into the tx fifo, the fifo would have to be empty; | ||
849 | * however, the minimum fifo trigger level is 1, and the tx will | ||
850 | * request service when the fifo is at this level, with only 15 spaces left. | ||
851 | */ | ||
852 | |||
853 | /* find bytes/word */ | ||
854 | if (bits_per_word <= 8) | ||
855 | bytes_per_word = 1; | ||
856 | else if (bits_per_word <= 16) | ||
857 | bytes_per_word = 2; | ||
858 | else | ||
859 | bytes_per_word = 4; | ||
860 | |||
861 | /* use struct pxa2xx_spi_chip->dma_burst_size if available */ | ||
862 | if (chip_info) | ||
863 | req_burst_size = chip_info->dma_burst_size; | ||
864 | else { | ||
865 | switch (chip->dma_burst_size) { | ||
866 | default: | ||
867 | /* if the default burst size is not set, | ||
868 | * do it now */ | ||
869 | chip->dma_burst_size = DCMD_BURST8; | ||
870 | case DCMD_BURST8: | ||
871 | req_burst_size = 8; | ||
872 | break; | ||
873 | case DCMD_BURST16: | ||
874 | req_burst_size = 16; | ||
875 | break; | ||
876 | case DCMD_BURST32: | ||
877 | req_burst_size = 32; | ||
878 | break; | ||
879 | } | ||
880 | } | ||
881 | if (req_burst_size <= 8) { | ||
882 | *burst_code = DCMD_BURST8; | ||
883 | burst_bytes = 8; | ||
884 | } else if (req_burst_size <= 16) { | ||
885 | if (bytes_per_word == 1) { | ||
886 | /* don't burst more than 1/2 the fifo */ | ||
887 | *burst_code = DCMD_BURST8; | ||
888 | burst_bytes = 8; | ||
889 | retval = 1; | ||
890 | } else { | ||
891 | *burst_code = DCMD_BURST16; | ||
892 | burst_bytes = 16; | ||
893 | } | ||
894 | } else { | ||
895 | if (bytes_per_word == 1) { | ||
896 | /* don't burst more than 1/2 the fifo */ | ||
897 | *burst_code = DCMD_BURST8; | ||
898 | burst_bytes = 8; | ||
899 | retval = 1; | ||
900 | } else if (bytes_per_word == 2) { | ||
901 | /* don't burst more than 1/2 the fifo */ | ||
902 | *burst_code = DCMD_BURST16; | ||
903 | burst_bytes = 16; | ||
904 | retval = 1; | ||
905 | } else { | ||
906 | *burst_code = DCMD_BURST32; | ||
907 | burst_bytes = 32; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | thresh_words = burst_bytes / bytes_per_word; | ||
912 | |||
913 | /* thresh_words will be between 2 and 8 */ | ||
914 | *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) | ||
915 | | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); | ||
916 | |||
917 | return retval; | ||
918 | } | ||
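A worked example of the computation above, derived directly from the code and shown only for illustration: with bits_per_word = 16 and a requested burst of 16 bytes, bytes_per_word is 2, the burst code stays DCMD_BURST16 (burst_bytes = 16), thresh_words = 16 / 2 = 8, and *threshold becomes SSCR1_RxTresh(8) | SSCR1_TxTresh(8), i.e. half of the 16-entry FIFO on each side, with retval = 0 (no burst reduction needed).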
919 | |||
920 | static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate) | ||
921 | { | ||
922 | unsigned long ssp_clk = clk_get_rate(ssp->clk); | ||
923 | |||
924 | if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP) | ||
925 | return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8; | ||
926 | else | ||
927 | return ((ssp_clk / rate - 1) & 0xfff) << 8; | ||
928 | } | ||
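To illustrate ssp_get_clk_div() with an assumed ssp_clk of 13 MHz (the real rate comes from clk_get_rate()): requesting 1 MHz on a non-PXA25x/CE4100 port gives (13000000 / 1000000 - 1) = 12 in the SCR field, a divide-by-13, so exactly 1 MHz; the same request on a PXA25x or CE4100 port gives (13000000 / (2 * 1000000) - 1) = 5 with integer division, and the fixed divide-by-two then yields 13 MHz / (2 * 6) ≈ 1.08 MHz. The value is shifted left by 8 so it can be OR-ed straight into the SSCR0 SCR field.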
929 | |||
930 | static void pump_transfers(unsigned long data) | ||
931 | { | ||
932 | struct driver_data *drv_data = (struct driver_data *)data; | ||
933 | struct spi_message *message = NULL; | ||
934 | struct spi_transfer *transfer = NULL; | ||
935 | struct spi_transfer *previous = NULL; | ||
936 | struct chip_data *chip = NULL; | ||
937 | struct ssp_device *ssp = drv_data->ssp; | ||
938 | void __iomem *reg = drv_data->ioaddr; | ||
939 | u32 clk_div = 0; | ||
940 | u8 bits = 0; | ||
941 | u32 speed = 0; | ||
942 | u32 cr0; | ||
943 | u32 cr1; | ||
944 | u32 dma_thresh = drv_data->cur_chip->dma_threshold; | ||
945 | u32 dma_burst = drv_data->cur_chip->dma_burst_size; | ||
946 | |||
947 | /* Get current state information */ | ||
948 | message = drv_data->cur_msg; | ||
949 | transfer = drv_data->cur_transfer; | ||
950 | chip = drv_data->cur_chip; | ||
951 | |||
952 | /* Handle for abort */ | ||
953 | if (message->state == ERROR_STATE) { | ||
954 | message->status = -EIO; | ||
955 | giveback(drv_data); | ||
956 | return; | ||
957 | } | ||
958 | |||
959 | /* Handle end of message */ | ||
960 | if (message->state == DONE_STATE) { | ||
961 | message->status = 0; | ||
962 | giveback(drv_data); | ||
963 | return; | ||
964 | } | ||
965 | |||
966 | /* Delay if requested at end of transfer before CS change */ | ||
967 | if (message->state == RUNNING_STATE) { | ||
968 | previous = list_entry(transfer->transfer_list.prev, | ||
969 | struct spi_transfer, | ||
970 | transfer_list); | ||
971 | if (previous->delay_usecs) | ||
972 | udelay(previous->delay_usecs); | ||
973 | |||
974 | /* Drop chip select only if cs_change is requested */ | ||
975 | if (previous->cs_change) | ||
976 | cs_deassert(drv_data); | ||
977 | } | ||
978 | |||
979 | /* Check for transfers that need multiple DMA segments */ | ||
980 | if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { | ||
981 | |||
982 | /* reject already-mapped transfers; PIO won't always work */ | ||
983 | if (message->is_dma_mapped | ||
984 | || transfer->rx_dma || transfer->tx_dma) { | ||
985 | dev_err(&drv_data->pdev->dev, | ||
986 | "pump_transfers: mapped transfer length " | ||
987 | "of %u is greater than %d\n", | ||
988 | transfer->len, MAX_DMA_LEN); | ||
989 | message->status = -EINVAL; | ||
990 | giveback(drv_data); | ||
991 | return; | ||
992 | } | ||
993 | |||
994 | /* warn ... we force this to PIO mode */ | ||
995 | if (printk_ratelimit()) | ||
996 | dev_warn(&message->spi->dev, "pump_transfers: " | ||
997 | "DMA disabled for transfer length %ld " | ||
998 | "greater than %d\n", | ||
999 | (long)drv_data->len, MAX_DMA_LEN); | ||
1000 | } | ||
1001 | |||
1002 | /* Setup the transfer state based on the type of transfer */ | ||
1003 | if (flush(drv_data) == 0) { | ||
1004 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | ||
1005 | message->status = -EIO; | ||
1006 | giveback(drv_data); | ||
1007 | return; | ||
1008 | } | ||
1009 | drv_data->n_bytes = chip->n_bytes; | ||
1010 | drv_data->dma_width = chip->dma_width; | ||
1011 | drv_data->tx = (void *)transfer->tx_buf; | ||
1012 | drv_data->tx_end = drv_data->tx + transfer->len; | ||
1013 | drv_data->rx = transfer->rx_buf; | ||
1014 | drv_data->rx_end = drv_data->rx + transfer->len; | ||
1015 | drv_data->rx_dma = transfer->rx_dma; | ||
1016 | drv_data->tx_dma = transfer->tx_dma; | ||
1017 | drv_data->len = transfer->len & DCMD_LENGTH; | ||
1018 | drv_data->write = drv_data->tx ? chip->write : null_writer; | ||
1019 | drv_data->read = drv_data->rx ? chip->read : null_reader; | ||
1020 | |||
1021 | /* Change speed and bits per word on a per-transfer basis */ | ||
1022 | cr0 = chip->cr0; | ||
1023 | if (transfer->speed_hz || transfer->bits_per_word) { | ||
1024 | |||
1025 | bits = chip->bits_per_word; | ||
1026 | speed = chip->speed_hz; | ||
1027 | |||
1028 | if (transfer->speed_hz) | ||
1029 | speed = transfer->speed_hz; | ||
1030 | |||
1031 | if (transfer->bits_per_word) | ||
1032 | bits = transfer->bits_per_word; | ||
1033 | |||
1034 | clk_div = ssp_get_clk_div(ssp, speed); | ||
1035 | |||
1036 | if (bits <= 8) { | ||
1037 | drv_data->n_bytes = 1; | ||
1038 | drv_data->dma_width = DCMD_WIDTH1; | ||
1039 | drv_data->read = drv_data->read != null_reader ? | ||
1040 | u8_reader : null_reader; | ||
1041 | drv_data->write = drv_data->write != null_writer ? | ||
1042 | u8_writer : null_writer; | ||
1043 | } else if (bits <= 16) { | ||
1044 | drv_data->n_bytes = 2; | ||
1045 | drv_data->dma_width = DCMD_WIDTH2; | ||
1046 | drv_data->read = drv_data->read != null_reader ? | ||
1047 | u16_reader : null_reader; | ||
1048 | drv_data->write = drv_data->write != null_writer ? | ||
1049 | u16_writer : null_writer; | ||
1050 | } else if (bits <= 32) { | ||
1051 | drv_data->n_bytes = 4; | ||
1052 | drv_data->dma_width = DCMD_WIDTH4; | ||
1053 | drv_data->read = drv_data->read != null_reader ? | ||
1054 | u32_reader : null_reader; | ||
1055 | drv_data->write = drv_data->write != null_writer ? | ||
1056 | u32_writer : null_writer; | ||
1057 | } | ||
1058 | /* if bits/word is changed in dma mode, then the thresholds | ||
1059 | * and burst size must also be re-checked */ | ||
1060 | if (chip->enable_dma) { | ||
1061 | if (set_dma_burst_and_threshold(chip, message->spi, | ||
1062 | bits, &dma_burst, | ||
1063 | &dma_thresh)) | ||
1064 | if (printk_ratelimit()) | ||
1065 | dev_warn(&message->spi->dev, | ||
1066 | "pump_transfers: " | ||
1067 | "DMA burst size reduced to " | ||
1068 | "match bits_per_word\n"); | ||
1069 | } | ||
1070 | |||
1071 | cr0 = clk_div | ||
1072 | | SSCR0_Motorola | ||
1073 | | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) | ||
1074 | | SSCR0_SSE | ||
1075 | | (bits > 16 ? SSCR0_EDSS : 0); | ||
1076 | } | ||
1077 | |||
1078 | message->state = RUNNING_STATE; | ||
1079 | |||
1080 | /* Try to map dma buffer and do a dma transfer if successful, but | ||
1081 | * only if the length is non-zero and less than MAX_DMA_LEN. | ||
1082 | * | ||
1083 | * Zero-length non-descriptor DMA is illegal on PXA2xx; force use | ||
1084 | * of PIO instead. Care is needed above because the transfer may | ||
1085 | * have been passed with buffers that are already dma mapped. | ||
1086 | * A zero-length transfer in PIO mode will not try to write/read | ||
1087 | * to/from the buffers | ||
1088 | * | ||
1089 | * REVISIT large transfers are exactly where we most want to be | ||
1090 | * using DMA. If this happens much, split those transfers into | ||
1091 | * multiple DMA segments rather than forcing PIO. | ||
1092 | */ | ||
1093 | drv_data->dma_mapped = 0; | ||
1094 | if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) | ||
1095 | drv_data->dma_mapped = map_dma_buffers(drv_data); | ||
1096 | if (drv_data->dma_mapped) { | ||
1097 | |||
1098 | /* Ensure we have the correct interrupt handler */ | ||
1099 | drv_data->transfer_handler = dma_transfer; | ||
1100 | |||
1101 | /* Setup rx DMA Channel */ | ||
1102 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
1103 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
1104 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
1105 | if (drv_data->rx == drv_data->null_dma_buf) | ||
1106 | /* No target address increment */ | ||
1107 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
1108 | | drv_data->dma_width | ||
1109 | | dma_burst | ||
1110 | | drv_data->len; | ||
1111 | else | ||
1112 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
1113 | | DCMD_FLOWSRC | ||
1114 | | drv_data->dma_width | ||
1115 | | dma_burst | ||
1116 | | drv_data->len; | ||
1117 | |||
1118 | /* Setup tx DMA Channel */ | ||
1119 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
1120 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
1121 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
1122 | if (drv_data->tx == drv_data->null_dma_buf) | ||
1123 | /* No source address increment */ | ||
1124 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
1125 | | drv_data->dma_width | ||
1126 | | dma_burst | ||
1127 | | drv_data->len; | ||
1128 | else | ||
1129 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
1130 | | DCMD_FLOWTRG | ||
1131 | | drv_data->dma_width | ||
1132 | | dma_burst | ||
1133 | | drv_data->len; | ||
1134 | |||
1135 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
1136 | if (drv_data->ssp_type == PXA25x_SSP) | ||
1137 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
1138 | |||
1139 | /* Clear status and start DMA engine */ | ||
1140 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; | ||
1141 | write_SSSR(drv_data->clear_sr, reg); | ||
1142 | DCSR(drv_data->rx_channel) |= DCSR_RUN; | ||
1143 | DCSR(drv_data->tx_channel) |= DCSR_RUN; | ||
1144 | } else { | ||
1145 | /* Ensure we have the correct interrupt handler */ | ||
1146 | drv_data->transfer_handler = interrupt_transfer; | ||
1147 | |||
1148 | /* Clear status */ | ||
1149 | cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1; | ||
1150 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
1151 | } | ||
1152 | |||
1153 | /* see if we need to reload the config registers */ | ||
1154 | if ((read_SSCR0(reg) != cr0) | ||
1155 | || (read_SSCR1(reg) & SSCR1_CHANGE_MASK) != | ||
1156 | (cr1 & SSCR1_CHANGE_MASK)) { | ||
1157 | |||
1158 | /* stop the SSP, and update the other bits */ | ||
1159 | write_SSCR0(cr0 & ~SSCR0_SSE, reg); | ||
1160 | if (!pxa25x_ssp_comp(drv_data)) | ||
1161 | write_SSTO(chip->timeout, reg); | ||
1162 | /* first set CR1 without interrupt and service enables */ | ||
1163 | write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg); | ||
1164 | /* restart the SSP */ | ||
1165 | write_SSCR0(cr0, reg); | ||
1166 | |||
1167 | } else { | ||
1168 | if (!pxa25x_ssp_comp(drv_data)) | ||
1169 | write_SSTO(chip->timeout, reg); | ||
1170 | } | ||
1171 | |||
1172 | cs_assert(drv_data); | ||
1173 | |||
1174 | /* after chip select, release the data by enabling service | ||
1175 | * requests and interrupts, without changing any mode bits */ | ||
1176 | write_SSCR1(cr1, reg); | ||
1177 | } | ||
1178 | |||
1179 | static void pump_messages(struct work_struct *work) | ||
1180 | { | ||
1181 | struct driver_data *drv_data = | ||
1182 | container_of(work, struct driver_data, pump_messages); | ||
1183 | unsigned long flags; | ||
1184 | |||
1185 | /* Lock queue and check for queue work */ | ||
1186 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1187 | if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { | ||
1188 | drv_data->busy = 0; | ||
1189 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1190 | return; | ||
1191 | } | ||
1192 | |||
1193 | /* Make sure we are not already running a message */ | ||
1194 | if (drv_data->cur_msg) { | ||
1195 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1196 | return; | ||
1197 | } | ||
1198 | |||
1199 | /* Extract head of queue */ | ||
1200 | drv_data->cur_msg = list_entry(drv_data->queue.next, | ||
1201 | struct spi_message, queue); | ||
1202 | list_del_init(&drv_data->cur_msg->queue); | ||
1203 | |||
1204 | /* Initial message state */ | ||
1205 | drv_data->cur_msg->state = START_STATE; | ||
1206 | drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, | ||
1207 | struct spi_transfer, | ||
1208 | transfer_list); | ||
1209 | |||
1210 | /* prepare to setup the SSP, in pump_transfers, using the per | ||
1211 | * chip configuration */ | ||
1212 | drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); | ||
1213 | |||
1214 | /* Mark as busy and launch transfers */ | ||
1215 | tasklet_schedule(&drv_data->pump_transfers); | ||
1216 | |||
1217 | drv_data->busy = 1; | ||
1218 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1219 | } | ||
1220 | |||
1221 | static int transfer(struct spi_device *spi, struct spi_message *msg) | ||
1222 | { | ||
1223 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
1224 | unsigned long flags; | ||
1225 | |||
1226 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1227 | |||
1228 | if (drv_data->run == QUEUE_STOPPED) { | ||
1229 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1230 | return -ESHUTDOWN; | ||
1231 | } | ||
1232 | |||
1233 | msg->actual_length = 0; | ||
1234 | msg->status = -EINPROGRESS; | ||
1235 | msg->state = START_STATE; | ||
1236 | |||
1237 | list_add_tail(&msg->queue, &drv_data->queue); | ||
1238 | |||
1239 | if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) | ||
1240 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
1241 | |||
1242 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1243 | |||
1244 | return 0; | ||
1245 | } | ||
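The transfer() hook above is what the SPI core invokes when a protocol driver queues a message. A minimal, hypothetical caller might look like this (device and buffer names are invented; only the spi_message/spi_transfer API usage is the point):

	#include <linux/spi/spi.h>

	/* Hypothetical protocol-driver code: queue one 4-byte write and wait. */
	static int mydev_send(struct spi_device *spi, const u8 *buf)
	{
		struct spi_transfer t = {
			.tx_buf	= buf,
			.len	= 4,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);	/* reaches transfer() via the SPI core */
	}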
1246 | |||
1247 | static int setup_cs(struct spi_device *spi, struct chip_data *chip, | ||
1248 | struct pxa2xx_spi_chip *chip_info) | ||
1249 | { | ||
1250 | int err = 0; | ||
1251 | |||
1252 | if (chip == NULL || chip_info == NULL) | ||
1253 | return 0; | ||
1254 | |||
1255 | /* NOTE: setup() can be called multiple times, possibly with | ||
1256 | * different chip_info, release previously requested GPIO | ||
1257 | */ | ||
1258 | if (gpio_is_valid(chip->gpio_cs)) | ||
1259 | gpio_free(chip->gpio_cs); | ||
1260 | |||
1261 | /* If (*cs_control) is provided, ignore GPIO chip select */ | ||
1262 | if (chip_info->cs_control) { | ||
1263 | chip->cs_control = chip_info->cs_control; | ||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | if (gpio_is_valid(chip_info->gpio_cs)) { | ||
1268 | err = gpio_request(chip_info->gpio_cs, "SPI_CS"); | ||
1269 | if (err) { | ||
1270 | dev_err(&spi->dev, "failed to request chip select " | ||
1271 | "GPIO%d\n", chip_info->gpio_cs); | ||
1272 | return err; | ||
1273 | } | ||
1274 | |||
1275 | chip->gpio_cs = chip_info->gpio_cs; | ||
1276 | chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH; | ||
1277 | |||
1278 | err = gpio_direction_output(chip->gpio_cs, | ||
1279 | !chip->gpio_cs_inverted); | ||
1280 | } | ||
1281 | |||
1282 | return err; | ||
1283 | } | ||
1284 | |||
1285 | static int setup(struct spi_device *spi) | ||
1286 | { | ||
1287 | struct pxa2xx_spi_chip *chip_info = NULL; | ||
1288 | struct chip_data *chip; | ||
1289 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
1290 | struct ssp_device *ssp = drv_data->ssp; | ||
1291 | unsigned int clk_div; | ||
1292 | uint tx_thres = TX_THRESH_DFLT; | ||
1293 | uint rx_thres = RX_THRESH_DFLT; | ||
1294 | |||
1295 | if (!pxa25x_ssp_comp(drv_data) | ||
1296 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) { | ||
1297 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " | ||
1298 | "b/w not 4-32 for type non-PXA25x_SSP\n", | ||
1299 | drv_data->ssp_type, spi->bits_per_word); | ||
1300 | return -EINVAL; | ||
1301 | } else if (pxa25x_ssp_comp(drv_data) | ||
1302 | && (spi->bits_per_word < 4 | ||
1303 | || spi->bits_per_word > 16)) { | ||
1304 | dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d " | ||
1305 | "b/w not 4-16 for type PXA25x_SSP\n", | ||
1306 | drv_data->ssp_type, spi->bits_per_word); | ||
1307 | return -EINVAL; | ||
1308 | } | ||
1309 | |||
1310 | /* Only alloc on first setup */ | ||
1311 | chip = spi_get_ctldata(spi); | ||
1312 | if (!chip) { | ||
1313 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | ||
1314 | if (!chip) { | ||
1315 | dev_err(&spi->dev, | ||
1316 | "failed setup: can't allocate chip data\n"); | ||
1317 | return -ENOMEM; | ||
1318 | } | ||
1319 | |||
1320 | if (drv_data->ssp_type == CE4100_SSP) { | ||
1321 | if (spi->chip_select > 4) { | ||
1322 | dev_err(&spi->dev, "failed setup: " | ||
1323 | "cs number must not be > 4.\n"); | ||
1324 | kfree(chip); | ||
1325 | return -EINVAL; | ||
1326 | } | ||
1327 | |||
1328 | chip->frm = spi->chip_select; | ||
1329 | } else | ||
1330 | chip->gpio_cs = -1; | ||
1331 | chip->enable_dma = 0; | ||
1332 | chip->timeout = TIMOUT_DFLT; | ||
1333 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | ||
1334 | DCMD_BURST8 : 0; | ||
1335 | } | ||
1336 | |||
1337 | /* protocol drivers may change the chip settings, so... | ||
1338 | * if chip_info exists, use it */ | ||
1339 | chip_info = spi->controller_data; | ||
1340 | |||
1341 | /* chip_info isn't always needed */ | ||
1342 | chip->cr1 = 0; | ||
1343 | if (chip_info) { | ||
1344 | if (chip_info->timeout) | ||
1345 | chip->timeout = chip_info->timeout; | ||
1346 | if (chip_info->tx_threshold) | ||
1347 | tx_thres = chip_info->tx_threshold; | ||
1348 | if (chip_info->rx_threshold) | ||
1349 | rx_thres = chip_info->rx_threshold; | ||
1350 | chip->enable_dma = drv_data->master_info->enable_dma; | ||
1351 | chip->dma_threshold = 0; | ||
1352 | if (chip_info->enable_loopback) | ||
1353 | chip->cr1 = SSCR1_LBM; | ||
1354 | } | ||
1355 | |||
1356 | chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) | | ||
1357 | (SSCR1_TxTresh(tx_thres) & SSCR1_TFT); | ||
1358 | |||
1359 | /* set dma burst and threshold outside of chip_info path so that if | ||
1360 | * chip_info goes away after setting chip->enable_dma, the | ||
1361 | * burst and threshold can still respond to changes in bits_per_word */ | ||
1362 | if (chip->enable_dma) { | ||
1363 | /* set up legal burst and threshold for dma */ | ||
1364 | if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, | ||
1365 | &chip->dma_burst_size, | ||
1366 | &chip->dma_threshold)) { | ||
1367 | dev_warn(&spi->dev, "in setup: DMA burst size reduced " | ||
1368 | "to match bits_per_word\n"); | ||
1369 | } | ||
1370 | } | ||
1371 | |||
1372 | clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz); | ||
1373 | chip->speed_hz = spi->max_speed_hz; | ||
1374 | |||
1375 | chip->cr0 = clk_div | ||
1376 | | SSCR0_Motorola | ||
1377 | | SSCR0_DataSize(spi->bits_per_word > 16 ? | ||
1378 | spi->bits_per_word - 16 : spi->bits_per_word) | ||
1379 | | SSCR0_SSE | ||
1380 | | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); | ||
1381 | chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH); | ||
1382 | chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0) | ||
1383 | | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0); | ||
1384 | |||
1385 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ | ||
1386 | if (!pxa25x_ssp_comp(drv_data)) | ||
1387 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", | ||
1388 | clk_get_rate(ssp->clk) | ||
1389 | / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)), | ||
1390 | chip->enable_dma ? "DMA" : "PIO"); | ||
1391 | else | ||
1392 | dev_dbg(&spi->dev, "%ld Hz actual, %s\n", | ||
1393 | clk_get_rate(ssp->clk) / 2 | ||
1394 | / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)), | ||
1395 | chip->enable_dma ? "DMA" : "PIO"); | ||
1396 | |||
1397 | if (spi->bits_per_word <= 8) { | ||
1398 | chip->n_bytes = 1; | ||
1399 | chip->dma_width = DCMD_WIDTH1; | ||
1400 | chip->read = u8_reader; | ||
1401 | chip->write = u8_writer; | ||
1402 | } else if (spi->bits_per_word <= 16) { | ||
1403 | chip->n_bytes = 2; | ||
1404 | chip->dma_width = DCMD_WIDTH2; | ||
1405 | chip->read = u16_reader; | ||
1406 | chip->write = u16_writer; | ||
1407 | } else if (spi->bits_per_word <= 32) { | ||
1408 | chip->cr0 |= SSCR0_EDSS; | ||
1409 | chip->n_bytes = 4; | ||
1410 | chip->dma_width = DCMD_WIDTH4; | ||
1411 | chip->read = u32_reader; | ||
1412 | chip->write = u32_writer; | ||
1413 | } else { | ||
1414 | dev_err(&spi->dev, "invalid wordsize\n"); | ||
1415 | return -ENODEV; | ||
1416 | } | ||
1417 | chip->bits_per_word = spi->bits_per_word; | ||
1418 | |||
1419 | spi_set_ctldata(spi, chip); | ||
1420 | |||
1421 | if (drv_data->ssp_type == CE4100_SSP) | ||
1422 | return 0; | ||
1423 | |||
1424 | return setup_cs(spi, chip, chip_info); | ||
1425 | } | ||
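setup() consumes the optional struct pxa2xx_spi_chip passed in via spi->controller_data. A hypothetical board file could provide it along the following lines (all names and numbers are invented for illustration; the fields shown are the ones setup() and setup_cs() read):

	#include <linux/spi/spi.h>
	#include <linux/spi/pxa2xx_spi.h>

	/* Hypothetical board code for an SPI device on bus 1, chip select 0. */
	static struct pxa2xx_spi_chip mydev_chip_info = {
		.tx_threshold	= 8,
		.rx_threshold	= 8,
		.dma_burst_size	= 8,
		.timeout	= 235,
		.gpio_cs	= 24,	/* assumed GPIO used as chip select */
	};

	static struct spi_board_info mydev_board_info[] __initdata = {
		{
			.modalias	 = "mydev",
			.max_speed_hz	 = 1000000,
			.bus_num	 = 1,
			.chip_select	 = 0,
			.mode		 = SPI_MODE_0,
			.controller_data = &mydev_chip_info,
		},
	};

	/* registered from the machine init code:
	 * spi_register_board_info(mydev_board_info, ARRAY_SIZE(mydev_board_info));
	 */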
1426 | |||
1427 | static void cleanup(struct spi_device *spi) | ||
1428 | { | ||
1429 | struct chip_data *chip = spi_get_ctldata(spi); | ||
1430 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
1431 | |||
1432 | if (!chip) | ||
1433 | return; | ||
1434 | |||
1435 | if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs)) | ||
1436 | gpio_free(chip->gpio_cs); | ||
1437 | |||
1438 | kfree(chip); | ||
1439 | } | ||
1440 | |||
1441 | static int __devinit init_queue(struct driver_data *drv_data) | ||
1442 | { | ||
1443 | INIT_LIST_HEAD(&drv_data->queue); | ||
1444 | spin_lock_init(&drv_data->lock); | ||
1445 | |||
1446 | drv_data->run = QUEUE_STOPPED; | ||
1447 | drv_data->busy = 0; | ||
1448 | |||
1449 | tasklet_init(&drv_data->pump_transfers, | ||
1450 | pump_transfers, (unsigned long)drv_data); | ||
1451 | |||
1452 | INIT_WORK(&drv_data->pump_messages, pump_messages); | ||
1453 | drv_data->workqueue = create_singlethread_workqueue( | ||
1454 | dev_name(drv_data->master->dev.parent)); | ||
1455 | if (drv_data->workqueue == NULL) | ||
1456 | return -EBUSY; | ||
1457 | |||
1458 | return 0; | ||
1459 | } | ||
1460 | |||
1461 | static int start_queue(struct driver_data *drv_data) | ||
1462 | { | ||
1463 | unsigned long flags; | ||
1464 | |||
1465 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1466 | |||
1467 | if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { | ||
1468 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1469 | return -EBUSY; | ||
1470 | } | ||
1471 | |||
1472 | drv_data->run = QUEUE_RUNNING; | ||
1473 | drv_data->cur_msg = NULL; | ||
1474 | drv_data->cur_transfer = NULL; | ||
1475 | drv_data->cur_chip = NULL; | ||
1476 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1477 | |||
1478 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
1479 | |||
1480 | return 0; | ||
1481 | } | ||
1482 | |||
1483 | static int stop_queue(struct driver_data *drv_data) | ||
1484 | { | ||
1485 | unsigned long flags; | ||
1486 | unsigned limit = 500; | ||
1487 | int status = 0; | ||
1488 | |||
1489 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1490 | |||
1491 | /* This is a bit lame, but is optimized for the common execution path. | ||
1492 | * A wait_queue on the drv_data->busy could be used, but then the common | ||
1493 | * execution path (pump_messages) would be required to call wake_up or | ||
1494 | * friends on every SPI message. Do this instead */ | ||
1495 | drv_data->run = QUEUE_STOPPED; | ||
1496 | while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) { | ||
1497 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1498 | msleep(10); | ||
1499 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1500 | } | ||
1501 | |||
1502 | if (!list_empty(&drv_data->queue) || drv_data->busy) | ||
1503 | status = -EBUSY; | ||
1504 | |||
1505 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1506 | |||
1507 | return status; | ||
1508 | } | ||
1509 | |||
1510 | static int destroy_queue(struct driver_data *drv_data) | ||
1511 | { | ||
1512 | int status; | ||
1513 | |||
1514 | status = stop_queue(drv_data); | ||
1515 | /* we are unloading the module or failing to load (only two calls | ||
1516 | * to this routine), and neither call can handle a return value. | ||
1517 | * However, destroy_workqueue calls flush_workqueue, and that will | ||
1518 | * block until all work is done. If the reason that stop_queue | ||
1519 | * timed out is that the work will never finish, then it does no | ||
1520 | * good to call destroy_workqueue, so return anyway. */ | ||
1521 | if (status != 0) | ||
1522 | return status; | ||
1523 | |||
1524 | destroy_workqueue(drv_data->workqueue); | ||
1525 | |||
1526 | return 0; | ||
1527 | } | ||
1528 | |||
1529 | static int __devinit pxa2xx_spi_probe(struct platform_device *pdev) | ||
1530 | { | ||
1531 | struct device *dev = &pdev->dev; | ||
1532 | struct pxa2xx_spi_master *platform_info; | ||
1533 | struct spi_master *master; | ||
1534 | struct driver_data *drv_data; | ||
1535 | struct ssp_device *ssp; | ||
1536 | int status; | ||
1537 | |||
1538 | platform_info = dev->platform_data; | ||
1539 | |||
1540 | ssp = pxa_ssp_request(pdev->id, pdev->name); | ||
1541 | if (ssp == NULL) { | ||
1542 | dev_err(&pdev->dev, "failed to request SSP%d\n", pdev->id); | ||
1543 | return -ENODEV; | ||
1544 | } | ||
1545 | |||
1546 | /* Allocate master with space for drv_data and null dma buffer */ | ||
1547 | master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); | ||
1548 | if (!master) { | ||
1549 | dev_err(&pdev->dev, "cannot alloc spi_master\n"); | ||
1550 | pxa_ssp_free(ssp); | ||
1551 | return -ENOMEM; | ||
1552 | } | ||
1553 | drv_data = spi_master_get_devdata(master); | ||
1554 | drv_data->master = master; | ||
1555 | drv_data->master_info = platform_info; | ||
1556 | drv_data->pdev = pdev; | ||
1557 | drv_data->ssp = ssp; | ||
1558 | |||
1559 | master->dev.parent = &pdev->dev; | ||
1560 | master->dev.of_node = pdev->dev.of_node; | ||
1561 | /* the spi->mode bits understood by this driver: */ | ||
1562 | master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; | ||
1563 | |||
1564 | master->bus_num = pdev->id; | ||
1565 | master->num_chipselect = platform_info->num_chipselect; | ||
1566 | master->dma_alignment = DMA_ALIGNMENT; | ||
1567 | master->cleanup = cleanup; | ||
1568 | master->setup = setup; | ||
1569 | master->transfer = transfer; | ||
1570 | |||
1571 | drv_data->ssp_type = ssp->type; | ||
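	/* The 16 spare bytes allocated after driver_data hold an 8-byte-aligned
	 * dummy word; the DMA paths point at it when a transfer has no rx_buf
	 * or tx_buf, instead of at caller memory. */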
1572 | drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data + | ||
1573 | sizeof(struct driver_data), 8); | ||
1574 | |||
1575 | drv_data->ioaddr = ssp->mmio_base; | ||
1576 | drv_data->ssdr_physical = ssp->phys_base + SSDR; | ||
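	/* PXA25x-compatible SSP ports have no receiver-timeout support
	 * (SSCR1_TINTE / SSSR_TINT / SSTO), so the interrupt-enable, DMA and
	 * status-clear masks differ by SSP generation. */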
1577 | if (pxa25x_ssp_comp(drv_data)) { | ||
1578 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; | ||
1579 | drv_data->dma_cr1 = 0; | ||
1580 | drv_data->clear_sr = SSSR_ROR; | ||
1581 | drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
1582 | } else { | ||
1583 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; | ||
1584 | drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; | ||
1585 | drv_data->clear_sr = SSSR_ROR | SSSR_TINT; | ||
1586 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
1587 | } | ||
1588 | |||
1589 | status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev), | ||
1590 | drv_data); | ||
1591 | if (status < 0) { | ||
1592 | dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq); | ||
1593 | goto out_error_master_alloc; | ||
1594 | } | ||
1595 | |||
1596 | /* Set up DMA if requested */ | ||
1597 | drv_data->tx_channel = -1; | ||
1598 | drv_data->rx_channel = -1; | ||
1599 | if (platform_info->enable_dma) { | ||
1600 | |||
1601 | /* Get two DMA channels (rx and tx) */ | ||
1602 | drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", | ||
1603 | DMA_PRIO_HIGH, | ||
1604 | dma_handler, | ||
1605 | drv_data); | ||
1606 | if (drv_data->rx_channel < 0) { | ||
1607 | dev_err(dev, "problem (%d) requesting rx channel\n", | ||
1608 | drv_data->rx_channel); | ||
1609 | status = -ENODEV; | ||
1610 | goto out_error_irq_alloc; | ||
1611 | } | ||
1612 | drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", | ||
1613 | DMA_PRIO_MEDIUM, | ||
1614 | dma_handler, | ||
1615 | drv_data); | ||
1616 | if (drv_data->tx_channel < 0) { | ||
1617 | dev_err(dev, "problem (%d) requesting tx channel\n", | ||
1618 | drv_data->tx_channel); | ||
1619 | status = -ENODEV; | ||
1620 | goto out_error_dma_alloc; | ||
1621 | } | ||
1622 | |||
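		/* Map the SSP RX/TX DMA request lines onto the channels just
		 * allocated; DRCMR_MAPVLD marks each mapping as valid. */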
1623 | DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; | ||
1624 | DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; | ||
1625 | } | ||
1626 | |||
1627 | /* Enable SOC clock */ | ||
1628 | clk_enable(ssp->clk); | ||
1629 | |||
1630 | /* Load default SSP configuration */ | ||
1631 | write_SSCR0(0, drv_data->ioaddr); | ||
1632 | write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) | | ||
1633 | SSCR1_TxTresh(TX_THRESH_DFLT), | ||
1634 | drv_data->ioaddr); | ||
1635 | write_SSCR0(SSCR0_SCR(2) | ||
1636 | | SSCR0_Motorola | ||
1637 | | SSCR0_DataSize(8), | ||
1638 | drv_data->ioaddr); | ||
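	/* 8-bit Motorola SPI frames with a slow default bit clock; the port
	 * enable bit (SSE) is left clear here, so the SSP stays disabled until
	 * a transfer programs the chip's real configuration. */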
1639 | if (!pxa25x_ssp_comp(drv_data)) | ||
1640 | write_SSTO(0, drv_data->ioaddr); | ||
1641 | write_SSPSP(0, drv_data->ioaddr); | ||
1642 | |||
1643 | /* Initialize and start the message queue */ | ||
1644 | status = init_queue(drv_data); | ||
1645 | if (status != 0) { | ||
1646 | dev_err(&pdev->dev, "problem initializing queue\n"); | ||
1647 | goto out_error_clock_enabled; | ||
1648 | } | ||
1649 | status = start_queue(drv_data); | ||
1650 | if (status != 0) { | ||
1651 | dev_err(&pdev->dev, "problem starting queue\n"); | ||
1652 | goto out_error_clock_enabled; | ||
1653 | } | ||
1654 | |||
1655 | /* Register with the SPI framework */ | ||
1656 | platform_set_drvdata(pdev, drv_data); | ||
1657 | status = spi_register_master(master); | ||
1658 | if (status != 0) { | ||
1659 | dev_err(&pdev->dev, "problem registering spi master\n"); | ||
1660 | goto out_error_queue_alloc; | ||
1661 | } | ||
1662 | |||
1663 | return status; | ||
1664 | |||
1665 | out_error_queue_alloc: | ||
1666 | destroy_queue(drv_data); | ||
1667 | |||
1668 | out_error_clock_enabled: | ||
1669 | clk_disable(ssp->clk); | ||
1670 | |||
1671 | out_error_dma_alloc: | ||
1672 | if (drv_data->tx_channel >= 0) | ||
1673 | pxa_free_dma(drv_data->tx_channel); | ||
1674 | if (drv_data->rx_channel >= 0) | ||
1675 | pxa_free_dma(drv_data->rx_channel); | ||
1676 | |||
1677 | out_error_irq_alloc: | ||
1678 | free_irq(ssp->irq, drv_data); | ||
1679 | |||
1680 | out_error_master_alloc: | ||
1681 | spi_master_put(master); | ||
1682 | pxa_ssp_free(ssp); | ||
1683 | return status; | ||
1684 | } | ||
1685 | |||
1686 | static int pxa2xx_spi_remove(struct platform_device *pdev) | ||
1687 | { | ||
1688 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
1689 | struct ssp_device *ssp; | ||
1690 | int status = 0; | ||
1691 | |||
1692 | if (!drv_data) | ||
1693 | return 0; | ||
1694 | ssp = drv_data->ssp; | ||
1695 | |||
1696 | /* Remove the queue */ | ||
1697 | status = destroy_queue(drv_data); | ||
1698 | if (status != 0) | ||
1699 | /* the kernel does not check the return status of this | ||
1700 | * routine (mod->exit, within the kernel). Therefore | ||
1701 | * nothing is gained by returning an error from here; the module is | ||
1702 | * going away regardless, and we should not leave any more | ||
1703 | * resources allocated than necessary. We cannot free the | ||
1704 | * message memory in drv_data->queue, but we can release the | ||
1705 | * resources below. I think the kernel should honor -EBUSY | ||
1706 | * returns but... */ | ||
1707 | dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not " | ||
1708 | "complete, message memory not freed\n"); | ||
1709 | |||
1710 | /* Disable the SSP at the peripheral and SOC level */ | ||
1711 | write_SSCR0(0, drv_data->ioaddr); | ||
1712 | clk_disable(ssp->clk); | ||
1713 | |||
1714 | /* Release DMA */ | ||
1715 | if (drv_data->master_info->enable_dma) { | ||
1716 | DRCMR(ssp->drcmr_rx) = 0; | ||
1717 | DRCMR(ssp->drcmr_tx) = 0; | ||
1718 | pxa_free_dma(drv_data->tx_channel); | ||
1719 | pxa_free_dma(drv_data->rx_channel); | ||
1720 | } | ||
1721 | |||
1722 | /* Release IRQ */ | ||
1723 | free_irq(ssp->irq, drv_data); | ||
1724 | |||
1725 | /* Release SSP */ | ||
1726 | pxa_ssp_free(ssp); | ||
1727 | |||
1728 | /* Disconnect from the SPI framework */ | ||
1729 | spi_unregister_master(drv_data->master); | ||
1730 | |||
1731 | /* Prevent double remove */ | ||
1732 | platform_set_drvdata(pdev, NULL); | ||
1733 | |||
1734 | return 0; | ||
1735 | } | ||
1736 | |||
1737 | static void pxa2xx_spi_shutdown(struct platform_device *pdev) | ||
1738 | { | ||
1739 | int status = 0; | ||
1740 | |||
1741 | if ((status = pxa2xx_spi_remove(pdev)) != 0) | ||
1742 | dev_err(&pdev->dev, "shutdown failed with %d\n", status); | ||
1743 | } | ||
1744 | |||
1745 | #ifdef CONFIG_PM | ||
1746 | static int pxa2xx_spi_suspend(struct device *dev) | ||
1747 | { | ||
1748 | struct driver_data *drv_data = dev_get_drvdata(dev); | ||
1749 | struct ssp_device *ssp = drv_data->ssp; | ||
1750 | int status = 0; | ||
1751 | |||
1752 | status = stop_queue(drv_data); | ||
1753 | if (status != 0) | ||
1754 | return status; | ||
1755 | write_SSCR0(0, drv_data->ioaddr); | ||
1756 | clk_disable(ssp->clk); | ||
1757 | |||
1758 | return 0; | ||
1759 | } | ||
1760 | |||
1761 | static int pxa2xx_spi_resume(struct device *dev) | ||
1762 | { | ||
1763 | struct driver_data *drv_data = dev_get_drvdata(dev); | ||
1764 | struct ssp_device *ssp = drv_data->ssp; | ||
1765 | int status = 0; | ||
1766 | |||
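	/* Restore the DMA request-to-channel mappings; the DRCMR settings are
	 * not assumed to survive the suspend/resume cycle. */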
1767 | if (drv_data->rx_channel != -1) | ||
1768 | DRCMR(drv_data->ssp->drcmr_rx) = | ||
1769 | DRCMR_MAPVLD | drv_data->rx_channel; | ||
1770 | if (drv_data->tx_channel != -1) | ||
1771 | DRCMR(drv_data->ssp->drcmr_tx) = | ||
1772 | DRCMR_MAPVLD | drv_data->tx_channel; | ||
1773 | |||
1774 | /* Enable the SSP clock */ | ||
1775 | clk_enable(ssp->clk); | ||
1776 | |||
1777 | /* Start the queue running */ | ||
1778 | status = start_queue(drv_data); | ||
1779 | if (status != 0) { | ||
1780 | dev_err(dev, "problem starting queue (%d)\n", status); | ||
1781 | return status; | ||
1782 | } | ||
1783 | |||
1784 | return 0; | ||
1785 | } | ||
1786 | |||
1787 | static const struct dev_pm_ops pxa2xx_spi_pm_ops = { | ||
1788 | .suspend = pxa2xx_spi_suspend, | ||
1789 | .resume = pxa2xx_spi_resume, | ||
1790 | }; | ||
1791 | #endif | ||
1792 | |||
1793 | static struct platform_driver driver = { | ||
1794 | .driver = { | ||
1795 | .name = "pxa2xx-spi", | ||
1796 | .owner = THIS_MODULE, | ||
1797 | #ifdef CONFIG_PM | ||
1798 | .pm = &pxa2xx_spi_pm_ops, | ||
1799 | #endif | ||
1800 | }, | ||
1801 | .probe = pxa2xx_spi_probe, | ||
1802 | .remove = pxa2xx_spi_remove, | ||
1803 | .shutdown = pxa2xx_spi_shutdown, | ||
1804 | }; | ||
1805 | |||
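/* Register at subsys_initcall time rather than module_init so that the SPI
 * master is available before protocol drivers probe at device_initcall time. */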
1806 | static int __init pxa2xx_spi_init(void) | ||
1807 | { | ||
1808 | return platform_driver_register(&driver); | ||
1809 | } | ||
1810 | subsys_initcall(pxa2xx_spi_init); | ||
1811 | |||
1812 | static void __exit pxa2xx_spi_exit(void) | ||
1813 | { | ||
1814 | platform_driver_unregister(&driver); | ||
1815 | } | ||
1816 | module_exit(pxa2xx_spi_exit); | ||