Diffstat (limited to 'drivers/spi/spi-pxa2xx.c')
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 595
1 file changed, 28 insertions(+), 567 deletions(-)
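
This commit strips the legacy PXA DMA plumbing out of spi-pxa2xx.c and routes it through a new shared header, "spi-pxa2xx.h", plus a set of pxa2xx_spi_* helpers implemented outside this file. The header itself is not part of this diff; the sketch below is an assumed reconstruction of the declarations it would have to provide, based only on the call sites visible in the hunks that follow (return types are inferred where the diff does not show them). If this matches the real header, struct driver_data and struct chip_data also move there, since their definitions disappear from this file in one of the early hunks.

	/* spi-pxa2xx.h -- assumed shape, reconstructed from the call sites below */
	#include <linux/interrupt.h>	/* irqreturn_t */
	#include <linux/spi/spi.h>
	#include <linux/types.h>

	struct driver_data;	/* SSP/SPI driver state, formerly private to spi-pxa2xx.c */
	struct chip_data;	/* per-slave settings, formerly private to spi-pxa2xx.c */

	/* helpers still implemented in spi-pxa2xx.c, now shared (signatures shown in the diff) */
	int pxa2xx_spi_flush(struct driver_data *drv_data);
	void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);

	/* DMA hooks implemented outside this file (e.g. a spi-pxa2xx-dma.c) */
	bool pxa2xx_spi_dma_is_possible(size_t len);		/* return type inferred */
	int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
	irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
	void pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
	void pxa2xx_spi_dma_start(struct driver_data *drv_data);
	int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
	void pxa2xx_spi_dma_release(struct driver_data *drv_data);
	void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
	int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
						   struct spi_device *spi,
						   u8 bits_per_word,
						   u32 *burst_code,
						   u32 *threshold);
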
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 304cf6eb50e6..5b7c2a4ba828 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/spi/pxa2xx_spi.h> | 26 | #include <linux/spi/pxa2xx_spi.h> |
27 | #include <linux/dma-mapping.h> | ||
28 | #include <linux/spi/spi.h> | 27 | #include <linux/spi/spi.h> |
29 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
30 | #include <linux/delay.h> | 29 | #include <linux/delay.h> |
@@ -36,6 +35,7 @@ | |||
36 | #include <asm/irq.h> | 35 | #include <asm/irq.h> |
37 | #include <asm/delay.h> | 36 | #include <asm/delay.h> |
38 | 37 | ||
38 | #include "spi-pxa2xx.h" | ||
39 | 39 | ||
40 | MODULE_AUTHOR("Stephen Street"); | 40 | MODULE_AUTHOR("Stephen Street"); |
41 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | 41 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); |
@@ -46,12 +46,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
46 | 46 | ||
47 | #define TIMOUT_DFLT 1000 | 47 | #define TIMOUT_DFLT 1000 |
48 | 48 | ||
49 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
50 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
51 | #define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT) | ||
52 | #define MAX_DMA_LEN 8191 | ||
53 | #define DMA_ALIGNMENT 8 | ||
54 | |||
55 | /* | 49 | /* |
56 | * for testing SSCR1 changes that require SSP restart, basically | 50 | * for testing SSCR1 changes that require SSP restart, basically |
57 | * everything except the service and interrupt enables, the pxa270 developer | 51 | * everything except the service and interrupt enables, the pxa270 developer |
@@ -66,106 +60,6 @@ MODULE_ALIAS("platform:pxa2xx-spi"); | |||
66 | | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ | 60 | | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \ |
67 | | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) | 61 | | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM) |
68 | 62 | ||
69 | #define DEFINE_SSP_REG(reg, off) \ | ||
70 | static inline u32 read_##reg(void const __iomem *p) \ | ||
71 | { return __raw_readl(p + (off)); } \ | ||
72 | \ | ||
73 | static inline void write_##reg(u32 v, void __iomem *p) \ | ||
74 | { __raw_writel(v, p + (off)); } | ||
75 | |||
76 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
77 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
78 | DEFINE_SSP_REG(SSSR, 0x08) | ||
79 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
80 | DEFINE_SSP_REG(SSDR, 0x10) | ||
81 | DEFINE_SSP_REG(SSTO, 0x28) | ||
82 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
83 | |||
84 | #define START_STATE ((void*)0) | ||
85 | #define RUNNING_STATE ((void*)1) | ||
86 | #define DONE_STATE ((void*)2) | ||
87 | #define ERROR_STATE ((void*)-1) | ||
88 | |||
89 | struct driver_data { | ||
90 | /* Driver model hookup */ | ||
91 | struct platform_device *pdev; | ||
92 | |||
93 | /* SSP Info */ | ||
94 | struct ssp_device *ssp; | ||
95 | |||
96 | /* SPI framework hookup */ | ||
97 | enum pxa_ssp_type ssp_type; | ||
98 | struct spi_master *master; | ||
99 | |||
100 | /* PXA hookup */ | ||
101 | struct pxa2xx_spi_master *master_info; | ||
102 | |||
103 | /* DMA setup stuff */ | ||
104 | int rx_channel; | ||
105 | int tx_channel; | ||
106 | u32 *null_dma_buf; | ||
107 | |||
108 | /* SSP register addresses */ | ||
109 | void __iomem *ioaddr; | ||
110 | u32 ssdr_physical; | ||
111 | |||
112 | /* SSP masks*/ | ||
113 | u32 dma_cr1; | ||
114 | u32 int_cr1; | ||
115 | u32 clear_sr; | ||
116 | u32 mask_sr; | ||
117 | |||
118 | /* Maximun clock rate */ | ||
119 | unsigned long max_clk_rate; | ||
120 | |||
121 | /* Message Transfer pump */ | ||
122 | struct tasklet_struct pump_transfers; | ||
123 | |||
124 | /* Current message transfer state info */ | ||
125 | struct spi_message* cur_msg; | ||
126 | struct spi_transfer* cur_transfer; | ||
127 | struct chip_data *cur_chip; | ||
128 | size_t len; | ||
129 | void *tx; | ||
130 | void *tx_end; | ||
131 | void *rx; | ||
132 | void *rx_end; | ||
133 | int dma_mapped; | ||
134 | dma_addr_t rx_dma; | ||
135 | dma_addr_t tx_dma; | ||
136 | size_t rx_map_len; | ||
137 | size_t tx_map_len; | ||
138 | u8 n_bytes; | ||
139 | u32 dma_width; | ||
140 | int (*write)(struct driver_data *drv_data); | ||
141 | int (*read)(struct driver_data *drv_data); | ||
142 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
143 | void (*cs_control)(u32 command); | ||
144 | }; | ||
145 | |||
146 | struct chip_data { | ||
147 | u32 cr0; | ||
148 | u32 cr1; | ||
149 | u32 psp; | ||
150 | u32 timeout; | ||
151 | u8 n_bytes; | ||
152 | u32 dma_width; | ||
153 | u32 dma_burst_size; | ||
154 | u32 threshold; | ||
155 | u32 dma_threshold; | ||
156 | u8 enable_dma; | ||
157 | u8 bits_per_word; | ||
158 | u32 speed_hz; | ||
159 | union { | ||
160 | int gpio_cs; | ||
161 | unsigned int frm; | ||
162 | }; | ||
163 | int gpio_cs_inverted; | ||
164 | int (*write)(struct driver_data *drv_data); | ||
165 | int (*read)(struct driver_data *drv_data); | ||
166 | void (*cs_control)(u32 command); | ||
167 | }; | ||
168 | |||
169 | static void cs_assert(struct driver_data *drv_data) | 63 | static void cs_assert(struct driver_data *drv_data) |
170 | { | 64 | { |
171 | struct chip_data *chip = drv_data->cur_chip; | 65 | struct chip_data *chip = drv_data->cur_chip; |
@@ -200,26 +94,7 @@ static void cs_deassert(struct driver_data *drv_data) | |||
200 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); | 94 | gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted); |
201 | } | 95 | } |
202 | 96 | ||
203 | static void write_SSSR_CS(struct driver_data *drv_data, u32 val) | 97 | int pxa2xx_spi_flush(struct driver_data *drv_data) |
204 | { | ||
205 | void __iomem *reg = drv_data->ioaddr; | ||
206 | |||
207 | if (drv_data->ssp_type == CE4100_SSP) | ||
208 | val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK; | ||
209 | |||
210 | write_SSSR(val, reg); | ||
211 | } | ||
212 | |||
213 | static int pxa25x_ssp_comp(struct driver_data *drv_data) | ||
214 | { | ||
215 | if (drv_data->ssp_type == PXA25x_SSP) | ||
216 | return 1; | ||
217 | if (drv_data->ssp_type == CE4100_SSP) | ||
218 | return 1; | ||
219 | return 0; | ||
220 | } | ||
221 | |||
222 | static int flush(struct driver_data *drv_data) | ||
223 | { | 98 | { |
224 | unsigned long limit = loops_per_jiffy << 1; | 99 | unsigned long limit = loops_per_jiffy << 1; |
225 | 100 | ||
@@ -345,7 +220,7 @@ static int u32_reader(struct driver_data *drv_data) | |||
345 | return drv_data->rx == drv_data->rx_end; | 220 | return drv_data->rx == drv_data->rx_end; |
346 | } | 221 | } |
347 | 222 | ||
348 | static void *next_transfer(struct driver_data *drv_data) | 223 | void *pxa2xx_spi_next_transfer(struct driver_data *drv_data) |
349 | { | 224 | { |
350 | struct spi_message *msg = drv_data->cur_msg; | 225 | struct spi_message *msg = drv_data->cur_msg; |
351 | struct spi_transfer *trans = drv_data->cur_transfer; | 226 | struct spi_transfer *trans = drv_data->cur_transfer; |
@@ -361,76 +236,6 @@ static void *next_transfer(struct driver_data *drv_data) | |||
361 | return DONE_STATE; | 236 | return DONE_STATE; |
362 | } | 237 | } |
363 | 238 | ||
364 | static int map_dma_buffers(struct driver_data *drv_data) | ||
365 | { | ||
366 | struct spi_message *msg = drv_data->cur_msg; | ||
367 | struct device *dev = &msg->spi->dev; | ||
368 | |||
369 | if (!drv_data->cur_chip->enable_dma) | ||
370 | return 0; | ||
371 | |||
372 | if (msg->is_dma_mapped) | ||
373 | return drv_data->rx_dma && drv_data->tx_dma; | ||
374 | |||
375 | if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) | ||
376 | return 0; | ||
377 | |||
378 | /* Modify setup if rx buffer is null */ | ||
379 | if (drv_data->rx == NULL) { | ||
380 | *drv_data->null_dma_buf = 0; | ||
381 | drv_data->rx = drv_data->null_dma_buf; | ||
382 | drv_data->rx_map_len = 4; | ||
383 | } else | ||
384 | drv_data->rx_map_len = drv_data->len; | ||
385 | |||
386 | |||
387 | /* Modify setup if tx buffer is null */ | ||
388 | if (drv_data->tx == NULL) { | ||
389 | *drv_data->null_dma_buf = 0; | ||
390 | drv_data->tx = drv_data->null_dma_buf; | ||
391 | drv_data->tx_map_len = 4; | ||
392 | } else | ||
393 | drv_data->tx_map_len = drv_data->len; | ||
394 | |||
395 | /* Stream map the tx buffer. Always do DMA_TO_DEVICE first | ||
396 | * so we flush the cache *before* invalidating it, in case | ||
397 | * the tx and rx buffers overlap. | ||
398 | */ | ||
399 | drv_data->tx_dma = dma_map_single(dev, drv_data->tx, | ||
400 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
401 | if (dma_mapping_error(dev, drv_data->tx_dma)) | ||
402 | return 0; | ||
403 | |||
404 | /* Stream map the rx buffer */ | ||
405 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | ||
406 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
407 | if (dma_mapping_error(dev, drv_data->rx_dma)) { | ||
408 | dma_unmap_single(dev, drv_data->tx_dma, | ||
409 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
410 | return 0; | ||
411 | } | ||
412 | |||
413 | return 1; | ||
414 | } | ||
415 | |||
416 | static void unmap_dma_buffers(struct driver_data *drv_data) | ||
417 | { | ||
418 | struct device *dev; | ||
419 | |||
420 | if (!drv_data->dma_mapped) | ||
421 | return; | ||
422 | |||
423 | if (!drv_data->cur_msg->is_dma_mapped) { | ||
424 | dev = &drv_data->cur_msg->spi->dev; | ||
425 | dma_unmap_single(dev, drv_data->rx_dma, | ||
426 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
427 | dma_unmap_single(dev, drv_data->tx_dma, | ||
428 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
429 | } | ||
430 | |||
431 | drv_data->dma_mapped = 0; | ||
432 | } | ||
433 | |||
434 | /* caller already set message->status; dma and pio irqs are blocked */ | 239 | /* caller already set message->status; dma and pio irqs are blocked */ |
435 | static void giveback(struct driver_data *drv_data) | 240 | static void giveback(struct driver_data *drv_data) |
436 | { | 241 | { |
@@ -483,161 +288,6 @@ static void giveback(struct driver_data *drv_data) | |||
483 | drv_data->cur_chip = NULL; | 288 | drv_data->cur_chip = NULL; |
484 | } | 289 | } |
485 | 290 | ||
486 | static int wait_ssp_rx_stall(void const __iomem *ioaddr) | ||
487 | { | ||
488 | unsigned long limit = loops_per_jiffy << 1; | ||
489 | |||
490 | while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit) | ||
491 | cpu_relax(); | ||
492 | |||
493 | return limit; | ||
494 | } | ||
495 | |||
496 | static int wait_dma_channel_stop(int channel) | ||
497 | { | ||
498 | unsigned long limit = loops_per_jiffy << 1; | ||
499 | |||
500 | while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit) | ||
501 | cpu_relax(); | ||
502 | |||
503 | return limit; | ||
504 | } | ||
505 | |||
506 | static void dma_error_stop(struct driver_data *drv_data, const char *msg) | ||
507 | { | ||
508 | void __iomem *reg = drv_data->ioaddr; | ||
509 | |||
510 | /* Stop and reset */ | ||
511 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
512 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
513 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
514 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
515 | if (!pxa25x_ssp_comp(drv_data)) | ||
516 | write_SSTO(0, reg); | ||
517 | flush(drv_data); | ||
518 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
519 | |||
520 | unmap_dma_buffers(drv_data); | ||
521 | |||
522 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | ||
523 | |||
524 | drv_data->cur_msg->state = ERROR_STATE; | ||
525 | tasklet_schedule(&drv_data->pump_transfers); | ||
526 | } | ||
527 | |||
528 | static void dma_transfer_complete(struct driver_data *drv_data) | ||
529 | { | ||
530 | void __iomem *reg = drv_data->ioaddr; | ||
531 | struct spi_message *msg = drv_data->cur_msg; | ||
532 | |||
533 | /* Clear and disable interrupts on SSP and DMA channels*/ | ||
534 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
535 | write_SSSR_CS(drv_data, drv_data->clear_sr); | ||
536 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
537 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
538 | |||
539 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
540 | dev_err(&drv_data->pdev->dev, | ||
541 | "dma_handler: dma rx channel stop failed\n"); | ||
542 | |||
543 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
544 | dev_err(&drv_data->pdev->dev, | ||
545 | "dma_transfer: ssp rx stall failed\n"); | ||
546 | |||
547 | unmap_dma_buffers(drv_data); | ||
548 | |||
549 | /* update the buffer pointer for the amount completed in dma */ | ||
550 | drv_data->rx += drv_data->len - | ||
551 | (DCMD(drv_data->rx_channel) & DCMD_LENGTH); | ||
552 | |||
553 | /* read trailing data from fifo, it does not matter how many | ||
554 | * bytes are in the fifo just read until buffer is full | ||
555 | * or fifo is empty, which ever occurs first */ | ||
556 | drv_data->read(drv_data); | ||
557 | |||
558 | /* return count of what was actually read */ | ||
559 | msg->actual_length += drv_data->len - | ||
560 | (drv_data->rx_end - drv_data->rx); | ||
561 | |||
562 | /* Transfer delays and chip select release are | ||
563 | * handled in pump_transfers or giveback | ||
564 | */ | ||
565 | |||
566 | /* Move to next transfer */ | ||
567 | msg->state = next_transfer(drv_data); | ||
568 | |||
569 | /* Schedule transfer tasklet */ | ||
570 | tasklet_schedule(&drv_data->pump_transfers); | ||
571 | } | ||
572 | |||
573 | static void dma_handler(int channel, void *data) | ||
574 | { | ||
575 | struct driver_data *drv_data = data; | ||
576 | u32 irq_status = DCSR(channel) & DMA_INT_MASK; | ||
577 | |||
578 | if (irq_status & DCSR_BUSERR) { | ||
579 | |||
580 | if (channel == drv_data->tx_channel) | ||
581 | dma_error_stop(drv_data, | ||
582 | "dma_handler: " | ||
583 | "bad bus address on tx channel"); | ||
584 | else | ||
585 | dma_error_stop(drv_data, | ||
586 | "dma_handler: " | ||
587 | "bad bus address on rx channel"); | ||
588 | return; | ||
589 | } | ||
590 | |||
591 | /* PXA255x_SSP has no timeout interrupt, wait for tailing bytes */ | ||
592 | if ((channel == drv_data->tx_channel) | ||
593 | && (irq_status & DCSR_ENDINTR) | ||
594 | && (drv_data->ssp_type == PXA25x_SSP)) { | ||
595 | |||
596 | /* Wait for rx to stall */ | ||
597 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
598 | dev_err(&drv_data->pdev->dev, | ||
599 | "dma_handler: ssp rx stall failed\n"); | ||
600 | |||
601 | /* finish this transfer, start the next */ | ||
602 | dma_transfer_complete(drv_data); | ||
603 | } | ||
604 | } | ||
605 | |||
606 | static irqreturn_t dma_transfer(struct driver_data *drv_data) | ||
607 | { | ||
608 | u32 irq_status; | ||
609 | void __iomem *reg = drv_data->ioaddr; | ||
610 | |||
611 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | ||
612 | if (irq_status & SSSR_ROR) { | ||
613 | dma_error_stop(drv_data, "dma_transfer: fifo overrun"); | ||
614 | return IRQ_HANDLED; | ||
615 | } | ||
616 | |||
617 | /* Check for false positive timeout */ | ||
618 | if ((irq_status & SSSR_TINT) | ||
619 | && (DCSR(drv_data->tx_channel) & DCSR_RUN)) { | ||
620 | write_SSSR(SSSR_TINT, reg); | ||
621 | return IRQ_HANDLED; | ||
622 | } | ||
623 | |||
624 | if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { | ||
625 | |||
626 | /* Clear and disable timeout interrupt, do the rest in | ||
627 | * dma_transfer_complete */ | ||
628 | if (!pxa25x_ssp_comp(drv_data)) | ||
629 | write_SSTO(0, reg); | ||
630 | |||
631 | /* finish this transfer, start the next */ | ||
632 | dma_transfer_complete(drv_data); | ||
633 | |||
634 | return IRQ_HANDLED; | ||
635 | } | ||
636 | |||
637 | /* Opps problem detected */ | ||
638 | return IRQ_NONE; | ||
639 | } | ||
640 | |||
641 | static void reset_sccr1(struct driver_data *drv_data) | 291 | static void reset_sccr1(struct driver_data *drv_data) |
642 | { | 292 | { |
643 | void __iomem *reg = drv_data->ioaddr; | 293 | void __iomem *reg = drv_data->ioaddr; |
@@ -659,7 +309,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) | |||
659 | reset_sccr1(drv_data); | 309 | reset_sccr1(drv_data); |
660 | if (!pxa25x_ssp_comp(drv_data)) | 310 | if (!pxa25x_ssp_comp(drv_data)) |
661 | write_SSTO(0, reg); | 311 | write_SSTO(0, reg); |
662 | flush(drv_data); | 312 | pxa2xx_spi_flush(drv_data); |
663 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | 313 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); |
664 | 314 | ||
665 | dev_err(&drv_data->pdev->dev, "%s\n", msg); | 315 | dev_err(&drv_data->pdev->dev, "%s\n", msg); |
@@ -687,7 +337,7 @@ static void int_transfer_complete(struct driver_data *drv_data) | |||
687 | */ | 337 | */ |
688 | 338 | ||
689 | /* Move to next transfer */ | 339 | /* Move to next transfer */ |
690 | drv_data->cur_msg->state = next_transfer(drv_data); | 340 | drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data); |
691 | 341 | ||
692 | /* Schedule transfer tasklet */ | 342 | /* Schedule transfer tasklet */ |
693 | tasklet_schedule(&drv_data->pump_transfers); | 343 | tasklet_schedule(&drv_data->pump_transfers); |
@@ -798,103 +448,6 @@ static irqreturn_t ssp_int(int irq, void *dev_id) | |||
798 | return drv_data->transfer_handler(drv_data); | 448 | return drv_data->transfer_handler(drv_data); |
799 | } | 449 | } |
800 | 450 | ||
801 | static int set_dma_burst_and_threshold(struct chip_data *chip, | ||
802 | struct spi_device *spi, | ||
803 | u8 bits_per_word, u32 *burst_code, | ||
804 | u32 *threshold) | ||
805 | { | ||
806 | struct pxa2xx_spi_chip *chip_info = | ||
807 | (struct pxa2xx_spi_chip *)spi->controller_data; | ||
808 | int bytes_per_word; | ||
809 | int burst_bytes; | ||
810 | int thresh_words; | ||
811 | int req_burst_size; | ||
812 | int retval = 0; | ||
813 | |||
814 | /* Set the threshold (in registers) to equal the same amount of data | ||
815 | * as represented by burst size (in bytes). The computation below | ||
816 | * is (burst_size rounded up to nearest 8 byte, word or long word) | ||
817 | * divided by (bytes/register); the tx threshold is the inverse of | ||
818 | * the rx, so that there will always be enough data in the rx fifo | ||
819 | * to satisfy a burst, and there will always be enough space in the | ||
820 | * tx fifo to accept a burst (a tx burst will overwrite the fifo if | ||
821 | * there is not enough space), there must always remain enough empty | ||
822 | * space in the rx fifo for any data loaded to the tx fifo. | ||
823 | * Whenever burst_size (in bytes) equals bits/word, the fifo threshold | ||
824 | * will be 8, or half the fifo; | ||
825 | * The threshold can only be set to 2, 4 or 8, but not 16, because | ||
826 | * to burst 16 to the tx fifo, the fifo would have to be empty; | ||
827 | * however, the minimum fifo trigger level is 1, and the tx will | ||
828 | * request service when the fifo is at this level, with only 15 spaces. | ||
829 | */ | ||
830 | |||
831 | /* find bytes/word */ | ||
832 | if (bits_per_word <= 8) | ||
833 | bytes_per_word = 1; | ||
834 | else if (bits_per_word <= 16) | ||
835 | bytes_per_word = 2; | ||
836 | else | ||
837 | bytes_per_word = 4; | ||
838 | |||
839 | /* use struct pxa2xx_spi_chip->dma_burst_size if available */ | ||
840 | if (chip_info) | ||
841 | req_burst_size = chip_info->dma_burst_size; | ||
842 | else { | ||
843 | switch (chip->dma_burst_size) { | ||
844 | default: | ||
845 | /* if the default burst size is not set, | ||
846 | * do it now */ | ||
847 | chip->dma_burst_size = DCMD_BURST8; | ||
848 | case DCMD_BURST8: | ||
849 | req_burst_size = 8; | ||
850 | break; | ||
851 | case DCMD_BURST16: | ||
852 | req_burst_size = 16; | ||
853 | break; | ||
854 | case DCMD_BURST32: | ||
855 | req_burst_size = 32; | ||
856 | break; | ||
857 | } | ||
858 | } | ||
859 | if (req_burst_size <= 8) { | ||
860 | *burst_code = DCMD_BURST8; | ||
861 | burst_bytes = 8; | ||
862 | } else if (req_burst_size <= 16) { | ||
863 | if (bytes_per_word == 1) { | ||
864 | /* don't burst more than 1/2 the fifo */ | ||
865 | *burst_code = DCMD_BURST8; | ||
866 | burst_bytes = 8; | ||
867 | retval = 1; | ||
868 | } else { | ||
869 | *burst_code = DCMD_BURST16; | ||
870 | burst_bytes = 16; | ||
871 | } | ||
872 | } else { | ||
873 | if (bytes_per_word == 1) { | ||
874 | /* don't burst more than 1/2 the fifo */ | ||
875 | *burst_code = DCMD_BURST8; | ||
876 | burst_bytes = 8; | ||
877 | retval = 1; | ||
878 | } else if (bytes_per_word == 2) { | ||
879 | /* don't burst more than 1/2 the fifo */ | ||
880 | *burst_code = DCMD_BURST16; | ||
881 | burst_bytes = 16; | ||
882 | retval = 1; | ||
883 | } else { | ||
884 | *burst_code = DCMD_BURST32; | ||
885 | burst_bytes = 32; | ||
886 | } | ||
887 | } | ||
888 | |||
889 | thresh_words = burst_bytes / bytes_per_word; | ||
890 | |||
891 | /* thresh_words will be between 2 and 8 */ | ||
892 | *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT) | ||
893 | | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT); | ||
894 | |||
895 | return retval; | ||
896 | } | ||
897 | |||
898 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) | 451 | static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate) |
899 | { | 452 | { |
900 | unsigned long ssp_clk = drv_data->max_clk_rate; | 453 | unsigned long ssp_clk = drv_data->max_clk_rate; |
@@ -956,8 +509,8 @@ static void pump_transfers(unsigned long data) | |||
956 | cs_deassert(drv_data); | 509 | cs_deassert(drv_data); |
957 | } | 510 | } |
958 | 511 | ||
959 | /* Check for transfers that need multiple DMA segments */ | 512 | /* Check if we can DMA this transfer */ |
960 | if (transfer->len > MAX_DMA_LEN && chip->enable_dma) { | 513 | if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) { |
961 | 514 | ||
962 | /* reject already-mapped transfers; PIO won't always work */ | 515 | /* reject already-mapped transfers; PIO won't always work */ |
963 | if (message->is_dma_mapped | 516 | if (message->is_dma_mapped |
@@ -980,21 +533,20 @@ static void pump_transfers(unsigned long data) | |||
980 | } | 533 | } |
981 | 534 | ||
982 | /* Setup the transfer state based on the type of transfer */ | 535 | /* Setup the transfer state based on the type of transfer */ |
983 | if (flush(drv_data) == 0) { | 536 | if (pxa2xx_spi_flush(drv_data) == 0) { |
984 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | 537 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); |
985 | message->status = -EIO; | 538 | message->status = -EIO; |
986 | giveback(drv_data); | 539 | giveback(drv_data); |
987 | return; | 540 | return; |
988 | } | 541 | } |
989 | drv_data->n_bytes = chip->n_bytes; | 542 | drv_data->n_bytes = chip->n_bytes; |
990 | drv_data->dma_width = chip->dma_width; | ||
991 | drv_data->tx = (void *)transfer->tx_buf; | 543 | drv_data->tx = (void *)transfer->tx_buf; |
992 | drv_data->tx_end = drv_data->tx + transfer->len; | 544 | drv_data->tx_end = drv_data->tx + transfer->len; |
993 | drv_data->rx = transfer->rx_buf; | 545 | drv_data->rx = transfer->rx_buf; |
994 | drv_data->rx_end = drv_data->rx + transfer->len; | 546 | drv_data->rx_end = drv_data->rx + transfer->len; |
995 | drv_data->rx_dma = transfer->rx_dma; | 547 | drv_data->rx_dma = transfer->rx_dma; |
996 | drv_data->tx_dma = transfer->tx_dma; | 548 | drv_data->tx_dma = transfer->tx_dma; |
997 | drv_data->len = transfer->len & DCMD_LENGTH; | 549 | drv_data->len = transfer->len; |
998 | drv_data->write = drv_data->tx ? chip->write : null_writer; | 550 | drv_data->write = drv_data->tx ? chip->write : null_writer; |
999 | drv_data->read = drv_data->rx ? chip->read : null_reader; | 551 | drv_data->read = drv_data->rx ? chip->read : null_reader; |
1000 | 552 | ||
@@ -1015,21 +567,18 @@ static void pump_transfers(unsigned long data) | |||
1015 | 567 | ||
1016 | if (bits <= 8) { | 568 | if (bits <= 8) { |
1017 | drv_data->n_bytes = 1; | 569 | drv_data->n_bytes = 1; |
1018 | drv_data->dma_width = DCMD_WIDTH1; | ||
1019 | drv_data->read = drv_data->read != null_reader ? | 570 | drv_data->read = drv_data->read != null_reader ? |
1020 | u8_reader : null_reader; | 571 | u8_reader : null_reader; |
1021 | drv_data->write = drv_data->write != null_writer ? | 572 | drv_data->write = drv_data->write != null_writer ? |
1022 | u8_writer : null_writer; | 573 | u8_writer : null_writer; |
1023 | } else if (bits <= 16) { | 574 | } else if (bits <= 16) { |
1024 | drv_data->n_bytes = 2; | 575 | drv_data->n_bytes = 2; |
1025 | drv_data->dma_width = DCMD_WIDTH2; | ||
1026 | drv_data->read = drv_data->read != null_reader ? | 576 | drv_data->read = drv_data->read != null_reader ? |
1027 | u16_reader : null_reader; | 577 | u16_reader : null_reader; |
1028 | drv_data->write = drv_data->write != null_writer ? | 578 | drv_data->write = drv_data->write != null_writer ? |
1029 | u16_writer : null_writer; | 579 | u16_writer : null_writer; |
1030 | } else if (bits <= 32) { | 580 | } else if (bits <= 32) { |
1031 | drv_data->n_bytes = 4; | 581 | drv_data->n_bytes = 4; |
1032 | drv_data->dma_width = DCMD_WIDTH4; | ||
1033 | drv_data->read = drv_data->read != null_reader ? | 582 | drv_data->read = drv_data->read != null_reader ? |
1034 | u32_reader : null_reader; | 583 | u32_reader : null_reader; |
1035 | drv_data->write = drv_data->write != null_writer ? | 584 | drv_data->write = drv_data->write != null_writer ? |
@@ -1038,7 +587,8 @@ static void pump_transfers(unsigned long data) | |||
1038 | /* if bits/word is changed in dma mode, then must check the | 587 | /* if bits/word is changed in dma mode, then must check the |
1039 | * thresholds and burst also */ | 588 | * thresholds and burst also */ |
1040 | if (chip->enable_dma) { | 589 | if (chip->enable_dma) { |
1041 | if (set_dma_burst_and_threshold(chip, message->spi, | 590 | if (pxa2xx_spi_set_dma_burst_and_threshold(chip, |
591 | message->spi, | ||
1042 | bits, &dma_burst, | 592 | bits, &dma_burst, |
1043 | &dma_thresh)) | 593 | &dma_thresh)) |
1044 | if (printk_ratelimit()) | 594 | if (printk_ratelimit()) |
@@ -1057,70 +607,21 @@ static void pump_transfers(unsigned long data) | |||
1057 | 607 | ||
1058 | message->state = RUNNING_STATE; | 608 | message->state = RUNNING_STATE; |
1059 | 609 | ||
1060 | /* Try to map dma buffer and do a dma transfer if successful, but | ||
1061 | * only if the length is non-zero and less than MAX_DMA_LEN. | ||
1062 | * | ||
1063 | * Zero-length non-descriptor DMA is illegal on PXA2xx; force use | ||
1064 | * of PIO instead. Care is needed above because the transfer may | ||
1065 | * have have been passed with buffers that are already dma mapped. | ||
1066 | * A zero-length transfer in PIO mode will not try to write/read | ||
1067 | * to/from the buffers | ||
1068 | * | ||
1069 | * REVISIT large transfers are exactly where we most want to be | ||
1070 | * using DMA. If this happens much, split those transfers into | ||
1071 | * multiple DMA segments rather than forcing PIO. | ||
1072 | */ | ||
1073 | drv_data->dma_mapped = 0; | 610 | drv_data->dma_mapped = 0; |
1074 | if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN) | 611 | if (pxa2xx_spi_dma_is_possible(drv_data->len)) |
1075 | drv_data->dma_mapped = map_dma_buffers(drv_data); | 612 | drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data); |
1076 | if (drv_data->dma_mapped) { | 613 | if (drv_data->dma_mapped) { |
1077 | 614 | ||
1078 | /* Ensure we have the correct interrupt handler */ | 615 | /* Ensure we have the correct interrupt handler */ |
1079 | drv_data->transfer_handler = dma_transfer; | 616 | drv_data->transfer_handler = pxa2xx_spi_dma_transfer; |
1080 | 617 | ||
1081 | /* Setup rx DMA Channel */ | 618 | pxa2xx_spi_dma_prepare(drv_data, dma_burst); |
1082 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
1083 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
1084 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
1085 | if (drv_data->rx == drv_data->null_dma_buf) | ||
1086 | /* No target address increment */ | ||
1087 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
1088 | | drv_data->dma_width | ||
1089 | | dma_burst | ||
1090 | | drv_data->len; | ||
1091 | else | ||
1092 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
1093 | | DCMD_FLOWSRC | ||
1094 | | drv_data->dma_width | ||
1095 | | dma_burst | ||
1096 | | drv_data->len; | ||
1097 | |||
1098 | /* Setup tx DMA Channel */ | ||
1099 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
1100 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
1101 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
1102 | if (drv_data->tx == drv_data->null_dma_buf) | ||
1103 | /* No source address increment */ | ||
1104 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
1105 | | drv_data->dma_width | ||
1106 | | dma_burst | ||
1107 | | drv_data->len; | ||
1108 | else | ||
1109 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
1110 | | DCMD_FLOWTRG | ||
1111 | | drv_data->dma_width | ||
1112 | | dma_burst | ||
1113 | | drv_data->len; | ||
1114 | |||
1115 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
1116 | if (drv_data->ssp_type == PXA25x_SSP) | ||
1117 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
1118 | 619 | ||
1119 | /* Clear status and start DMA engine */ | 620 | /* Clear status and start DMA engine */ |
1120 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; | 621 | cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1; |
1121 | write_SSSR(drv_data->clear_sr, reg); | 622 | write_SSSR(drv_data->clear_sr, reg); |
1122 | DCSR(drv_data->rx_channel) |= DCSR_RUN; | 623 | |
1123 | DCSR(drv_data->tx_channel) |= DCSR_RUN; | 624 | pxa2xx_spi_dma_start(drv_data); |
1124 | } else { | 625 | } else { |
1125 | /* Ensure we have the correct interrupt handler */ | 626 | /* Ensure we have the correct interrupt handler */ |
1126 | drv_data->transfer_handler = interrupt_transfer; | 627 | drv_data->transfer_handler = interrupt_transfer; |
@@ -1262,8 +763,6 @@ static int setup(struct spi_device *spi) | |||
1262 | chip->gpio_cs = -1; | 763 | chip->gpio_cs = -1; |
1263 | chip->enable_dma = 0; | 764 | chip->enable_dma = 0; |
1264 | chip->timeout = TIMOUT_DFLT; | 765 | chip->timeout = TIMOUT_DFLT; |
1265 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | ||
1266 | DCMD_BURST8 : 0; | ||
1267 | } | 766 | } |
1268 | 767 | ||
1269 | /* protocol drivers may change the chip settings, so... | 768 | /* protocol drivers may change the chip settings, so... |
@@ -1293,7 +792,8 @@ static int setup(struct spi_device *spi) | |||
1293 | * burst and threshold can still respond to changes in bits_per_word */ | 792 | * burst and threshold can still respond to changes in bits_per_word */ |
1294 | if (chip->enable_dma) { | 793 | if (chip->enable_dma) { |
1295 | /* set up legal burst and threshold for dma */ | 794 | /* set up legal burst and threshold for dma */ |
1296 | if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word, | 795 | if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi, |
796 | spi->bits_per_word, | ||
1297 | &chip->dma_burst_size, | 797 | &chip->dma_burst_size, |
1298 | &chip->dma_threshold)) { | 798 | &chip->dma_threshold)) { |
1299 | dev_warn(&spi->dev, "in setup: DMA burst size reduced " | 799 | dev_warn(&spi->dev, "in setup: DMA burst size reduced " |
@@ -1328,18 +828,15 @@ static int setup(struct spi_device *spi) | |||
1328 | 828 | ||
1329 | if (spi->bits_per_word <= 8) { | 829 | if (spi->bits_per_word <= 8) { |
1330 | chip->n_bytes = 1; | 830 | chip->n_bytes = 1; |
1331 | chip->dma_width = DCMD_WIDTH1; | ||
1332 | chip->read = u8_reader; | 831 | chip->read = u8_reader; |
1333 | chip->write = u8_writer; | 832 | chip->write = u8_writer; |
1334 | } else if (spi->bits_per_word <= 16) { | 833 | } else if (spi->bits_per_word <= 16) { |
1335 | chip->n_bytes = 2; | 834 | chip->n_bytes = 2; |
1336 | chip->dma_width = DCMD_WIDTH2; | ||
1337 | chip->read = u16_reader; | 835 | chip->read = u16_reader; |
1338 | chip->write = u16_writer; | 836 | chip->write = u16_writer; |
1339 | } else if (spi->bits_per_word <= 32) { | 837 | } else if (spi->bits_per_word <= 32) { |
1340 | chip->cr0 |= SSCR0_EDSS; | 838 | chip->cr0 |= SSCR0_EDSS; |
1341 | chip->n_bytes = 4; | 839 | chip->n_bytes = 4; |
1342 | chip->dma_width = DCMD_WIDTH4; | ||
1343 | chip->read = u32_reader; | 840 | chip->read = u32_reader; |
1344 | chip->write = u32_writer; | 841 | chip->write = u32_writer; |
1345 | } else { | 842 | } else { |
@@ -1447,31 +944,11 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1447 | drv_data->tx_channel = -1; | 944 | drv_data->tx_channel = -1; |
1448 | drv_data->rx_channel = -1; | 945 | drv_data->rx_channel = -1; |
1449 | if (platform_info->enable_dma) { | 946 | if (platform_info->enable_dma) { |
1450 | 947 | status = pxa2xx_spi_dma_setup(drv_data); | |
1451 | /* Get two DMA channels (rx and tx) */ | 948 | if (status) { |
1452 | drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", | 949 | dev_warn(dev, "failed to setup DMA, using PIO\n"); |
1453 | DMA_PRIO_HIGH, | 950 | platform_info->enable_dma = false; |
1454 | dma_handler, | ||
1455 | drv_data); | ||
1456 | if (drv_data->rx_channel < 0) { | ||
1457 | dev_err(dev, "problem (%d) requesting rx channel\n", | ||
1458 | drv_data->rx_channel); | ||
1459 | status = -ENODEV; | ||
1460 | goto out_error_irq_alloc; | ||
1461 | } | 951 | } |
1462 | drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", | ||
1463 | DMA_PRIO_MEDIUM, | ||
1464 | dma_handler, | ||
1465 | drv_data); | ||
1466 | if (drv_data->tx_channel < 0) { | ||
1467 | dev_err(dev, "problem (%d) requesting tx channel\n", | ||
1468 | drv_data->tx_channel); | ||
1469 | status = -ENODEV; | ||
1470 | goto out_error_dma_alloc; | ||
1471 | } | ||
1472 | |||
1473 | DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel; | ||
1474 | DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel; | ||
1475 | } | 952 | } |
1476 | 953 | ||
1477 | /* Enable SOC clock */ | 954 | /* Enable SOC clock */ |
@@ -1507,14 +984,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev) | |||
1507 | 984 | ||
1508 | out_error_clock_enabled: | 985 | out_error_clock_enabled: |
1509 | clk_disable_unprepare(ssp->clk); | 986 | clk_disable_unprepare(ssp->clk); |
1510 | 987 | pxa2xx_spi_dma_release(drv_data); | |
1511 | out_error_dma_alloc: | ||
1512 | if (drv_data->tx_channel != -1) | ||
1513 | pxa_free_dma(drv_data->tx_channel); | ||
1514 | if (drv_data->rx_channel != -1) | ||
1515 | pxa_free_dma(drv_data->rx_channel); | ||
1516 | |||
1517 | out_error_irq_alloc: | ||
1518 | free_irq(ssp->irq, drv_data); | 988 | free_irq(ssp->irq, drv_data); |
1519 | 989 | ||
1520 | out_error_master_alloc: | 990 | out_error_master_alloc: |
@@ -1537,12 +1007,8 @@ static int pxa2xx_spi_remove(struct platform_device *pdev) | |||
1537 | clk_disable_unprepare(ssp->clk); | 1007 | clk_disable_unprepare(ssp->clk); |
1538 | 1008 | ||
1539 | /* Release DMA */ | 1009 | /* Release DMA */ |
1540 | if (drv_data->master_info->enable_dma) { | 1010 | if (drv_data->master_info->enable_dma) |
1541 | DRCMR(ssp->drcmr_rx) = 0; | 1011 | pxa2xx_spi_dma_release(drv_data); |
1542 | DRCMR(ssp->drcmr_tx) = 0; | ||
1543 | pxa_free_dma(drv_data->tx_channel); | ||
1544 | pxa_free_dma(drv_data->rx_channel); | ||
1545 | } | ||
1546 | 1012 | ||
1547 | /* Release IRQ */ | 1013 | /* Release IRQ */ |
1548 | free_irq(ssp->irq, drv_data); | 1014 | free_irq(ssp->irq, drv_data); |
@@ -1589,12 +1055,7 @@ static int pxa2xx_spi_resume(struct device *dev) | |||
1589 | struct ssp_device *ssp = drv_data->ssp; | 1055 | struct ssp_device *ssp = drv_data->ssp; |
1590 | int status = 0; | 1056 | int status = 0; |
1591 | 1057 | ||
1592 | if (drv_data->rx_channel != -1) | 1058 | pxa2xx_spi_dma_resume(drv_data); |
1593 | DRCMR(drv_data->ssp->drcmr_rx) = | ||
1594 | DRCMR_MAPVLD | drv_data->rx_channel; | ||
1595 | if (drv_data->tx_channel != -1) | ||
1596 | DRCMR(drv_data->ssp->drcmr_tx) = | ||
1597 | DRCMR_MAPVLD | drv_data->tx_channel; | ||
1598 | 1059 | ||
1599 | /* Enable the SSP clock */ | 1060 | /* Enable the SSP clock */ |
1600 | clk_prepare_enable(ssp->clk); | 1061 | clk_prepare_enable(ssp->clk); |
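
Pulling the new-side columns of the hunks above together, the DMA handling left in the core driver reduces to a handful of hook calls. The condensed sketch below is assembled from this diff for orientation only; names and error handling are as they appear in the new code, with unrelated lines elided.

	/* probe(): ask the DMA layer for channels, fall back to PIO on failure */
	if (platform_info->enable_dma) {
		status = pxa2xx_spi_dma_setup(drv_data);
		if (status) {
			dev_warn(dev, "failed to setup DMA, using PIO\n");
			platform_info->enable_dma = false;
		}
	}

	/* pump_transfers(): map buffers and start DMA when the length allows it */
	drv_data->dma_mapped = 0;
	if (pxa2xx_spi_dma_is_possible(drv_data->len))
		drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {
		drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
		pxa2xx_spi_dma_prepare(drv_data, dma_burst);
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);
		pxa2xx_spi_dma_start(drv_data);
	}

	/* remove(): hand the channels back; resume(): re-arm the request lines */
	if (drv_data->master_info->enable_dma)
		pxa2xx_spi_dma_release(drv_data);
	pxa2xx_spi_dma_resume(drv_data);
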