author:    Wenyou Yang <wenyou.yang@atmel.com>  2014-01-09 00:19:15 -0500
committer: Mark Brown <broonie@linaro.org>      2014-01-09 12:41:23 -0500
commit:    8090d6d1a415d3ae1a7208995decfab8f60f4f36
tree:      1061adad4383c773a762fa2e6699543cfe48b21b  /drivers/spi/spi-atmel.c
parent:    d3b72c7e6bf33185a5de1db2164ff237759c554c
spi: atmel: Refactor spi-atmel to use SPI framework queue
Replace the deprecated master->transfer with transfer_one_message()
and allow the SPI subsystem to handle all the queuing of messages.
Signed-off-by: Wenyou Yang <wenyou.yang@atmel.com>
Tested-by: Richard Genoud <richard.genoud@gmail.com>
Signed-off-by: Mark Brown <broonie@linaro.org>
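
For context, this is roughly what the queued-master model adopted by this patch looks like from a driver's point of view (a minimal sketch, not code from this patch; the foo_* names are hypothetical). Instead of implementing master->transfer() and maintaining a private message queue, the driver only provides transfer_one_message(); the SPI core queues incoming messages, calls the hook from its own worker thread one message at a time, and the driver hands the finished message back with spi_finalize_current_message():

#include <linux/spi/spi.h>

struct foo_spi {
	/* controller state would live here */
	int dummy;
};

/* hypothetical per-transfer worker: programs PIO/DMA and blocks until done */
static int foo_spi_do_one_transfer(struct foo_spi *fs, struct spi_device *spi,
				   struct spi_transfer *xfer);

static int foo_spi_transfer_one_message(struct spi_master *master,
					struct spi_message *msg)
{
	struct foo_spi *fs = spi_master_get_devdata(master);
	struct spi_transfer *xfer;
	int ret = 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		ret = foo_spi_do_one_transfer(fs, msg->spi, xfer);
		if (ret)
			break;
		msg->actual_length += xfer->len;
	}

	msg->status = ret;
	spi_finalize_current_message(master);	/* hand the message back to the core */
	return ret;
}

/* in probe: master->transfer_one_message = foo_spi_transfer_one_message; */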
Diffstat (limited to 'drivers/spi/spi-atmel.c')
-rw-r--r--   drivers/spi/spi-atmel.c   678
1 file changed, 220 insertions, 458 deletions
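
The other structural change visible in the diff below is the move from a driver tasklet to a struct completion for signalling end of transfer. The pattern, reduced to its essentials (illustrative only; the bar_* names and the start-hardware helper are hypothetical):

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct bar_spi {
	struct completion xfer_completion;	/* init_completion() once at probe */
	int done_status;
};

static void bar_spi_start_hw(struct bar_spi *bs);	/* hypothetical: kick PDC/DMA/PIO */

/* process context: arm the completion, start the hardware, sleep with a timeout */
static int bar_spi_wait_xfer(struct bar_spi *bs)
{
	unsigned long left;

	reinit_completion(&bs->xfer_completion);
	bar_spi_start_hw(bs);

	left = wait_for_completion_timeout(&bs->xfer_completion,
					   msecs_to_jiffies(1000));
	if (!left)
		bs->done_status = -EIO;		/* timed out */
	return bs->done_status;
}

/* interrupt handler or dmaengine callback: just signal the waiter */
static void bar_spi_xfer_done(struct bar_spi *bs)
{
	complete(&bs->xfer_completion);
}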
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b96f9a89cdc6..b0842f751016 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -189,6 +189,8 @@
  */
 #define DMA_MIN_BYTES	16
 
+#define SPI_DMA_TIMEOUT		(msecs_to_jiffies(1000))
+
 struct atmel_spi_dma {
 	struct dma_chan	*chan_rx;
 	struct dma_chan	*chan_tx;
@@ -220,17 +222,13 @@ struct atmel_spi {
 	int			irq;
 	struct clk		*clk;
 	struct platform_device	*pdev;
-	struct spi_device	*stay;
 
-	u8			stopping;
-	struct list_head	queue;
-	struct tasklet_struct	tasklet;
 	struct spi_transfer	*current_transfer;
 	unsigned long		current_remaining_bytes;
-	struct spi_transfer	*next_transfer;
-	unsigned long		next_remaining_bytes;
 	int			done_status;
 
+	struct completion	xfer_completion;
+
 	/* scratch buffer */
 	void			*buffer;
 	dma_addr_t		buffer_dma;
@@ -241,6 +239,9 @@ struct atmel_spi {
 	bool			use_pdc;
 	/* dmaengine data */
 	struct atmel_spi_dma	dma;
+
+	bool			keep_cs;
+	bool			cs_active;
 };
 
 /* Controller-specific per-slave state */
@@ -376,17 +377,6 @@ static inline bool atmel_spi_use_dma(struct atmel_spi *as,
 	return as->use_dma && xfer->len >= DMA_MIN_BYTES;
 }
 
-static inline int atmel_spi_xfer_is_last(struct spi_message *msg,
-					struct spi_transfer *xfer)
-{
-	return msg->transfers.prev == &xfer->transfer_list;
-}
-
-static inline int atmel_spi_xfer_can_be_chained(struct spi_transfer *xfer)
-{
-	return xfer->delay_usecs == 0 && !xfer->cs_change;
-}
-
 static int atmel_spi_dma_slave_config(struct atmel_spi *as,
 				struct dma_slave_config *slave_config,
 				u8 bits_per_word)
@@ -513,23 +503,20 @@ static void dma_callback(void *data)
 	struct spi_master	*master = data;
 	struct atmel_spi	*as = spi_master_get_devdata(master);
 
-	/* trigger SPI tasklet */
-	tasklet_schedule(&as->tasklet);
+	complete(&as->xfer_completion);
 }
 
 /*
  * Next transfer using PIO.
- * lock is held, spi tasklet is blocked
  */
 static void atmel_spi_next_xfer_pio(struct spi_master *master,
 				struct spi_transfer *xfer)
 {
 	struct atmel_spi	*as = spi_master_get_devdata(master);
+	unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
 
 	dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
 
-	as->current_remaining_bytes = xfer->len;
-
 	/* Make sure data is not remaining in RDR */
 	spi_readl(as, RDR);
 	while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
@@ -537,13 +524,14 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
 		cpu_relax();
 	}
 
-	if (xfer->tx_buf)
+	if (xfer->tx_buf) {
 		if (xfer->bits_per_word > 8)
-			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
+			spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
 		else
-			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
-	else
+			spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+	} else {
 		spi_writel(as, TDR, 0);
+	}
 
 	dev_dbg(master->dev.parent,
 		" start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
@@ -556,7 +544,6 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
 
 /*
  * Submit next transfer for DMA.
- * lock is held, spi tasklet is blocked
  */
 static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
 				struct spi_transfer *xfer,
@@ -747,71 +734,37 @@ static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
  * lock is held, spi irq is blocked
  */
 static void atmel_spi_pdc_next_xfer(struct spi_master *master,
-				struct spi_message *msg)
+				struct spi_message *msg,
+				struct spi_transfer *xfer)
 {
 	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_transfer	*xfer;
-	u32			len, remaining;
-	u32			ieval;
+	u32			len;
 	dma_addr_t		tx_dma, rx_dma;
 
-	if (!as->current_transfer)
-		xfer = list_entry(msg->transfers.next,
-				struct spi_transfer, transfer_list);
-	else if (!as->next_transfer)
-		xfer = list_entry(as->current_transfer->transfer_list.next,
-				struct spi_transfer, transfer_list);
-	else
-		xfer = NULL;
-
-	if (xfer) {
-		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-
-		len = xfer->len;
-		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
-		remaining = xfer->len - len;
-
-		spi_writel(as, RPR, rx_dma);
-		spi_writel(as, TPR, tx_dma);
-
-		if (msg->spi->bits_per_word > 8)
-			len >>= 1;
-		spi_writel(as, RCR, len);
-		spi_writel(as, TCR, len);
-
-		atmel_spi_set_xfer_speed(as, msg->spi, xfer);
-
-		dev_dbg(&msg->spi->dev,
-			" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
-			xfer, xfer->len, xfer->tx_buf,
-			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
-			(unsigned long long)xfer->rx_dma);
-	} else {
-		xfer = as->next_transfer;
-		remaining = as->next_remaining_bytes;
-	}
+	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
 
-	as->current_transfer = xfer;
-	as->current_remaining_bytes = remaining;
+	len = as->current_remaining_bytes;
+	atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+	as->current_remaining_bytes -= len;
 
-	if (remaining > 0)
-		len = remaining;
-	else if (!atmel_spi_xfer_is_last(msg, xfer)
-			&& atmel_spi_xfer_can_be_chained(xfer)) {
-		xfer = list_entry(xfer->transfer_list.next,
-				struct spi_transfer, transfer_list);
-		len = xfer->len;
-	} else
-		xfer = NULL;
+	spi_writel(as, RPR, rx_dma);
+	spi_writel(as, TPR, tx_dma);
 
-	as->next_transfer = xfer;
+	if (msg->spi->bits_per_word > 8)
+		len >>= 1;
+	spi_writel(as, RCR, len);
+	spi_writel(as, TCR, len);
 
-	if (xfer) {
-		u32	total;
+	dev_dbg(&msg->spi->dev,
+		" start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+		xfer, xfer->len, xfer->tx_buf,
+		(unsigned long long)xfer->tx_dma, xfer->rx_buf,
+		(unsigned long long)xfer->rx_dma);
 
-		total = len;
+	if (as->current_remaining_bytes) {
+		len = as->current_remaining_bytes;
 		atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
-		as->next_remaining_bytes = total - len;
+		as->current_remaining_bytes -= len;
 
 		spi_writel(as, RNPR, rx_dma);
 		spi_writel(as, TNPR, tx_dma);
@@ -826,11 +779,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
 			xfer, xfer->len, xfer->tx_buf,
 			(unsigned long long)xfer->tx_dma, xfer->rx_buf,
 			(unsigned long long)xfer->rx_dma);
-		ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
-	} else {
-		spi_writel(as, RNCR, 0);
-		spi_writel(as, TNCR, 0);
-		ieval = SPI_BIT(RXBUFF) | SPI_BIT(ENDRX) | SPI_BIT(OVRES);
 	}
 
 	/* REVISIT: We're waiting for ENDRX before we start the next
@@ -843,84 +791,11 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
 	 *
 	 * It should be doable, though. Just not now...
 	 */
-	spi_writel(as, IER, ieval);
+	spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES));
 	spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
 }
 
 /*
- * Choose way to submit next transfer and start it.
- * lock is held, spi tasklet is blocked
- */
-static void atmel_spi_dma_next_xfer(struct spi_master *master,
-				struct spi_message *msg)
-{
-	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_transfer	*xfer;
-	u32			remaining, len;
-
-	remaining = as->current_remaining_bytes;
-	if (remaining) {
-		xfer = as->current_transfer;
-		len = remaining;
-	} else {
-		if (!as->current_transfer)
-			xfer = list_entry(msg->transfers.next,
-				struct spi_transfer, transfer_list);
-		else
-			xfer = list_entry(
-				as->current_transfer->transfer_list.next,
-				struct spi_transfer, transfer_list);
-
-		as->current_transfer = xfer;
-		len = xfer->len;
-		atmel_spi_set_xfer_speed(as, msg->spi, xfer);
-	}
-
-	if (atmel_spi_use_dma(as, xfer)) {
-		u32 total = len;
-		if (!atmel_spi_next_xfer_dma_submit(master, xfer, &len)) {
-			as->current_remaining_bytes = total - len;
-			return;
-		} else {
-			dev_err(&msg->spi->dev, "unable to use DMA, fallback to PIO\n");
-		}
-	}
-
-	/* use PIO if error appened using DMA */
-	atmel_spi_next_xfer_pio(master, xfer);
-}
-
-static void atmel_spi_next_message(struct spi_master *master)
-{
-	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_message	*msg;
-	struct spi_device	*spi;
-
-	BUG_ON(as->current_transfer);
-
-	msg = list_entry(as->queue.next, struct spi_message, queue);
-	spi = msg->spi;
-
-	dev_dbg(master->dev.parent, "start message %p for %s\n",
-		msg, dev_name(&spi->dev));
-
-	/* select chip if it's not still active */
-	if (as->stay) {
-		if (as->stay != spi) {
-			cs_deactivate(as, as->stay);
-			cs_activate(as, spi);
-		}
-		as->stay = NULL;
-	} else
-		cs_activate(as, spi);
-
-	if (as->use_pdc)
-		atmel_spi_pdc_next_xfer(master, msg);
-	else
-		atmel_spi_dma_next_xfer(master, msg);
-}
-
-/*
  * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
  * - The buffer is either valid for CPU access, else NULL
  * - If the buffer is valid, so is its DMA address
@@ -975,41 +850,7 @@ static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
 	spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
 }
 
-static void
-atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
-		struct spi_message *msg, int stay)
-{
-	if (!stay || as->done_status < 0)
-		cs_deactivate(as, msg->spi);
-	else
-		as->stay = msg->spi;
-
-	list_del(&msg->queue);
-	msg->status = as->done_status;
-
-	dev_dbg(master->dev.parent,
-		"xfer complete: %u bytes transferred\n",
-		msg->actual_length);
-
-	atmel_spi_unlock(as);
-	msg->complete(msg->context);
-	atmel_spi_lock(as);
-
-	as->current_transfer = NULL;
-	as->next_transfer = NULL;
-	as->done_status = 0;
-
-	/* continue if needed */
-	if (list_empty(&as->queue) || as->stopping) {
-		if (as->use_pdc)
-			atmel_spi_disable_pdc_transfer(as);
-	} else {
-		atmel_spi_next_message(master);
-	}
-}
-
 /* Called from IRQ
- * lock is held
  *
  * Must update "current_remaining_bytes" to keep track of data
  * to transfer.
@@ -1017,9 +858,7 @@ atmel_spi_msg_done(struct spi_master *master, struct atmel_spi *as,
 static void
 atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 {
-	u8		*txp;
 	u8		*rxp;
-	u16		*txp16;
 	u16		*rxp16;
 	unsigned long	xfer_pos = xfer->len - as->current_remaining_bytes;
 
@@ -1041,96 +880,12 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
 	} else {
 		as->current_remaining_bytes--;
 	}
-
-	if (as->current_remaining_bytes) {
-		if (xfer->tx_buf) {
-			if (xfer->bits_per_word > 8) {
-				txp16 = (u16 *)(((u8 *)xfer->tx_buf)
-							+ xfer_pos + 2);
-				spi_writel(as, TDR, *txp16);
-			} else {
-				txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
-				spi_writel(as, TDR, *txp);
-			}
-		} else {
-			spi_writel(as, TDR, 0);
-		}
-	}
-}
-
-/* Tasklet
- * Called from DMA callback + pio transfer and overrun IRQ.
- */
-static void atmel_spi_tasklet_func(unsigned long data)
-{
-	struct spi_master	*master = (struct spi_master *)data;
-	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_message	*msg;
-	struct spi_transfer	*xfer;
-
-	dev_vdbg(master->dev.parent, "atmel_spi_tasklet_func\n");
-
-	atmel_spi_lock(as);
-
-	xfer = as->current_transfer;
-
-	if (xfer == NULL)
-		/* already been there */
-		goto tasklet_out;
-
-	msg = list_entry(as->queue.next, struct spi_message, queue);
-
-	if (as->current_remaining_bytes == 0) {
-		if (as->done_status < 0) {
-			/* error happened (overrun) */
-			if (atmel_spi_use_dma(as, xfer))
-				atmel_spi_stop_dma(as);
-		} else {
-			/* only update length if no error */
-			msg->actual_length += xfer->len;
-		}
-
-		if (atmel_spi_use_dma(as, xfer))
-			if (!msg->is_dma_mapped)
-				atmel_spi_dma_unmap_xfer(master, xfer);
-
-		if (xfer->delay_usecs)
-			udelay(xfer->delay_usecs);
-
-		if (atmel_spi_xfer_is_last(msg, xfer) || as->done_status < 0) {
-			/* report completed (or erroneous) message */
-			atmel_spi_msg_done(master, as, msg, xfer->cs_change);
-		} else {
-			if (xfer->cs_change) {
-				cs_deactivate(as, msg->spi);
-				udelay(1);
-				cs_activate(as, msg->spi);
-			}
-
-			/*
-			 * Not done yet. Submit the next transfer.
-			 *
-			 * FIXME handle protocol options for xfer
-			 */
-			atmel_spi_dma_next_xfer(master, msg);
-		}
-	} else {
-		/*
-		 * Keep going, we still have data to send in
-		 * the current transfer.
-		 */
-		atmel_spi_dma_next_xfer(master, msg);
-	}
-
-tasklet_out:
-	atmel_spi_unlock(as);
 }
 
 /* Interrupt
  *
  * No need for locking in this Interrupt handler: done_status is the
- * only information modified. What we need is the update of this field
- * before tasklet runs. This is ensured by using barrier.
+ * only information modified.
  */
 static irqreturn_t
 atmel_spi_pio_interrupt(int irq, void *dev_id)
@@ -1158,8 +913,6 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
 		 *
 		 * We will also not process any remaning transfers in
 		 * the message.
-		 *
-		 * All actions are done in tasklet with done_status indication
 		 */
 		as->done_status = -EIO;
 		smp_wmb();
@@ -1167,7 +920,7 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
 		/* Clear any overrun happening while cleaning up */
 		spi_readl(as, SR);
 
-		tasklet_schedule(&as->tasklet);
+		complete(&as->xfer_completion);
 
 	} else if (pending & SPI_BIT(RDRF)) {
 		atmel_spi_lock(as);
@@ -1176,11 +929,10 @@ atmel_spi_pio_interrupt(int irq, void *dev_id)
 		ret = IRQ_HANDLED;
 		xfer = as->current_transfer;
 		atmel_spi_pump_pio_data(as, xfer);
-		if (!as->current_remaining_bytes) {
-			/* no more data to xfer, kick tasklet */
+		if (!as->current_remaining_bytes)
 			spi_writel(as, IDR, pending);
-			tasklet_schedule(&as->tasklet);
-		}
+
+		complete(&as->xfer_completion);
 	}
 
 	atmel_spi_unlock(as);
@@ -1198,116 +950,35 @@ atmel_spi_pdc_interrupt(int irq, void *dev_id)
 {
 	struct spi_master	*master = dev_id;
 	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_message	*msg;
-	struct spi_transfer	*xfer;
 	u32			status, pending, imr;
 	int			ret = IRQ_NONE;
 
-	atmel_spi_lock(as);
-
-	xfer = as->current_transfer;
-	msg = list_entry(as->queue.next, struct spi_message, queue);
-
 	imr = spi_readl(as, IMR);
 	status = spi_readl(as, SR);
 	pending = status & imr;
 
 	if (pending & SPI_BIT(OVRES)) {
-		int timeout;
 
 		ret = IRQ_HANDLED;
 
 		spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
 				     | SPI_BIT(OVRES)));
 
-		/*
-		 * When we get an overrun, we disregard the current
-		 * transfer. Data will not be copied back from any
-		 * bounce buffer and msg->actual_len will not be
-		 * updated with the last xfer.
-		 *
-		 * We will also not process any remaning transfers in
-		 * the message.
-		 *
-		 * First, stop the transfer and unmap the DMA buffers.
-		 */
-		spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
-		if (!msg->is_dma_mapped)
-			atmel_spi_dma_unmap_xfer(master, xfer);
-
-		/* REVISIT: udelay in irq is unfriendly */
-		if (xfer->delay_usecs)
-			udelay(xfer->delay_usecs);
-
-		dev_warn(master->dev.parent, "overrun (%u/%u remaining)\n",
-			 spi_readl(as, TCR), spi_readl(as, RCR));
-
-		/*
-		 * Clean up DMA registers and make sure the data
-		 * registers are empty.
-		 */
-		spi_writel(as, RNCR, 0);
-		spi_writel(as, TNCR, 0);
-		spi_writel(as, RCR, 0);
-		spi_writel(as, TCR, 0);
-		for (timeout = 1000; timeout; timeout--)
-			if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
-				break;
-		if (!timeout)
-			dev_warn(master->dev.parent,
-				 "timeout waiting for TXEMPTY");
-		while (spi_readl(as, SR) & SPI_BIT(RDRF))
-			spi_readl(as, RDR);
-
 		/* Clear any overrun happening while cleaning up */
 		spi_readl(as, SR);
 
 		as->done_status = -EIO;
-		atmel_spi_msg_done(master, as, msg, 0);
+
+		complete(&as->xfer_completion);
+
 	} else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
 		ret = IRQ_HANDLED;
 
 		spi_writel(as, IDR, pending);
 
-		if (as->current_remaining_bytes == 0) {
-			msg->actual_length += xfer->len;
-
-			if (!msg->is_dma_mapped)
-				atmel_spi_dma_unmap_xfer(master, xfer);
-
-			/* REVISIT: udelay in irq is unfriendly */
-			if (xfer->delay_usecs)
-				udelay(xfer->delay_usecs);
-
-			if (atmel_spi_xfer_is_last(msg, xfer)) {
-				/* report completed message */
-				atmel_spi_msg_done(master, as, msg,
-						xfer->cs_change);
-			} else {
-				if (xfer->cs_change) {
-					cs_deactivate(as, msg->spi);
-					udelay(1);
-					cs_activate(as, msg->spi);
-				}
-
-				/*
-				 * Not done yet. Submit the next transfer.
-				 *
-				 * FIXME handle protocol options for xfer
-				 */
-				atmel_spi_pdc_next_xfer(master, msg);
-			}
-		} else {
-			/*
-			 * Keep going, we still have data to send in
-			 * the current transfer.
-			 */
-			atmel_spi_pdc_next_xfer(master, msg);
-		}
+		complete(&as->xfer_completion);
 	}
 
-	atmel_spi_unlock(as);
-
 	return ret;
 }
 
@@ -1322,9 +993,6 @@ static int atmel_spi_setup(struct spi_device *spi)
 
 	as = spi_master_get_devdata(spi->master);
 
-	if (as->stopping)
-		return -ESHUTDOWN;
-
 	if (spi->chip_select > spi->master->num_chipselect) {
 		dev_dbg(&spi->dev,
 			"setup: invalid chipselect %u (%u defined)\n",
@@ -1376,12 +1044,6 @@ static int atmel_spi_setup(struct spi_device *spi)
 		asd->npcs_pin = npcs_pin;
 		spi->controller_state = asd;
 		gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
-	} else {
-		atmel_spi_lock(as);
-		if (as->stay == spi)
-			as->stay = NULL;
-		cs_deactivate(as, spi);
-		atmel_spi_unlock(as);
 	}
 
 	asd->csr = csr;
@@ -1396,97 +1058,218 @@ static int atmel_spi_setup(struct spi_device *spi)
 	return 0;
 }
 
-static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+static int atmel_spi_one_transfer(struct spi_master *master,
+					struct spi_message *msg,
+					struct spi_transfer *xfer)
 {
 	struct atmel_spi	*as;
-	struct spi_transfer	*xfer;
-	struct device		*controller = spi->master->dev.parent;
+	struct spi_device	*spi = msg->spi;
 	u8			bits;
+	u32			len;
 	struct atmel_spi_device	*asd;
+	int			timeout;
+	int			ret;
 
-	as = spi_master_get_devdata(spi->master);
-
-	dev_dbg(controller, "new message %p submitted for %s\n",
-			msg, dev_name(&spi->dev));
+	as = spi_master_get_devdata(master);
 
-	if (unlikely(list_empty(&msg->transfers)))
+	if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+		dev_dbg(&spi->dev, "missing rx or tx buf\n");
 		return -EINVAL;
+	}
 
-	if (as->stopping)
-		return -ESHUTDOWN;
+	if (xfer->bits_per_word) {
+		asd = spi->controller_state;
+		bits = (asd->csr >> 4) & 0xf;
+		if (bits != xfer->bits_per_word - 8) {
+			dev_dbg(&spi->dev,
+				"you can't yet change bits_per_word in transfers\n");
+			return -ENOPROTOOPT;
+		}
+	}
 
-	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
-			dev_dbg(&spi->dev, "missing rx or tx buf\n");
+	if (xfer->bits_per_word > 8) {
+		if (xfer->len % 2) {
+			dev_dbg(&spi->dev,
+				"buffer len should be 16 bits aligned\n");
 			return -EINVAL;
 		}
+	}
+
+	/*
+	 * DMA map early, for performance (empties dcache ASAP) and
+	 * better fault reporting.
+	 */
+	if ((!msg->is_dma_mapped)
+		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+		if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+			return -ENOMEM;
+	}
 
-		if (xfer->bits_per_word) {
-			asd = spi->controller_state;
-			bits = (asd->csr >> 4) & 0xf;
-			if (bits != xfer->bits_per_word - 8) {
-				dev_dbg(&spi->dev,
-					"you can't yet change bits_per_word in transfers\n");
-				return -ENOPROTOOPT;
+	atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+
+	as->done_status = 0;
+	as->current_transfer = xfer;
+	as->current_remaining_bytes = xfer->len;
+	while (as->current_remaining_bytes) {
+		reinit_completion(&as->xfer_completion);
+
+		if (as->use_pdc) {
+			atmel_spi_pdc_next_xfer(master, msg, xfer);
+		} else if (atmel_spi_use_dma(as, xfer)) {
+			len = as->current_remaining_bytes;
+			ret = atmel_spi_next_xfer_dma_submit(master,
+								xfer, &len);
+			if (ret) {
+				dev_err(&spi->dev,
+					"unable to use DMA, fallback to PIO\n");
+				atmel_spi_next_xfer_pio(master, xfer);
+			} else {
+				as->current_remaining_bytes -= len;
 			}
+		} else {
+			atmel_spi_next_xfer_pio(master, xfer);
 		}
 
-		if (xfer->bits_per_word > 8) {
-			if (xfer->len % 2) {
-				dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
-				return -EINVAL;
-			}
+		ret = wait_for_completion_timeout(&as->xfer_completion,
+							SPI_DMA_TIMEOUT);
+		if (WARN_ON(ret == 0)) {
+			dev_err(&spi->dev,
+				"spi trasfer timeout, err %d\n", ret);
+			as->done_status = -EIO;
+		} else {
+			ret = 0;
 		}
 
-		/*
-		 * DMA map early, for performance (empties dcache ASAP) and
-		 * better fault reporting.
-		 */
-		if ((!msg->is_dma_mapped) && (atmel_spi_use_dma(as, xfer)
-			|| as->use_pdc)) {
-			if (atmel_spi_dma_map_xfer(as, xfer) < 0)
-				return -ENOMEM;
+		if (as->done_status)
+			break;
+	}
+
+	if (as->done_status) {
+		if (as->use_pdc) {
+			dev_warn(master->dev.parent,
+				"overrun (%u/%u remaining)\n",
+				spi_readl(as, TCR), spi_readl(as, RCR));
+
+			/*
+			 * Clean up DMA registers and make sure the data
+			 * registers are empty.
+			 */
+			spi_writel(as, RNCR, 0);
+			spi_writel(as, TNCR, 0);
+			spi_writel(as, RCR, 0);
+			spi_writel(as, TCR, 0);
+			for (timeout = 1000; timeout; timeout--)
+				if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+					break;
+			if (!timeout)
+				dev_warn(master->dev.parent,
+					"timeout waiting for TXEMPTY");
+			while (spi_readl(as, SR) & SPI_BIT(RDRF))
+				spi_readl(as, RDR);
+
+			/* Clear any overrun happening while cleaning up */
+			spi_readl(as, SR);
+
+		} else if (atmel_spi_use_dma(as, xfer)) {
+			atmel_spi_stop_dma(as);
+		}
+
+		if (!msg->is_dma_mapped
+			&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+			atmel_spi_dma_unmap_xfer(master, xfer);
+
+		return 0;
+
+	} else {
+		/* only update length if no error */
+		msg->actual_length += xfer->len;
+	}
+
+	if (!msg->is_dma_mapped
+		&& (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+		atmel_spi_dma_unmap_xfer(master, xfer);
+
+	if (xfer->delay_usecs)
+		udelay(xfer->delay_usecs);
+
+	if (xfer->cs_change) {
+		if (list_is_last(&xfer->transfer_list,
+				 &msg->transfers)) {
+			as->keep_cs = true;
+		} else {
+			as->cs_active = !as->cs_active;
+			if (as->cs_active)
+				cs_activate(as, msg->spi);
+			else
+				cs_deactivate(as, msg->spi);
 		}
 	}
 
-#ifdef VERBOSE
+	return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+						struct spi_message *msg)
+{
+	struct atmel_spi *as;
+	struct spi_transfer *xfer;
+	struct spi_device *spi = msg->spi;
+	int ret = 0;
+
+	as = spi_master_get_devdata(master);
+
+	dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+		msg, dev_name(&spi->dev));
+
+	if (unlikely(list_empty(&msg->transfers)))
+		return -EINVAL;
+
+	atmel_spi_lock(as);
+	cs_activate(as, spi);
+
+	as->cs_active = true;
+	as->keep_cs = false;
+
+	msg->status = 0;
+	msg->actual_length = 0;
+
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		ret = atmel_spi_one_transfer(master, msg, xfer);
+		if (ret)
+			goto msg_done;
+	}
+
+	if (as->use_pdc)
+		atmel_spi_disable_pdc_transfer(as);
+
 	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-		dev_dbg(controller,
+		dev_dbg(&spi->dev,
 			" xfer %p: len %u tx %p/%08x rx %p/%08x\n",
 			xfer, xfer->len,
 			xfer->tx_buf, xfer->tx_dma,
 			xfer->rx_buf, xfer->rx_dma);
 	}
-#endif
 
-	msg->status = -EINPROGRESS;
-	msg->actual_length = 0;
+msg_done:
+	if (!as->keep_cs)
+		cs_deactivate(as, msg->spi);
 
-	atmel_spi_lock(as);
-	list_add_tail(&msg->queue, &as->queue);
-	if (!as->current_transfer)
-		atmel_spi_next_message(spi->master);
 	atmel_spi_unlock(as);
 
-	return 0;
+	msg->status = as->done_status;
+	spi_finalize_current_message(spi->master);
+
+	return ret;
 }
 
 static void atmel_spi_cleanup(struct spi_device *spi)
 {
-	struct atmel_spi	*as = spi_master_get_devdata(spi->master);
 	struct atmel_spi_device	*asd = spi->controller_state;
 	unsigned		gpio = (unsigned) spi->controller_data;
 
 	if (!asd)
 		return;
 
-	atmel_spi_lock(as);
-	if (as->stay == spi) {
-		as->stay = NULL;
-		cs_deactivate(as, spi);
-	}
-	atmel_spi_unlock(as);
-
 	spi->controller_state = NULL;
 	gpio_free(gpio);
 	kfree(asd);
@@ -1545,7 +1328,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	master->bus_num = pdev->id;
 	master->num_chipselect = master->dev.of_node ? 0 : 4;
 	master->setup = atmel_spi_setup;
-	master->transfer = atmel_spi_transfer;
+	master->transfer_one_message = atmel_spi_transfer_one_message;
 	master->cleanup = atmel_spi_cleanup;
 	platform_set_drvdata(pdev, master);
 
@@ -1561,7 +1344,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		goto out_free;
 
 	spin_lock_init(&as->lock);
-	INIT_LIST_HEAD(&as->queue);
 
 	as->pdev = pdev;
 	as->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1573,6 +1355,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	as->irq = irq;
 	as->clk = clk;
 
+	init_completion(&as->xfer_completion);
+
 	atmel_get_caps(as);
 
 	as->use_dma = false;
@@ -1591,9 +1375,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
 		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
 					0, dev_name(&pdev->dev), master);
 	} else {
-		tasklet_init(&as->tasklet, atmel_spi_tasklet_func,
-					(unsigned long)master);
-
 		ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
 					0, dev_name(&pdev->dev), master);
 	}
@@ -1637,8 +1418,6 @@ out_free_dma:
 out_free_irq:
 out_unmap_regs:
 out_free_buffer:
-	if (!as->use_pdc)
-		tasklet_kill(&as->tasklet);
 	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
 			as->buffer_dma);
 out_free:
@@ -1650,12 +1429,9 @@ static int atmel_spi_remove(struct platform_device *pdev)
 {
 	struct spi_master	*master = platform_get_drvdata(pdev);
 	struct atmel_spi	*as = spi_master_get_devdata(master);
-	struct spi_message	*msg;
-	struct spi_transfer	*xfer;
 
 	/* reset the hardware and block queue progress */
 	spin_lock_irq(&as->lock);
-	as->stopping = 1;
 	if (as->use_dma) {
 		atmel_spi_stop_dma(as);
 		atmel_spi_release_dma(as);
@@ -1666,20 +1442,6 @@ static int atmel_spi_remove(struct platform_device *pdev)
 	spi_readl(as, SR);
 	spin_unlock_irq(&as->lock);
 
-	/* Terminate remaining queued transfers */
-	list_for_each_entry(msg, &as->queue, queue) {
-		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			if (!msg->is_dma_mapped
-				&& (atmel_spi_use_dma(as, xfer)
-					|| as->use_pdc))
-				atmel_spi_dma_unmap_xfer(master, xfer);
-		}
-		msg->status = -ESHUTDOWN;
-		msg->complete(msg->context);
-	}
-
-	if (!as->use_pdc)
-		tasklet_kill(&as->tasklet);
 	dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
 			as->buffer_dma);
 