author    | Grant Likely <grant.likely@secretlab.ca> | 2011-06-09 14:42:57 -0400
committer | Grant Likely <grant.likely@secretlab.ca> | 2011-06-09 14:42:57 -0400
commit    | e4c8308c852e6b3fa49215052a5b9e99597dee99 (patch)
tree      | a44ef3377c17d69c2210e809ac2552540ce6f1fc /drivers/spi/spi-ep93xx.c
parent    | c37f3c2749b53225d36faa5c583203c5f12ae15b (diff)
parent    | 626a96db11698119a67eeda130488e869aa6f14e (diff)
Merge branch 'ep93xx-dma' into spi/next
Diffstat (limited to 'drivers/spi/spi-ep93xx.c')
-rw-r--r-- | drivers/spi/spi-ep93xx.c | 303
1 file changed, 291 insertions, 12 deletions
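
The probe changes in this merge only set up DMA when the board opts in through
platform data (the info->use_dma test in ep93xx_spi_probe() below). As a rough
board-side sketch of that opt-in (the ep93xx_spi_info field names are assumed
from the mach-ep93xx headers of this period and the variable name is made up;
none of this is part of the commit):

    /* Illustrative only, not part of this commit: board platform data
     * asking the ep93xx SPI driver to use DMA. Field names assumed
     * from <mach/ep93xx_spi.h>. */
    #include <mach/ep93xx_spi.h>

    static struct ep93xx_spi_info example_spi_info __initdata = {
            .num_chipselect = 1,    /* assumed field name */
            .use_dma        = true, /* flag tested by ep93xx_spi_probe() */
    };

If use_dma is left false, or if channel allocation fails at probe time, the
driver keeps using the interrupt-driven PIO path it already had.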
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index d3570071e98f..1cf645479bfe 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -1,7 +1,7 @@
 /*
  * Driver for Cirrus Logic EP93xx SPI controller.
  *
- * Copyright (c) 2010 Mika Westerberg
+ * Copyright (C) 2010-2011 Mika Westerberg
  *
  * Explicit FIFO handling code was inspired by amba-pl022 driver.
  *
@@ -21,13 +21,16 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/dmaengine.h>
 #include <linux/bitops.h>
 #include <linux/interrupt.h>
 #include <linux/platform_device.h>
 #include <linux/workqueue.h>
 #include <linux/sched.h>
+#include <linux/scatterlist.h>
 #include <linux/spi/spi.h>

+#include <mach/dma.h>
 #include <mach/ep93xx_spi.h>

 #define SSPCR0 0x0000
@@ -71,6 +74,7 @@
  * @pdev: pointer to platform device
  * @clk: clock for the controller
  * @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
  * @irq: IRQ number used by the driver
  * @min_rate: minimum clock rate (in Hz) supported by the controller
  * @max_rate: maximum clock rate (in Hz) supported by the controller
@@ -84,6 +88,14 @@
  * @rx: current byte in transfer to receive
  * @fifo_level: how full is FIFO (%0..%SPI_FIFO_SIZE - %1). Receiving one
  *              frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ *            the client
  *
  * This structure holds EP93xx SPI controller specific information. When
  * @running is %true, driver accepts transfer requests from protocol drivers.
@@ -100,6 +112,7 @@ struct ep93xx_spi {
         const struct platform_device *pdev;
         struct clk *clk;
         void __iomem *regs_base;
+        unsigned long sspdr_phys;
         int irq;
         unsigned long min_rate;
         unsigned long max_rate;
@@ -112,6 +125,13 @@ struct ep93xx_spi {
         size_t tx;
         size_t rx;
         size_t fifo_level;
+        struct dma_chan *dma_rx;
+        struct dma_chan *dma_tx;
+        struct ep93xx_dma_data dma_rx_data;
+        struct ep93xx_dma_data dma_tx_data;
+        struct sg_table rx_sgt;
+        struct sg_table tx_sgt;
+        void *zeropage;
 };

 /**
@@ -496,14 +516,195 @@ static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
                 espi->fifo_level++;
         }

-        if (espi->rx == t->len) {
-                msg->actual_length += t->len;
+        if (espi->rx == t->len)
                 return 0;
-        }

         return -EINPROGRESS;
 }

+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+        /*
+         * Now everything is set up for the current transfer. We prime the TX
+         * FIFO, enable interrupts, and wait for the transfer to complete.
+         */
+        if (ep93xx_spi_read_write(espi)) {
+                ep93xx_spi_enable_interrupts(espi);
+                wait_for_completion(&espi->wait);
+        }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
+{
+        struct spi_transfer *t = espi->current_msg->state;
+        struct dma_async_tx_descriptor *txd;
+        enum dma_slave_buswidth buswidth;
+        struct dma_slave_config conf;
+        struct scatterlist *sg;
+        struct sg_table *sgt;
+        struct dma_chan *chan;
+        const void *buf, *pbuf;
+        size_t len = t->len;
+        int i, ret, nents;
+
+        if (bits_per_word(espi) > 8)
+                buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+        else
+                buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+        memset(&conf, 0, sizeof(conf));
+        conf.direction = dir;
+
+        if (dir == DMA_FROM_DEVICE) {
+                chan = espi->dma_rx;
+                buf = t->rx_buf;
+                sgt = &espi->rx_sgt;
+
+                conf.src_addr = espi->sspdr_phys;
+                conf.src_addr_width = buswidth;
+        } else {
+                chan = espi->dma_tx;
+                buf = t->tx_buf;
+                sgt = &espi->tx_sgt;
+
+                conf.dst_addr = espi->sspdr_phys;
+                conf.dst_addr_width = buswidth;
+        }
+
+        ret = dmaengine_slave_config(chan, &conf);
+        if (ret)
+                return ERR_PTR(ret);
+
+        /*
+         * We need to split the transfer into PAGE_SIZE'd chunks. This is
+         * because we are using @espi->zeropage to provide a zero RX buffer
+         * for the TX transfers and we have only allocated one page for that.
+         *
+         * For performance reasons we allocate a new sg_table only when
+         * needed. Otherwise we will re-use the current one. Eventually the
+         * last sg_table is released in ep93xx_spi_release_dma().
+         */
+
+        nents = DIV_ROUND_UP(len, PAGE_SIZE);
+        if (nents != sgt->nents) {
+                sg_free_table(sgt);
+
+                ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+                if (ret)
+                        return ERR_PTR(ret);
+        }
+
+        pbuf = buf;
+        for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+                size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+                if (buf) {
+                        sg_set_page(sg, virt_to_page(pbuf), bytes,
+                                    offset_in_page(pbuf));
+                } else {
+                        sg_set_page(sg, virt_to_page(espi->zeropage),
+                                    bytes, 0);
+                }
+
+                pbuf += bytes;
+                len -= bytes;
+        }
+
+        if (WARN_ON(len)) {
+                dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len);
+                return ERR_PTR(-EINVAL);
+        }
+
+        nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+        if (!nents)
+                return ERR_PTR(-ENOMEM);
+
+        txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
+                                                 dir, DMA_CTRL_ACK);
+        if (!txd) {
+                dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+                return ERR_PTR(-ENOMEM);
+        }
+        return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function finishes with the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+                                  enum dma_data_direction dir)
+{
+        struct dma_chan *chan;
+        struct sg_table *sgt;
+
+        if (dir == DMA_FROM_DEVICE) {
+                chan = espi->dma_rx;
+                sgt = &espi->rx_sgt;
+        } else {
+                chan = espi->dma_tx;
+                sgt = &espi->tx_sgt;
+        }
+
+        dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+        complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+        struct spi_message *msg = espi->current_msg;
+        struct dma_async_tx_descriptor *rxd, *txd;
+
+        rxd = ep93xx_spi_dma_prepare(espi, DMA_FROM_DEVICE);
+        if (IS_ERR(rxd)) {
+                dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+                msg->status = PTR_ERR(rxd);
+                return;
+        }
+
+        txd = ep93xx_spi_dma_prepare(espi, DMA_TO_DEVICE);
+        if (IS_ERR(txd)) {
+                ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+                dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+                msg->status = PTR_ERR(txd);
+                return;
+        }
+
+        /* We are ready when RX is done */
+        rxd->callback = ep93xx_spi_dma_callback;
+        rxd->callback_param = &espi->wait;
+
+        /* Now submit both descriptors and wait while they finish */
+        dmaengine_submit(rxd);
+        dmaengine_submit(txd);
+
+        dma_async_issue_pending(espi->dma_rx);
+        dma_async_issue_pending(espi->dma_tx);
+
+        wait_for_completion(&espi->wait);
+
+        ep93xx_spi_dma_finish(espi, DMA_TO_DEVICE);
+        ep93xx_spi_dma_finish(espi, DMA_FROM_DEVICE);
+}
+
 /**
  * ep93xx_spi_process_transfer() - processes one SPI transfer
  * @espi: ep93xx SPI controller struct
@@ -556,13 +757,14 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
         espi->tx = 0;

         /*
-         * Now everything is set up for the current transfer. We prime the TX
-         * FIFO, enable interrupts, and wait for the transfer to complete.
+         * There is no point of setting up DMA for the transfers which will
+         * fit into the FIFO and can be transferred with a single interrupt.
+         * So in these cases we will be using PIO and don't bother for DMA.
          */
-        if (ep93xx_spi_read_write(espi)) {
-                ep93xx_spi_enable_interrupts(espi);
-                wait_for_completion(&espi->wait);
-        }
+        if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+                ep93xx_spi_dma_transfer(espi);
+        else
+                ep93xx_spi_pio_transfer(espi);

         /*
          * In case of error during transmit, we bail out from processing
@@ -571,6 +773,8 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
         if (msg->status)
                 return;

+        msg->actual_length += t->len;
+
         /*
          * After this transfer is finished, perform any possible
          * post-transfer actions requested by the protocol driver.
@@ -752,6 +956,75 @@ static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }

+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+        if (ep93xx_dma_chan_is_m2p(chan))
+                return false;
+
+        chan->private = filter_param;
+        return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+        dma_cap_mask_t mask;
+        int ret;
+
+        espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+        if (!espi->zeropage)
+                return -ENOMEM;
+
+        dma_cap_zero(mask);
+        dma_cap_set(DMA_SLAVE, mask);
+
+        espi->dma_rx_data.port = EP93XX_DMA_SSP;
+        espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+        espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+        espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+                                           &espi->dma_rx_data);
+        if (!espi->dma_rx) {
+                ret = -ENODEV;
+                goto fail_free_page;
+        }
+
+        espi->dma_tx_data.port = EP93XX_DMA_SSP;
+        espi->dma_tx_data.direction = DMA_TO_DEVICE;
+        espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+        espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+                                           &espi->dma_tx_data);
+        if (!espi->dma_tx) {
+                ret = -ENODEV;
+                goto fail_release_rx;
+        }
+
+        return 0;
+
+fail_release_rx:
+        dma_release_channel(espi->dma_rx);
+        espi->dma_rx = NULL;
+fail_free_page:
+        free_page((unsigned long)espi->zeropage);
+
+        return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+        if (espi->dma_rx) {
+                dma_release_channel(espi->dma_rx);
+                sg_free_table(&espi->rx_sgt);
+        }
+        if (espi->dma_tx) {
+                dma_release_channel(espi->dma_tx);
+                sg_free_table(&espi->tx_sgt);
+        }
+
+        if (espi->zeropage)
+                free_page((unsigned long)espi->zeropage);
+}
+
 static int __init ep93xx_spi_probe(struct platform_device *pdev)
 {
         struct spi_master *master;
@@ -818,6 +1091,7 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
                 goto fail_put_clock;
         }

+        espi->sspdr_phys = res->start + SSPDR;
         espi->regs_base = ioremap(res->start, resource_size(res));
         if (!espi->regs_base) {
                 dev_err(&pdev->dev, "failed to map resources\n");
@@ -832,10 +1106,13 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)
                 goto fail_unmap_regs;
         }

+        if (info->use_dma && ep93xx_spi_setup_dma(espi))
+                dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
         espi->wq = create_singlethread_workqueue("ep93xx_spid");
         if (!espi->wq) {
                 dev_err(&pdev->dev, "unable to create workqueue\n");
-                goto fail_free_irq;
+                goto fail_free_dma;
         }
         INIT_WORK(&espi->msg_work, ep93xx_spi_work);
         INIT_LIST_HEAD(&espi->msg_queue);
@@ -857,7 +1134,8 @@ static int __init ep93xx_spi_probe(struct platform_device *pdev)

 fail_free_queue:
         destroy_workqueue(espi->wq);
-fail_free_irq:
+fail_free_dma:
+        ep93xx_spi_release_dma(espi);
         free_irq(espi->irq, espi);
 fail_unmap_regs:
         iounmap(espi->regs_base);
@@ -901,6 +1179,7 @@ static int __exit ep93xx_spi_remove(struct platform_device *pdev)
         }
         spin_unlock_irq(&espi->lock);

+        ep93xx_spi_release_dma(espi);
         free_irq(espi->irq, espi);
         iounmap(espi->regs_base);
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
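
A note for readers following ep93xx_spi_dma_prepare() in the hunks above: the
scatter/gather table is rebuilt in PAGE_SIZE chunks because the shared
@zeropage dummy RX buffer is only one page long, so no single entry may span
more than a page. The splitting arithmetic is easy to lose in the diff, so
here is a standalone sketch of just that step, written as ordinary userspace C
with the kernel helpers replaced by plain equivalents (an illustration of the
idea, not driver code):

    /* Standalone illustration of the PAGE_SIZE chunking used in
     * ep93xx_spi_dma_prepare(); kernel helpers (DIV_ROUND_UP, min_t,
     * sg_set_page) are replaced with plain C, so this is only a sketch. */
    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096u
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            size_t len = 10000;     /* hypothetical transfer length */
            size_t nents = DIV_ROUND_UP(len, PAGE_SIZE);

            printf("%zu bytes -> %zu scatterlist entries\n", len, nents);

            /* Each entry covers at most one page, mirroring the
             * for_each_sg()/sg_set_page() loop in the driver. */
            for (size_t i = 0; i < nents; i++) {
                    size_t bytes = len < PAGE_SIZE ? len : PAGE_SIZE;

                    printf("  entry %zu: %zu bytes\n", i, bytes);
                    len -= bytes;
            }
            return 0;
    }

The driver applies the same min(len, PAGE_SIZE) step whether a real buffer or
the zeropage is being mapped; only the page each entry points at differs.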