Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--   drivers/spi/spi.c   101
1 file changed, 76 insertions, 25 deletions
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index bb7cf561c311..49313dd0a144 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -582,13 +582,70 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
                 spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+                       struct sg_table *sgt, void *buf, size_t len,
+                       enum dma_data_direction dir)
+{
+        const bool vmalloced_buf = is_vmalloc_addr(buf);
+        const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+        const int sgs = DIV_ROUND_UP(len, desc_len);
+        struct page *vm_page;
+        void *sg_buf;
+        size_t min;
+        int i, ret;
+
+        ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+        if (ret != 0)
+                return ret;
+
+        for (i = 0; i < sgs; i++) {
+                min = min_t(size_t, len, desc_len);
+
+                if (vmalloced_buf) {
+                        vm_page = vmalloc_to_page(buf);
+                        if (!vm_page) {
+                                sg_free_table(sgt);
+                                return -ENOMEM;
+                        }
+                        sg_buf = page_address(vm_page) +
+                                ((size_t)buf & ~PAGE_MASK);
+                } else {
+                        sg_buf = buf;
+                }
+
+                sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+                buf += min;
+                len -= min;
+        }
+
+        ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+        if (ret < 0) {
+                sg_free_table(sgt);
+                return ret;
+        }
+
+        sgt->nents = ret;
+
+        return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+                          struct sg_table *sgt, enum dma_data_direction dir)
+{
+        if (sgt->orig_nents) {
+                dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+                sg_free_table(sgt);
+        }
+}
+
 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
-        struct device *dev = master->dev.parent;
         struct device *tx_dev, *rx_dev;
         struct spi_transfer *xfer;
         void *tmp;
         size_t max_tx, max_rx;
+        int ret;
 
         if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
                 max_tx = 0;
@@ -631,7 +688,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
                 }
         }
 
-        if (msg->is_dma_mapped || !master->can_dma)
+        if (!master->can_dma)
                 return 0;
 
         tx_dev = &master->dma_tx->dev->device;
@@ -642,25 +699,21 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
                         continue;
 
                 if (xfer->tx_buf != NULL) {
-                        xfer->tx_dma = dma_map_single(tx_dev,
-                                                      (void *)xfer->tx_buf,
-                                                      xfer->len,
-                                                      DMA_TO_DEVICE);
-                        if (dma_mapping_error(dev, xfer->tx_dma)) {
-                                dev_err(dev, "dma_map_single Tx failed\n");
-                                return -ENOMEM;
-                        }
+                        ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+                                          (void *)xfer->tx_buf, xfer->len,
+                                          DMA_TO_DEVICE);
+                        if (ret != 0)
+                                return ret;
                 }
 
                 if (xfer->rx_buf != NULL) {
-                        xfer->rx_dma = dma_map_single(rx_dev,
-                                                      xfer->rx_buf, xfer->len,
-                                                      DMA_FROM_DEVICE);
-                        if (dma_mapping_error(dev, xfer->rx_dma)) {
-                                dev_err(dev, "dma_map_single Rx failed\n");
-                                dma_unmap_single(tx_dev, xfer->tx_dma,
-                                                 xfer->len, DMA_TO_DEVICE);
-                                return -ENOMEM;
-                        }
+                        ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+                                          xfer->rx_buf, xfer->len,
+                                          DMA_FROM_DEVICE);
+                        if (ret != 0) {
+                                spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+                                              DMA_TO_DEVICE);
+                                return ret;
+                        }
                 }
         }
@@ -675,7 +728,7 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
         struct spi_transfer *xfer;
         struct device *tx_dev, *rx_dev;
 
-        if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+        if (!master->cur_msg_mapped || !master->can_dma)
                 return 0;
 
         tx_dev = &master->dma_tx->dev->device;
@@ -685,12 +738,8 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
                 if (!master->can_dma(master, msg->spi, xfer))
                         continue;
 
-                if (xfer->rx_buf)
-                        dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
-                                         DMA_FROM_DEVICE);
-                if (xfer->tx_buf)
-                        dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
-                                         DMA_TO_DEVICE);
+                spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+                spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
         }
 
         return 0;
@@ -1503,6 +1552,8 @@ int spi_register_master(struct spi_master *master)
         mutex_init(&master->bus_lock_mutex);
         master->bus_lock_flag = 0;
         init_completion(&master->xfer_completion);
+        if (!master->max_dma_len)
+                master->max_dma_len = INT_MAX;
 
         /* register the device, then userspace will see it.
          * registration fails if the bus ID is in use.
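
The scatterlists built by the new spi_map_buf() (xfer->tx_sg and xfer->rx_sg) are consumed by the controller driver, not by spi.c itself: a driver that provides ->can_dma() gets the buffers mapped by the core before ->transfer_one() runs. The fragment below is only an illustrative sketch of that consumer side, not code from this patch; foo_spi_*, FOO_DMA_MIN_LEN, the 64 KiB max_dma_len value and the omitted PIO/completion handling are assumptions, while the can_dma/transfer_one hooks, the dma_tx channel and the dmaengine calls are existing kernel interfaces.

/*
 * Hypothetical driver fragment (names are made up): opt in to the
 * core-built scatterlists and hand them straight to dmaengine.
 */
#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

#define FOO_DMA_MIN_LEN 32      /* arbitrary PIO/DMA cut-over point */

static bool foo_spi_can_dma(struct spi_master *master, struct spi_device *spi,
                            struct spi_transfer *xfer)
{
        /* Ask the core to map only transfers worth handing to the DMA engine */
        return xfer->len >= FOO_DMA_MIN_LEN;
}

static int foo_spi_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *xfer)
{
        struct dma_async_tx_descriptor *desc;

        if (!foo_spi_can_dma(master, spi, xfer))
                return -EINVAL; /* PIO fallback left out of this sketch */

        /* xfer->tx_sg was filled in by spi_map_buf() before we were called */
        desc = dmaengine_prep_slave_sg(master->dma_tx, xfer->tx_sg.sgl,
                                       xfer->tx_sg.nents, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(master->dma_tx);

        /* Positive return: still in flight, completed elsewhere via
         * spi_finalize_current_transfer().
         */
        return 1;
}

static void foo_spi_setup_master(struct spi_master *master)
{
        master->can_dma = foo_spi_can_dma;
        master->transfer_one = foo_spi_transfer_one;
        /* assumed hardware limit; without it the core now defaults to INT_MAX */
        master->max_dma_len = SZ_64K;
}

Because spi_map_buf() splits each buffer into chunks of at most max_dma_len (or PAGE_SIZE for vmalloc'ed buffers), a driver advertising a real hardware limit as above gets scatterlist entries it can program directly, instead of the single dma_map_single() mapping the core produced before this change.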