diff options
author | Christoph Hellwig <hch@lst.de> | 2019-02-11 08:19:57 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2019-02-12 12:09:23 -0500 |
commit | e222822f9be03c6d985ba1af1f22ec863368b8d0 (patch) | |
tree | 248dc74a9d6746e9bb81b845911f4273ba231663 /drivers/net/caif | |
parent | 99e1311475a19d19f0fd6d940254d7ab6113c675 (diff) |
net: caif: pass struct device to DMA API functions
The DMA API generally relies on a struct device to work properly, and
only barely works without one for legacy reasons. Pass the easily
available struct device from the platform_device to remedy this.
Also use the proper Kconfig symbol to check for DMA API availability.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/caif')
-rw-r--r-- | drivers/net/caif/caif_spi.c | 30 |
1 file changed, 16 insertions, 14 deletions
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c index d28a1398c091..7608bc3e00df 100644 --- a/drivers/net/caif/caif_spi.c +++ b/drivers/net/caif/caif_spi.c | |||
@@ -73,35 +73,37 @@ MODULE_PARM_DESC(spi_down_tail_align, "SPI downlink tail alignment."); | |||
73 | #define LOW_WATER_MARK 100 | 73 | #define LOW_WATER_MARK 100 |
74 | #define HIGH_WATER_MARK (LOW_WATER_MARK*5) | 74 | #define HIGH_WATER_MARK (LOW_WATER_MARK*5) |
75 | 75 | ||
76 | #ifdef CONFIG_UML | 76 | #ifndef CONFIG_HAS_DMA |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * We sometimes use UML for debugging, but it cannot handle | 79 | * We sometimes use UML for debugging, but it cannot handle |
80 | * dma_alloc_coherent so we have to wrap it. | 80 | * dma_alloc_coherent so we have to wrap it. |
81 | */ | 81 | */ |
82 | static inline void *dma_alloc(dma_addr_t *daddr) | 82 | static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) |
83 | { | 83 | { |
84 | return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL); | 84 | return kmalloc(SPI_DMA_BUF_LEN, GFP_KERNEL); |
85 | } | 85 | } |
86 | 86 | ||
87 | static inline void dma_free(void *cpu_addr, dma_addr_t handle) | 87 | static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, |
88 | dma_addr_t handle) | ||
88 | { | 89 | { |
89 | kfree(cpu_addr); | 90 | kfree(cpu_addr); |
90 | } | 91 | } |
91 | 92 | ||
92 | #else | 93 | #else |
93 | 94 | ||
94 | static inline void *dma_alloc(dma_addr_t *daddr) | 95 | static inline void *dma_alloc(struct cfspi *cfspi, dma_addr_t *daddr) |
95 | { | 96 | { |
96 | return dma_alloc_coherent(NULL, SPI_DMA_BUF_LEN, daddr, | 97 | return dma_alloc_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, daddr, |
97 | GFP_KERNEL); | 98 | GFP_KERNEL); |
98 | } | 99 | } |
99 | 100 | ||
100 | static inline void dma_free(void *cpu_addr, dma_addr_t handle) | 101 | static inline void dma_free(struct cfspi *cfspi, void *cpu_addr, |
102 | dma_addr_t handle) | ||
101 | { | 103 | { |
102 | dma_free_coherent(NULL, SPI_DMA_BUF_LEN, cpu_addr, handle); | 104 | dma_free_coherent(&cfspi->pdev->dev, SPI_DMA_BUF_LEN, cpu_addr, handle); |
103 | } | 105 | } |
104 | #endif /* CONFIG_UML */ | 106 | #endif /* CONFIG_HAS_DMA */ |
105 | 107 | ||
106 | #ifdef CONFIG_DEBUG_FS | 108 | #ifdef CONFIG_DEBUG_FS |
107 | 109 | ||
@@ -610,13 +612,13 @@ static int cfspi_init(struct net_device *dev) | |||
610 | } | 612 | } |
611 | 613 | ||
612 | /* Allocate DMA buffers. */ | 614 | /* Allocate DMA buffers. */ |
613 | cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]); | 615 | cfspi->xfer.va_tx[0] = dma_alloc(cfspi, &cfspi->xfer.pa_tx[0]); |
614 | if (!cfspi->xfer.va_tx[0]) { | 616 | if (!cfspi->xfer.va_tx[0]) { |
615 | res = -ENODEV; | 617 | res = -ENODEV; |
616 | goto err_dma_alloc_tx_0; | 618 | goto err_dma_alloc_tx_0; |
617 | } | 619 | } |
618 | 620 | ||
619 | cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx); | 621 | cfspi->xfer.va_rx = dma_alloc(cfspi, &cfspi->xfer.pa_rx); |
620 | 622 | ||
621 | if (!cfspi->xfer.va_rx) { | 623 | if (!cfspi->xfer.va_rx) { |
622 | res = -ENODEV; | 624 | res = -ENODEV; |
@@ -665,9 +667,9 @@ static int cfspi_init(struct net_device *dev) | |||
665 | return 0; | 667 | return 0; |
666 | 668 | ||
667 | err_create_wq: | 669 | err_create_wq: |
668 | dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); | 670 | dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); |
669 | err_dma_alloc_rx: | 671 | err_dma_alloc_rx: |
670 | dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); | 672 | dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); |
671 | err_dma_alloc_tx_0: | 673 | err_dma_alloc_tx_0: |
672 | return res; | 674 | return res; |
673 | } | 675 | } |
@@ -683,8 +685,8 @@ static void cfspi_uninit(struct net_device *dev) | |||
683 | 685 | ||
684 | cfspi->ndev = NULL; | 686 | cfspi->ndev = NULL; |
685 | /* Free DMA buffers. */ | 687 | /* Free DMA buffers. */ |
686 | dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx); | 688 | dma_free(cfspi, cfspi->xfer.va_rx, cfspi->xfer.pa_rx); |
687 | dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); | 689 | dma_free(cfspi, cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]); |
688 | set_bit(SPI_TERMINATE, &cfspi->state); | 690 | set_bit(SPI_TERMINATE, &cfspi->state); |
689 | wake_up_interruptible(&cfspi->wait); | 691 | wake_up_interruptible(&cfspi->wait); |
690 | destroy_workqueue(cfspi->wq); | 692 | destroy_workqueue(cfspi->wq); |