author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-04 21:02:25 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-04 21:02:25 -0400
commit     fba9569924e06da076cb2ad12474bbd82d69f54d (patch)
tree       f0b7d9c82f8dd90f0dc757a4c00afc0872fc1484 /drivers/spi
parent     3d0a8d10cfb4cc3d1877c29a866ee7d8a46aa2fa (diff)
parent     4598fc2c94b68740e0269db03c98a1e7ad5af773 (diff)
Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma
* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
dmaengine/ep93xx_dma: add module.h include
pch_dma: Reduce wasting memory
pch_dma: Fix suspend issue
dma/timberdale: free_irq() on an error path
dma: shdma: transfer based runtime PM
dmaengine: shdma: protect against the IRQ handler
dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
serial: sh-sci: don't filter on DMA device, use only channel ID
ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
ASoC: Samsung: Update DMA interface
spi/s3c64xx: Merge dma control code
spi/s3c64xx: Add support DMA engine API
ARM: SAMSUNG: Remove S3C-PL330-DMA driver
ARM: S5P64X0: Use generic DMA PL330 driver
ARM: S5PC100: Use generic DMA PL330 driver
ARM: S5PV210: Use generic DMA PL330 driver
...
Fix up fairly trivial conflicts in
- arch/arm/mach-exynos4/{Kconfig,clock.c}
- arch/arm/mach-s5p64x0/dma.c
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi-s3c64xx.c  175
1 file changed, 92 insertions(+), 83 deletions(-)
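The spi-s3c64xx hunks below replace the legacy s3c2410_dma_* calls with the generic samsung_dma_ops wrapper. As an orientation aid only (not part of the patch), the sketch below condenses the new per-channel lifecycle into one hypothetical helper. Every op, struct, field, and register name is taken from the hunks that follow; the helper name, its arguments, and the flat control flow are illustrative, and the code assumes it sits inside spi-s3c64xx.c next to the definitions this patch introduces.

/*
 * Illustrative sketch, not part of the patch: the DMA lifecycle the driver
 * follows after this conversion. Error handling and locking are omitted;
 * only the TX channel is shown, the RX channel is symmetric.
 */
static void sketch_tx_dma_lifecycle(struct s3c64xx_spi_driver_data *sdd,
                                    dma_addr_t buf, unsigned len)
{
        struct samsung_dma_info req;
        struct samsung_dma_prep_info prep;

        /* 1. acquire_dma(): bind the generic ops and request the channel. */
        sdd->ops = samsung_dma_get_ops();
        req.cap = DMA_SLAVE;
        req.client = &s3c64xx_spi_dma_client;
        req.width = sdd->cur_bpw / 8;
        req.direction = sdd->tx_dma.direction;          /* DMA_TO_DEVICE */
        req.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
        sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req);

        /* 2. prepare_dma(): queue one buffer and start the channel;
         *    s3c64xx_spi_dmacb() runs on completion and clears TXBUSY.
         */
        prep.cap = DMA_SLAVE;
        prep.direction = sdd->tx_dma.direction;
        prep.buf = buf;
        prep.len = len;
        prep.fp = s3c64xx_spi_dmacb;
        prep.fp_param = &sdd->tx_dma;
        sdd->ops->prepare(sdd->tx_dma.ch, &prep);
        sdd->ops->trigger(sdd->tx_dma.ch);

        /* 3. On error, stop the channel; after the message, release it. */
        sdd->ops->stop(sdd->tx_dma.ch);
        sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
}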
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 595dacc7645f..019a7163572f 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -131,6 +131,12 @@
 #define RXBUSY (1<<2)
 #define TXBUSY (1<<3)
 
+struct s3c64xx_spi_dma_data {
+        unsigned ch;
+        enum dma_data_direction direction;
+        enum dma_ch dmach;
+};
+
 /**
  * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
  * @clk: Pointer to the spi clock.
@@ -164,13 +170,14 @@ struct s3c64xx_spi_driver_data {
         struct work_struct work;
         struct list_head queue;
         spinlock_t lock;
-        enum dma_ch rx_dmach;
-        enum dma_ch tx_dmach;
         unsigned long sfr_start;
         struct completion xfer_completion;
         unsigned state;
         unsigned cur_mode, cur_bpw;
         unsigned cur_speed;
+        struct s3c64xx_spi_dma_data rx_dma;
+        struct s3c64xx_spi_dma_data tx_dma;
+        struct samsung_dma_ops *ops;
 };
 
 static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
@@ -226,6 +233,78 @@ static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
         writel(val, regs + S3C64XX_SPI_CH_CFG);
 }
 
+static void s3c64xx_spi_dmacb(void *data)
+{
+        struct s3c64xx_spi_driver_data *sdd;
+        struct s3c64xx_spi_dma_data *dma = data;
+        unsigned long flags;
+
+        if (dma->direction == DMA_FROM_DEVICE)
+                sdd = container_of(data,
+                        struct s3c64xx_spi_driver_data, rx_dma);
+        else
+                sdd = container_of(data,
+                        struct s3c64xx_spi_driver_data, tx_dma);
+
+        spin_lock_irqsave(&sdd->lock, flags);
+
+        if (dma->direction == DMA_FROM_DEVICE) {
+                sdd->state &= ~RXBUSY;
+                if (!(sdd->state & TXBUSY))
+                        complete(&sdd->xfer_completion);
+        } else {
+                sdd->state &= ~TXBUSY;
+                if (!(sdd->state & RXBUSY))
+                        complete(&sdd->xfer_completion);
+        }
+
+        spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+                                        unsigned len, dma_addr_t buf)
+{
+        struct s3c64xx_spi_driver_data *sdd;
+        struct samsung_dma_prep_info info;
+
+        if (dma->direction == DMA_FROM_DEVICE)
+                sdd = container_of((void *)dma,
+                        struct s3c64xx_spi_driver_data, rx_dma);
+        else
+                sdd = container_of((void *)dma,
+                        struct s3c64xx_spi_driver_data, tx_dma);
+
+        info.cap = DMA_SLAVE;
+        info.len = len;
+        info.fp = s3c64xx_spi_dmacb;
+        info.fp_param = dma;
+        info.direction = dma->direction;
+        info.buf = buf;
+
+        sdd->ops->prepare(dma->ch, &info);
+        sdd->ops->trigger(dma->ch);
+}
+
+static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
+{
+        struct samsung_dma_info info;
+
+        sdd->ops = samsung_dma_get_ops();
+
+        info.cap = DMA_SLAVE;
+        info.client = &s3c64xx_spi_dma_client;
+        info.width = sdd->cur_bpw / 8;
+
+        info.direction = sdd->rx_dma.direction;
+        info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+        sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info);
+        info.direction = sdd->tx_dma.direction;
+        info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+        sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info);
+
+        return 1;
+}
+
 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                                         struct spi_device *spi,
                                         struct spi_transfer *xfer, int dma_mode)
@@ -258,10 +337,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                 chcfg |= S3C64XX_SPI_CH_TXCH_ON;
                 if (dma_mode) {
                         modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
-                        s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8);
-                        s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd,
-                                                xfer->tx_dma, xfer->len);
-                        s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START);
+                        prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
                 } else {
                         switch (sdd->cur_bpw) {
                         case 32:
@@ -293,10 +369,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
                         writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
                                         | S3C64XX_SPI_PACKET_CNT_EN,
                                         regs + S3C64XX_SPI_PACKET_CNT);
-                        s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8);
-                        s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd,
-                                                xfer->rx_dma, xfer->len);
-                        s3c2410_dma_ctrl(sdd->rx_dmach, S3C2410_DMAOP_START);
+                        prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
                 }
         }
 
@@ -482,46 +555,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
         }
 }
 
-static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                                int size, enum s3c2410_dma_buffresult res)
-{
-        struct s3c64xx_spi_driver_data *sdd = buf_id;
-        unsigned long flags;
-
-        spin_lock_irqsave(&sdd->lock, flags);
-
-        if (res == S3C2410_RES_OK)
-                sdd->state &= ~RXBUSY;
-        else
-                dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size);
-
-        /* If the other done */
-        if (!(sdd->state & TXBUSY))
-                complete(&sdd->xfer_completion);
-
-        spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
-static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id,
-                                int size, enum s3c2410_dma_buffresult res)
-{
-        struct s3c64xx_spi_driver_data *sdd = buf_id;
-        unsigned long flags;
-
-        spin_lock_irqsave(&sdd->lock, flags);
-
-        if (res == S3C2410_RES_OK)
-                sdd->state &= ~TXBUSY;
-        else
-                dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size);
-
-        /* If the other done */
-        if (!(sdd->state & RXBUSY))
-                complete(&sdd->xfer_completion);
-
-        spin_unlock_irqrestore(&sdd->lock, flags);
-}
-
 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
 
 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
@@ -696,12 +729,10 @@ static void handle_msg(struct s3c64xx_spi_driver_data *sdd,
                         if (use_dma) {
                                 if (xfer->tx_buf != NULL
                                                 && (sdd->state & TXBUSY))
-                                        s3c2410_dma_ctrl(sdd->tx_dmach,
-                                                        S3C2410_DMAOP_FLUSH);
+                                        sdd->ops->stop(sdd->tx_dma.ch);
                                 if (xfer->rx_buf != NULL
                                                 && (sdd->state & RXBUSY))
-                                        s3c2410_dma_ctrl(sdd->rx_dmach,
-                                                        S3C2410_DMAOP_FLUSH);
+                                        sdd->ops->stop(sdd->rx_dma.ch);
                         }
 
                         goto out;
@@ -739,30 +770,6 @@ out:
         msg->complete(msg->context);
 }
 
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
-        if (s3c2410_dma_request(sdd->rx_dmach,
-                                &s3c64xx_spi_dma_client, NULL) < 0) {
-                dev_err(&sdd->pdev->dev, "cannot get RxDMA\n");
-                return 0;
-        }
-        s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb);
-        s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW,
-                                sdd->sfr_start + S3C64XX_SPI_RX_DATA);
-
-        if (s3c2410_dma_request(sdd->tx_dmach,
-                                &s3c64xx_spi_dma_client, NULL) < 0) {
-                dev_err(&sdd->pdev->dev, "cannot get TxDMA\n");
-                s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
-                return 0;
-        }
-        s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb);
-        s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM,
-                                sdd->sfr_start + S3C64XX_SPI_TX_DATA);
-
-        return 1;
-}
-
 static void s3c64xx_spi_work(struct work_struct *work)
 {
         struct s3c64xx_spi_driver_data *sdd = container_of(work,
@@ -799,8 +806,8 @@ static void s3c64xx_spi_work(struct work_struct *work)
         spin_unlock_irqrestore(&sdd->lock, flags);
 
         /* Free DMA channels */
-        s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client);
-        s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client);
+        sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
+        sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
 }
 
 static int s3c64xx_spi_transfer(struct spi_device *spi,
@@ -1017,8 +1024,10 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
         sdd->cntrlr_info = sci;
         sdd->pdev = pdev;
         sdd->sfr_start = mem_res->start;
-        sdd->tx_dmach = dmatx_res->start;
-        sdd->rx_dmach = dmarx_res->start;
+        sdd->tx_dma.dmach = dmatx_res->start;
+        sdd->tx_dma.direction = DMA_TO_DEVICE;
+        sdd->rx_dma.dmach = dmarx_res->start;
+        sdd->rx_dma.direction = DMA_FROM_DEVICE;
 
         sdd->cur_bpw = 8;
 
@@ -1106,7 +1115,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
                         pdev->id, master->num_chipselect);
         dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
                                         mem_res->end, mem_res->start,
-                                        sdd->rx_dmach, sdd->tx_dmach);
+                                        sdd->rx_dma.dmach, sdd->tx_dma.dmach);
 
         return 0;
 
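A note on the callback change above: the two direction-specific s3c2410 callbacks are folded into the single s3c64xx_spi_dmacb(), which completes xfer_completion only after both the RX and TX halves have cleared their BUSY bits. A reduced sketch of that bookkeeping follows; the helper name is hypothetical and the spinlock around the state update is deliberately left out.

/* Reduced sketch of the shared completion logic in s3c64xx_spi_dmacb();
 * sdd->lock normally protects the state update shown here.
 */
static void sketch_dma_done(struct s3c64xx_spi_driver_data *sdd,
                            struct s3c64xx_spi_dma_data *dma)
{
        bool rx = (dma->direction == DMA_FROM_DEVICE);
        unsigned done_bit  = rx ? RXBUSY : TXBUSY;      /* this direction */
        unsigned other_bit = rx ? TXBUSY : RXBUSY;      /* the peer direction */

        sdd->state &= ~done_bit;
        if (!(sdd->state & other_bit))          /* wake only when both are done */
                complete(&sdd->xfer_completion);
}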