aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/spi/spi-s3c64xx.c
diff options
context:
space:
mode:
authorArnd Bergmann <arnd@arndb.de>2013-04-11 16:42:03 -0400
committerMark Brown <broonie@opensource.wolfsonmicro.com>2013-04-12 08:54:58 -0400
commit788437273fa8b824810ea9a23f7ed4d7fdb2949a (patch)
tree79ba33292f981697274cf29534e07617fa196fc2 /drivers/spi/spi-s3c64xx.c
parent6b8cc3306e78490bda26815b04c786d8e1fc1489 (diff)
spi: s3c64xx: move to generic dmaengine API
The spi-s3c64xx driver uses a Samsung proprietary interface for talking to the DMA engine, which does not work with multiplatform kernels. This version of the patch leaves the old code in place, behind an #ifdef. This can be removed in the future, after the s3c64xx platform starts supporting the regular dmaengine interface. An earlier version of this patch was tested successfully on exynos5250 by Padma Venkat. The conversion was rather mechanical, since the Samsung interface is just a shallow wrapper around the dmaengine interface. Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Diffstat (limited to 'drivers/spi/spi-s3c64xx.c')
-rw-r--r--drivers/spi/spi-s3c64xx.c185
1 files changed, 141 insertions, 44 deletions
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 682b1e738370..4989aeb793fd 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -24,6 +24,7 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/clk.h> 25#include <linux/clk.h>
26#include <linux/dma-mapping.h> 26#include <linux/dma-mapping.h>
27#include <linux/dmaengine.h>
27#include <linux/platform_device.h> 28#include <linux/platform_device.h>
28#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
29#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
@@ -31,9 +32,12 @@
31#include <linux/of.h> 32#include <linux/of.h>
32#include <linux/of_gpio.h> 33#include <linux/of_gpio.h>
33 34
34#include <mach/dma.h>
35#include <linux/platform_data/spi-s3c64xx.h> 35#include <linux/platform_data/spi-s3c64xx.h>
36 36
37#ifdef CONFIG_SAMSUNG_DMADEV
38#include <mach/dma.h>
39#endif
40
37#define MAX_SPI_PORTS 3 41#define MAX_SPI_PORTS 3
38 42
39/* Registers and bit-fields */ 43/* Registers and bit-fields */
@@ -131,9 +135,9 @@
131#define TXBUSY (1<<3) 135#define TXBUSY (1<<3)
132 136
133struct s3c64xx_spi_dma_data { 137struct s3c64xx_spi_dma_data {
134 unsigned ch; 138 struct dma_chan *ch;
135 enum dma_transfer_direction direction; 139 enum dma_transfer_direction direction;
136 enum dma_ch dmach; 140 unsigned int dmach;
137}; 141};
138 142
139/** 143/**
@@ -195,16 +199,14 @@ struct s3c64xx_spi_driver_data {
195 unsigned cur_speed; 199 unsigned cur_speed;
196 struct s3c64xx_spi_dma_data rx_dma; 200 struct s3c64xx_spi_dma_data rx_dma;
197 struct s3c64xx_spi_dma_data tx_dma; 201 struct s3c64xx_spi_dma_data tx_dma;
202#ifdef CONFIG_SAMSUNG_DMADEV
198 struct samsung_dma_ops *ops; 203 struct samsung_dma_ops *ops;
204#endif
199 struct s3c64xx_spi_port_config *port_conf; 205 struct s3c64xx_spi_port_config *port_conf;
200 unsigned int port_id; 206 unsigned int port_id;
201 unsigned long gpios[4]; 207 unsigned long gpios[4];
202}; 208};
203 209
204static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
205 .name = "samsung-spi-dma",
206};
207
208static void flush_fifo(struct s3c64xx_spi_driver_data *sdd) 210static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
209{ 211{
210 void __iomem *regs = sdd->regs; 212 void __iomem *regs = sdd->regs;
@@ -281,6 +283,13 @@ static void s3c64xx_spi_dmacb(void *data)
281 spin_unlock_irqrestore(&sdd->lock, flags); 283 spin_unlock_irqrestore(&sdd->lock, flags);
282} 284}
283 285
286#ifdef CONFIG_SAMSUNG_DMADEV
287/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
288
289static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
290 .name = "samsung-spi-dma",
291};
292
284static void prepare_dma(struct s3c64xx_spi_dma_data *dma, 293static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
285 unsigned len, dma_addr_t buf) 294 unsigned len, dma_addr_t buf)
286{ 295{
@@ -294,14 +303,14 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
294 config.direction = sdd->rx_dma.direction; 303 config.direction = sdd->rx_dma.direction;
295 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; 304 config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
296 config.width = sdd->cur_bpw / 8; 305 config.width = sdd->cur_bpw / 8;
297 sdd->ops->config(sdd->rx_dma.ch, &config); 306 sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
298 } else { 307 } else {
299 sdd = container_of((void *)dma, 308 sdd = container_of((void *)dma,
300 struct s3c64xx_spi_driver_data, tx_dma); 309 struct s3c64xx_spi_driver_data, tx_dma);
301 config.direction = sdd->tx_dma.direction; 310 config.direction = sdd->tx_dma.direction;
302 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; 311 config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
303 config.width = sdd->cur_bpw / 8; 312 config.width = sdd->cur_bpw / 8;
304 sdd->ops->config(sdd->tx_dma.ch, &config); 313 sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
305 } 314 }
306 315
307 info.cap = DMA_SLAVE; 316 info.cap = DMA_SLAVE;
@@ -311,8 +320,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
311 info.direction = dma->direction; 320 info.direction = dma->direction;
312 info.buf = buf; 321 info.buf = buf;
313 322
314 sdd->ops->prepare(dma->ch, &info); 323 sdd->ops->prepare((enum dma_ch)dma->ch, &info);
315 sdd->ops->trigger(dma->ch); 324 sdd->ops->trigger((enum dma_ch)dma->ch);
316} 325}
317 326
318static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) 327static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
@@ -325,12 +334,126 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
325 req.cap = DMA_SLAVE; 334 req.cap = DMA_SLAVE;
326 req.client = &s3c64xx_spi_dma_client; 335 req.client = &s3c64xx_spi_dma_client;
327 336
328 sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx"); 337 sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
329 sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx"); 338 sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
330 339
331 return 1; 340 return 1;
332} 341}
333 342
343static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
344{
345 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
346
347 /* Acquire DMA channels */
348 while (!acquire_dma(sdd))
349 usleep_range(10000, 11000);
350
351 pm_runtime_get_sync(&sdd->pdev->dev);
352
353 return 0;
354}
355
356static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
357{
358 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
359
360 /* Free DMA channels */
361 sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
362 sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
363
364 pm_runtime_put(&sdd->pdev->dev);
365
366 return 0;
367}
368
369static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
370 struct s3c64xx_spi_dma_data *dma)
371{
372 sdd->ops->stop((enum dma_ch)dma->ch);
373}
374#else
375
376static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
377 unsigned len, dma_addr_t buf)
378{
379 struct s3c64xx_spi_driver_data *sdd;
380 struct dma_slave_config config;
381 struct scatterlist sg;
382 struct dma_async_tx_descriptor *desc;
383
384 if (dma->direction == DMA_DEV_TO_MEM) {
385 sdd = container_of((void *)dma,
386 struct s3c64xx_spi_driver_data, rx_dma);
387 config.direction = dma->direction;
388 config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
389 config.src_addr_width = sdd->cur_bpw / 8;
390 config.src_maxburst = 1;
391 dmaengine_slave_config(dma->ch, &config);
392 } else {
393 sdd = container_of((void *)dma,
394 struct s3c64xx_spi_driver_data, tx_dma);
395 config.direction = dma->direction;
396 config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
397 config.dst_addr_width = sdd->cur_bpw / 8;
398 config.dst_maxburst = 1;
399 dmaengine_slave_config(dma->ch, &config);
400 }
401
402 sg_init_table(&sg, 1);
403 sg_dma_len(&sg) = len;
404 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
405 len, offset_in_page(buf));
406 sg_dma_address(&sg) = buf;
407
408 desc = dmaengine_prep_slave_sg(dma->ch,
409 &sg, 1, dma->direction, DMA_PREP_INTERRUPT);
410
411 desc->callback = s3c64xx_spi_dmacb;
412 desc->callback_param = dma;
413
414 dmaengine_submit(desc);
415 dma_async_issue_pending(dma->ch);
416}
417
418static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
419{
420 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
421 dma_filter_fn filter = sdd->cntrlr_info->filter;
422 struct device *dev = &sdd->pdev->dev;
423 dma_cap_mask_t mask;
424
425 dma_cap_zero(mask);
426 dma_cap_set(DMA_SLAVE, mask);
427
428 /* Acquire DMA channels */
429 sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
430 (void*)sdd->rx_dma.dmach, dev, "rx");
431 sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
432 (void*)sdd->tx_dma.dmach, dev, "tx");
433 pm_runtime_get_sync(&sdd->pdev->dev);
434
435 return 0;
436}
437
438static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
439{
440 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
441
442 /* Free DMA channels */
443 dma_release_channel(sdd->rx_dma.ch);
444 dma_release_channel(sdd->tx_dma.ch);
445
446 pm_runtime_put(&sdd->pdev->dev);
447 return 0;
448}
449
450static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
451 struct s3c64xx_spi_dma_data *dma)
452{
453 dmaengine_terminate_all(dma->ch);
454}
455#endif
456
334static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, 457static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
335 struct spi_device *spi, 458 struct spi_device *spi,
336 struct spi_transfer *xfer, int dma_mode) 459 struct spi_transfer *xfer, int dma_mode)
@@ -713,9 +836,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
713 } 836 }
714 837
715 /* Polling method for xfers not bigger than FIFO capacity */ 838 /* Polling method for xfers not bigger than FIFO capacity */
716 if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1)) 839 use_dma = 0;
717 use_dma = 0; 840 if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
718 else 841 (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
719 use_dma = 1; 842 use_dma = 1;
720 843
721 spin_lock_irqsave(&sdd->lock, flags); 844 spin_lock_irqsave(&sdd->lock, flags);
@@ -750,10 +873,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
750 if (use_dma) { 873 if (use_dma) {
751 if (xfer->tx_buf != NULL 874 if (xfer->tx_buf != NULL
752 && (sdd->state & TXBUSY)) 875 && (sdd->state & TXBUSY))
753 sdd->ops->stop(sdd->tx_dma.ch); 876 s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
754 if (xfer->rx_buf != NULL 877 if (xfer->rx_buf != NULL
755 && (sdd->state & RXBUSY)) 878 && (sdd->state & RXBUSY))
756 sdd->ops->stop(sdd->rx_dma.ch); 879 s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
757 } 880 }
758 881
759 goto out; 882 goto out;
@@ -790,32 +913,6 @@ out:
790 return 0; 913 return 0;
791} 914}
792 915
793static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
794{
795 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
796
797 /* Acquire DMA channels */
798 while (!acquire_dma(sdd))
799 usleep_range(10000, 11000);
800
801 pm_runtime_get_sync(&sdd->pdev->dev);
802
803 return 0;
804}
805
806static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
807{
808 struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
809
810 /* Free DMA channels */
811 sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
812 sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
813
814 pm_runtime_put(&sdd->pdev->dev);
815
816 return 0;
817}
818
819static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( 916static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
820 struct spi_device *spi) 917 struct spi_device *spi)
821{ 918{