author     Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2012-07-05 06:29:43 -0400
committer  Vinod Koul <vinod.koul@linux.intel.com>        2012-07-20 01:58:54 -0400
commit     0e79f9ae1610c15f5e5959c39d7c39071619de97 (patch)
tree       0b84805cc1e48875681ad5928f5bcb18b61421a1 /drivers/mmc
parent     1ff8df4f5388ad66bd7d0199b5839a2e3345c055 (diff)
mmc: sh_mmcif: switch to the new DMA channel allocation and configuration
Using the "private" field from struct dma_chan is deprecated. The sh
dmaengine driver now also supports the preferred DMA channel allocation
and configuration method, using a standard filter function and a channel
configuration operation. This patch updates sh_mmcif to use this new
method.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Chris Ball <cjb@laptop.org>
Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
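In short, instead of a driver-local filter that stashes a struct sh_dmae_slave
pointer in chan->private, the driver now hands the slave ID to the generic
shdma_chan_filter and programs direction and FIFO address through
dmaengine_slave_config(). A minimal sketch of the new pattern, assembled from
the hunks below (res, pdata and ret belong to the surrounding
sh_mmcif_request_dma(); error handling trimmed):

	dma_cap_mask_t mask;
	struct dma_slave_config cfg;
	struct dma_chan *chan;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The slave ID goes to the generic filter, not to chan->private */
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)pdata->slave_id_tx);

	/* Per-direction setup via the standard dmaengine operation */
	cfg.slave_id = pdata->slave_id_tx;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr = res->start + MMCIF_CE_DATA;	/* MMCIF data FIFO */
	cfg.src_addr = 0;
	ret = dmaengine_slave_config(chan, &cfg);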
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/sh_mmcif.c  82
1 file changed, 47 insertions(+), 35 deletions(-)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 9e3b9b1c3637..0f07d2878c49 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -211,8 +211,6 @@ struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_request *mrq;
 	struct platform_device *pd;
-	struct sh_dmae_slave dma_slave_tx;
-	struct sh_dmae_slave dma_slave_rx;
 	struct clk *hclk;
 	unsigned int clk;
 	int bus_width;
@@ -371,52 +369,66 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
 		desc, cookie);
 }
 
-static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
-{
-	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
-	chan->private = arg;
-	return true;
-}
-
 static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
 				 struct sh_mmcif_plat_data *pdata)
 {
-	struct sh_dmae_slave *tx, *rx;
+	struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+	struct dma_slave_config cfg;
+	dma_cap_mask_t mask;
+	int ret;
+
 	host->dma_active = false;
 
+	if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0)
+		return;
+
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
-	tx = &host->dma_slave_tx;
-	tx->shdma_slave.slave_id = pdata->slave_id_tx;
-	rx = &host->dma_slave_rx;
-	rx->shdma_slave.slave_id = pdata->slave_id_rx;
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	host->chan_tx = dma_request_channel(mask, shdma_chan_filter,
+					    (void *)pdata->slave_id_tx);
+	dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
+		host->chan_tx);
 
-	if (tx->shdma_slave.slave_id > 0 && rx->shdma_slave.slave_id > 0) {
-		dma_cap_mask_t mask;
+	if (!host->chan_tx)
+		return;
 
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
+	cfg.slave_id = pdata->slave_id_tx;
+	cfg.direction = DMA_MEM_TO_DEV;
+	cfg.dst_addr = res->start + MMCIF_CE_DATA;
+	cfg.src_addr = 0;
+	ret = dmaengine_slave_config(host->chan_tx, &cfg);
+	if (ret < 0)
+		goto ecfgtx;
 
-		host->chan_tx = dma_request_channel(mask, sh_mmcif_filter,
-						    &tx->shdma_slave);
-		dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
-			host->chan_tx);
+	host->chan_rx = dma_request_channel(mask, shdma_chan_filter,
+					    (void *)pdata->slave_id_rx);
+	dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
+		host->chan_rx);
 
-		if (!host->chan_tx)
-			return;
+	if (!host->chan_rx)
+		goto erqrx;
 
-		host->chan_rx = dma_request_channel(mask, sh_mmcif_filter,
-						    &rx->shdma_slave);
-		dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
-			host->chan_rx);
+	cfg.slave_id = pdata->slave_id_rx;
+	cfg.direction = DMA_DEV_TO_MEM;
+	cfg.dst_addr = 0;
+	cfg.src_addr = res->start + MMCIF_CE_DATA;
+	ret = dmaengine_slave_config(host->chan_rx, &cfg);
+	if (ret < 0)
+		goto ecfgrx;
 
-		if (!host->chan_rx) {
-			dma_release_channel(host->chan_tx);
-			host->chan_tx = NULL;
-			return;
-		}
+	init_completion(&host->dma_complete);
 
-		init_completion(&host->dma_complete);
-	}
+	return;
+
+ecfgrx:
+	dma_release_channel(host->chan_rx);
+	host->chan_rx = NULL;
+erqrx:
+ecfgtx:
+	dma_release_channel(host->chan_tx);
+	host->chan_tx = NULL;
 }
 
 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)