author	Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2010-05-19 14:34:22 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-05-22 03:51:18 -0400
commit	311f3ac76826bfd8ed6213ded91ec947df164def (patch)
tree	c848087c301930bd0104441de82d9c13c97c2f09 /drivers/mmc
parent	056676dabd9f4c69a6adcad208e9aa2ca7241400 (diff)
mmc: add DMA support to tmio_mmc driver, when used on SuperH
SDHI controllers on SuperH, served by the tmio_mmc driver, can use
slave DMA for data transfer. This patch adds support for the dmaengine
API to the tmio_mmc driver.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Ian Molton <ian@mnementh.co.uk>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
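For readers new to the slave-dmaengine API this patch adopts, the
submission path implemented in tmio_mmc_start_dma_rx()/_tx() below
condenses to the following sketch. The function name example_submit_rx
and the trimmed error handling are illustrative only, not part of the
patch; the calls themselves mirror the patch verbatim.

	/*
	 * Condensed sketch of the dmaengine slave submission flow
	 * (Rx direction); PIO fallback and cleanup omitted for brevity.
	 */
	static int example_submit_rx(struct tmio_mmc_host *host)
	{
		struct dma_chan *chan = host->chan_rx;
		struct dma_async_tx_descriptor *desc;
		int nents;

		/* 1. Map the scatterlist for device access */
		nents = dma_map_sg(&host->pdev->dev, host->sg_ptr,
				   host->sg_len, DMA_FROM_DEVICE);
		if (nents <= 0)
			return -EIO;

		/* 2. Ask the DMA driver to prepare a slave descriptor */
		desc = chan->device->device_prep_slave_sg(chan, host->sg_ptr,
				nents, DMA_FROM_DEVICE,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return -EIO;

		/* 3. Attach a completion callback, submit, kick the engine */
		desc->callback = tmio_dma_complete;
		desc->callback_param = host;
		host->cookie = desc->tx_submit(desc);
		chan->device->device_issue_pending(chan);

		return 0;
	}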
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/tmio_mmc.c	359
-rw-r--r--	drivers/mmc/host/tmio_mmc.h	13
2 files changed, 342 insertions(+), 30 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index b2b577f6afd4..3ecd41875fac 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -29,6 +29,7 @@
 #include <linux/irq.h>
 #include <linux/device.h>
 #include <linux/delay.h>
+#include <linux/dmaengine.h>
 #include <linux/mmc/host.h>
 #include <linux/mfd/core.h>
 #include <linux/mfd/tmio.h>
@@ -131,8 +132,8 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 
 	host->cmd = cmd;
 
-/* FIXME - this seems to be ok comented out but the spec suggest this bit should
- * be set when issuing app commands.
+/* FIXME - this seems to be ok commented out but the spec suggest this bit
+ * should be set when issuing app commands.
 *	if(cmd->flags & MMC_FLAG_ACMD)
 *	c |= APP_CMD;
 */
@@ -155,12 +156,12 @@ tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd)
 	return 0;
 }
 
-/* This chip always returns (at least?) as much data as you ask for.
+/*
+ * This chip always returns (at least?) as much data as you ask for.
  * I'm unsure what happens if you ask for less than a block. This should be
  * looked into to ensure that a funny length read doesnt hose the controller.
- *
  */
-static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	unsigned short *buf;
@@ -180,7 +181,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	count = data->blksz;
 
 	pr_debug("count: %08x offset: %08x flags %08x\n",
-		count, host->sg_off, data->flags);
+		 count, host->sg_off, data->flags);
 
 	/* Transfer the data */
 	if (data->flags & MMC_DATA_READ)
@@ -198,7 +199,7 @@ static inline void tmio_mmc_pio_irq(struct tmio_mmc_host *host)
 	return;
 }
 
-static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host)
 {
 	struct mmc_data *data = host->data;
 	struct mmc_command *stop;
@@ -206,7 +207,7 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	host->data = NULL;
 
 	if (!data) {
-		pr_debug("Spurious data end IRQ\n");
+		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n");
 		return;
 	}
 	stop = data->stop;
@@ -219,7 +220,8 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 
 	pr_debug("Completed data request\n");
 
-	/*FIXME - other drivers allow an optional stop command of any given type
+	/*
+	 * FIXME: other drivers allow an optional stop command of any given type
 	 * which we dont do, as the chip can auto generate them.
 	 * Perhaps we can be smarter about when to use auto CMD12 and
 	 * only issue the auto request when we know this is the desired
@@ -227,10 +229,17 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	 * upper layers expect. For now, we do what works.
 	 */
 
-	if (data->flags & MMC_DATA_READ)
-		disable_mmc_irqs(host, TMIO_MASK_READOP);
-	else
-		disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+	if (data->flags & MMC_DATA_READ) {
+		if (!host->chan_rx)
+			disable_mmc_irqs(host, TMIO_MASK_READOP);
+		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n",
+			host->mrq);
+	} else {
+		if (!host->chan_tx)
+			disable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n",
+			host->mrq);
+	}
 
 	if (stop) {
 		if (stop->opcode == 12 && !stop->arg)
@@ -242,7 +251,35 @@ static inline void tmio_mmc_data_irq(struct tmio_mmc_host *host)
 	tmio_mmc_finish_request(host);
 }
 
-static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
+static void tmio_mmc_data_irq(struct tmio_mmc_host *host)
+{
+	struct mmc_data *data = host->data;
+
+	if (!data)
+		return;
+
+	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) {
+		/*
+		 * Has all data been written out yet? Testing on SuperH showed,
+		 * that in most cases the first interrupt comes already with the
+		 * BUSY status bit clear, but on some operations, like mount or
+		 * in the beginning of a write / sync / umount, there is one
+		 * DATAEND interrupt with the BUSY bit set, in this cases
+		 * waiting for one more interrupt fixes the problem.
+		 */
+		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) {
+			disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+			tasklet_schedule(&host->dma_complete);
+		}
+	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) {
+		disable_mmc_irqs(host, TMIO_STAT_DATAEND);
+		tasklet_schedule(&host->dma_complete);
+	} else {
+		tmio_mmc_do_data_irq(host);
+	}
+}
+
+static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	unsigned int stat)
 {
 	struct mmc_command *cmd = host->cmd;
@@ -282,10 +319,16 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	 * If theres no data or we encountered an error, finish now.
 	 */
 	if (host->data && !cmd->error) {
-		if (host->data->flags & MMC_DATA_READ)
-			enable_mmc_irqs(host, TMIO_MASK_READOP);
-		else
-			enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+		if (host->data->flags & MMC_DATA_READ) {
+			if (!host->chan_rx)
+				enable_mmc_irqs(host, TMIO_MASK_READOP);
+		} else {
+			struct dma_chan *chan = host->chan_tx;
+			if (!chan)
+				enable_mmc_irqs(host, TMIO_MASK_WRITEOP);
+			else
+				tasklet_schedule(&host->dma_issue);
+		}
 	} else {
 		tmio_mmc_finish_request(host);
 	}
@@ -293,7 +336,6 @@ static inline void tmio_mmc_cmd_irq(struct tmio_mmc_host *host,
 	return;
 }
 
-
 static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 {
 	struct tmio_mmc_host *host = devid;
@@ -311,7 +353,7 @@ static irqreturn_t tmio_mmc_irq(int irq, void *devid)
 	if (!ireg) {
 		disable_mmc_irqs(host, status & ~irq_mask);
 
-		pr_debug("tmio_mmc: Spurious irq, disabling! "
+		pr_warning("tmio_mmc: Spurious irq, disabling! "
 			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg);
 		pr_debug_status(status);
 
@@ -363,16 +405,265 @@ out:
 	return IRQ_HANDLED;
 }
 
+#ifdef CONFIG_TMIO_MMC_DMA
+static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
+{
+#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE)
+	/* Switch DMA mode on or off - SuperH specific? */
+	sd_ctrl_write16(host, 0xd8, enable ? 2 : 0);
+#endif
+}
+
+static void tmio_dma_complete(void *arg)
+{
+	struct tmio_mmc_host *host = arg;
+
+	dev_dbg(&host->pdev->dev, "Command completed\n");
+
+	if (!host->data)
+		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n");
+	else
+		enable_mmc_irqs(host, TMIO_STAT_DATAEND);
+}
+
+static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_rx;
+	int ret;
+
+	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		host->desc = desc;
+		desc->callback = tmio_dma_complete;
+		desc->callback_param = host;
+		host->cookie = desc->tx_submit(desc);
+		if (host->cookie < 0) {
+			host->desc = NULL;
+			ret = host->cookie;
+		} else {
+			chan->device->device_issue_pending(chan);
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, host->cookie, host->mrq);
+
+	if (!host->desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+		/* Free the Tx channel too */
+		chan = host->chan_tx;
+		if (chan) {
+			host->chan_tx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+		reset(host);
+		/* Fail this request, let above layers recover */
+		host->mrq->cmd->error = ret;
+		tmio_mmc_finish_request(host);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
+		desc, host->cookie, host->sg_len);
+
+	return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+{
+	struct scatterlist *sg = host->sg_ptr;
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan = host->chan_tx;
+	int ret;
+
+	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
+	if (ret > 0) {
+		host->dma_sglen = ret;
+		desc = chan->device->device_prep_slave_sg(chan, sg, ret,
+			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	}
+
+	if (desc) {
+		host->desc = desc;
+		desc->callback = tmio_dma_complete;
+		desc->callback_param = host;
+		host->cookie = desc->tx_submit(desc);
+		if (host->cookie < 0) {
+			host->desc = NULL;
+			ret = host->cookie;
+		}
+	}
+	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
+		__func__, host->sg_len, ret, host->cookie, host->mrq);
+
+	if (!host->desc) {
+		/* DMA failed, fall back to PIO */
+		if (ret >= 0)
+			ret = -EIO;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+		/* Free the Rx channel too */
+		chan = host->chan_rx;
+		if (chan) {
+			host->chan_rx = NULL;
+			dma_release_channel(chan);
+		}
+		dev_warn(&host->pdev->dev,
+			 "DMA failed: %d, falling back to PIO\n", ret);
+		tmio_mmc_enable_dma(host, false);
+		reset(host);
+		/* Fail this request, let above layers recover */
+		host->mrq->cmd->error = ret;
+		tmio_mmc_finish_request(host);
+	}
+
+	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
+		desc, host->cookie);
+
+	return ret > 0 ? 0 : ret;
+}
+
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+			       struct mmc_data *data)
+{
+	if (data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			return tmio_mmc_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			return tmio_mmc_start_dma_tx(host);
+	}
+
+	return 0;
+}
+
+static void tmio_issue_tasklet_fn(unsigned long priv)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv;
+	struct dma_chan *chan = host->chan_tx;
+
+	chan->device->device_issue_pending(chan);
+}
+
+static void tmio_tasklet_fn(unsigned long arg)
+{
+	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+	if (host->data->flags & MMC_DATA_READ)
+		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+			     DMA_FROM_DEVICE);
+	else
+		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen,
+			     DMA_TO_DEVICE);
+
+	tmio_mmc_do_data_irq(host);
+}
+
+/* It might be necessary to make filter MFD specific */
+static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
+{
+	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
+	chan->private = arg;
+	return true;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+				 struct tmio_mmc_data *pdata)
+{
+	host->cookie = -EINVAL;
+	host->desc = NULL;
+
+	/* We can only either use DMA for both Tx and Rx or not use it at all */
+	if (pdata->dma) {
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+
+		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_tx);
+		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__,
+			host->chan_tx);
+
+		if (!host->chan_tx)
+			return;
+
+		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter,
+						    pdata->dma->chan_priv_rx);
+		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__,
+			host->chan_rx);
+
+		if (!host->chan_rx) {
+			dma_release_channel(host->chan_tx);
+			host->chan_tx = NULL;
+			return;
+		}
+
+		tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host);
+		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host);
+
+		tmio_mmc_enable_dma(host, true);
+	}
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+	if (host->chan_tx) {
+		struct dma_chan *chan = host->chan_tx;
+		host->chan_tx = NULL;
+		dma_release_channel(chan);
+	}
+	if (host->chan_rx) {
+		struct dma_chan *chan = host->chan_rx;
+		host->chan_rx = NULL;
+		dma_release_channel(chan);
+	}
+
+	host->cookie = -EINVAL;
+	host->desc = NULL;
+}
+#else
+static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+			       struct mmc_data *data)
+{
+	return 0;
+}
+
+static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
+				 struct tmio_mmc_data *pdata)
+{
+	host->chan_tx = NULL;
+	host->chan_rx = NULL;
+}
+
+static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
+{
+}
+#endif
+
 static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	struct mmc_data *data)
 {
 	pr_debug("setup data transfer: blocksize %08x nr_blocks %d\n",
 		data->blksz, data->blocks);
 
 	/* Hardware cannot perform 1 and 2 byte requests in 4 bit mode */
 	if (data->blksz < 4 && host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) {
-		printk(KERN_ERR "%s: %d byte block unsupported in 4 bit mode\n",
+		pr_err("%s: %d byte block unsupported in 4 bit mode\n",
 			mmc_hostname(host->mmc), data->blksz);
 		return -EINVAL;
 	}
 
@@ -383,7 +674,7 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return 0;
+	return tmio_mmc_start_dma(host, data);
 }
 
 /* Process requests from the MMC layer */
@@ -404,7 +695,6 @@ static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	}
 
 	ret = tmio_mmc_start_command(host, mrq->cmd);
-
 	if (!ret)
 		return;
 
@@ -459,10 +749,10 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
 
-	return (sd_ctrl_read16(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
+	return (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT) ? 0 : 1;
 }
 
-static struct mmc_host_ops tmio_mmc_ops = {
+static const struct mmc_host_ops tmio_mmc_ops = {
 	.request	= tmio_mmc_request,
 	.set_ios	= tmio_mmc_set_ios,
 	.get_ro		= tmio_mmc_get_ro,
@@ -515,6 +805,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	struct tmio_mmc_host *host;
 	struct mmc_host *mmc;
 	int ret = -EINVAL;
+	u32 irq_mask = TMIO_MASK_CMD;
 
 	if (dev->num_resources != 2)
 		goto out;
@@ -578,13 +869,20 @@ static int __devinit tmio_mmc_probe(struct platform_device *dev)
 	if (ret)
 		goto cell_disable;
 
+	/* See if we also get DMA */
+	tmio_mmc_request_dma(host, pdata);
+
 	mmc_add_host(mmc);
 
-	printk(KERN_INFO "%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
+	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc),
 		(unsigned long)host->ctl, host->irq);
 
 	/* Unmask the IRQs we want to know about */
-	enable_mmc_irqs(host, TMIO_MASK_IRQ);
+	if (!host->chan_rx)
+		irq_mask |= TMIO_MASK_READOP;
+	if (!host->chan_tx)
+		irq_mask |= TMIO_MASK_WRITEOP;
+	enable_mmc_irqs(host, irq_mask);
 
 	return 0;
 
@@ -609,6 +907,7 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)
 	if (mmc) {
 		struct tmio_mmc_host *host = mmc_priv(mmc);
 		mmc_remove_host(mmc);
+		tmio_mmc_release_dma(host);
 		free_irq(host->irq, host);
 		if (cell->disable)
 			cell->disable(dev);
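The probe path above consumes pdata->dma, which this diff does not
define; it comes from the companion <linux/mfd/tmio.h> change adding
struct tmio_mmc_dma with chan_priv_tx/chan_priv_rx pointers. Assuming
that header change and the SuperH dmaengine support, a board file
might wire the two slave channels up roughly as below. The struct
sh_dmae_slave usage and the SHDMA_SLAVE_SDHI0_* IDs are illustrative
platform-specific assumptions, not part of this commit.

	/* Hypothetical board wiring - not part of this commit */
	static struct sh_dmae_slave sdhi0_dma_tx = {
		.slave_id = SHDMA_SLAVE_SDHI0_TX,
	};

	static struct sh_dmae_slave sdhi0_dma_rx = {
		.slave_id = SHDMA_SLAVE_SDHI0_RX,
	};

	static struct tmio_mmc_dma sdhi0_dma = {
		.chan_priv_tx = &sdhi0_dma_tx,	/* matched by tmio_mmc_filter() */
		.chan_priv_rx = &sdhi0_dma_rx,
	};

	static struct tmio_mmc_data sdhi0_pdata = {
		.hclk = 12288000,
		.dma  = &sdhi0_dma,	/* leaving .dma NULL keeps the driver in PIO mode */
	};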
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index dafecfbcd91a..64f7d5dfc106 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -10,6 +10,8 @@
  */
 
 #include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/dmaengine.h>
 
 #define CTL_SD_CMD 0x00
 #define CTL_ARG_REG 0x04
@@ -106,6 +108,17 @@ struct tmio_mmc_host {
 	unsigned int		sg_off;
 
 	struct platform_device *pdev;
+
+	/* DMA support */
+	struct dma_chan		*chan_rx;
+	struct dma_chan		*chan_tx;
+	struct tasklet_struct	dma_complete;
+	struct tasklet_struct	dma_issue;
+#ifdef CONFIG_TMIO_MMC_DMA
+	struct dma_async_tx_descriptor *desc;
+	unsigned int		dma_sglen;
+	dma_cookie_t		cookie;
+#endif
 };
 
 #include <linux/io.h>