author    Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2010-11-11 06:19:47 -0500
committer Chris Ball <cjb@laptop.org>                    2011-01-08 23:52:28 -0500
commit    ef17fee1e559b3faeb6f89963e78ad47731d18a1 (patch)
tree      a2cef08fdbf4f922c10bd9b6ecfd67b5c1d0a4d2 /drivers/mmc
parent    729b0c79c2c0f631bffe1be5abaf291febf7792f (diff)
mmc: tmio_mmc: fix PIO fallback on DMA descriptor allocation failure
The easiest way to fall back to PIO when a DMA descriptor allocation fails is to disable DMA on the controller but continue the current request in PIO mode. This way tmio_mmc_start_dma() can become void, since it can no longer fail. The current version is also broken: it tests the wrong pointer and thus fails to recognise that a descriptor allocation was unsuccessful.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
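The essence of the fix, as a minimal standalone sketch: the submit path no longer returns an error for its caller to propagate; on failure it disables DMA on the controller and the current request simply continues in PIO mode. struct host, prep_dma() and the printf-based PIO path below are illustrative stand-ins, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

struct host {
	bool dma_enabled;
};

/* Stand-in for descriptor preparation; returns NULL on allocation failure. */
static void *prep_dma(struct host *host)
{
	return NULL;	/* simulate a failed allocation */
}

/* After the fix: void, because it can no longer fail from the caller's view. */
static void start_dma(struct host *host)
{
	void *desc = prep_dma(host);

	if (!desc) {
		/* DMA failed, fall back to PIO for this and future requests */
		fprintf(stderr, "DMA failed, falling back to PIO\n");
		host->dma_enabled = false;
	}
}

static int start_data(struct host *host)
{
	if (host->dma_enabled)
		start_dma(host);
	/*
	 * The request proceeds either way: if start_dma() disabled DMA,
	 * the data phase is served by the PIO interrupt path instead.
	 */
	printf("request continues, dma_enabled=%d\n", host->dma_enabled);
	return 0;
}

int main(void)
{
	struct host h = { .dma_enabled = true };
	return start_data(&h);
}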
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/host/tmio_mmc.c  69
-rw-r--r--  drivers/mmc/host/tmio_mmc.h   2
2 files changed, 25 insertions(+), 46 deletions(-)
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 4e75799291ca..63115a6de935 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -427,11 +427,12 @@ static void tmio_dma_complete(void *arg)
 	enable_mmc_irqs(host, TMIO_STAT_DATAEND);
 }
 
-static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_rx;
+	dma_cookie_t cookie;
 	int ret;
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE);
@@ -442,21 +443,20 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		} else {
 			chan->device->device_issue_pending(chan);
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -471,23 +471,18 @@ static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
-		desc, host->cookie, host->sg_len);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie, host->sg_len);
 }
 
-static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
+static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 {
 	struct scatterlist *sg = host->sg_ptr;
 	struct dma_async_tx_descriptor *desc = NULL;
 	struct dma_chan *chan = host->chan_tx;
+	dma_cookie_t cookie;
 	int ret;
 
 	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE);
@@ -498,19 +493,18 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 	}
 
 	if (desc) {
-		host->desc = desc;
 		desc->callback = tmio_dma_complete;
 		desc->callback_param = host;
-		host->cookie = desc->tx_submit(desc);
-		if (host->cookie < 0) {
-			host->desc = NULL;
-			ret = host->cookie;
+		cookie = desc->tx_submit(desc);
+		if (cookie < 0) {
+			desc = NULL;
+			ret = cookie;
 		}
 	}
 	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n",
-		__func__, host->sg_len, ret, host->cookie, host->mrq);
+		__func__, host->sg_len, ret, cookie, host->mrq);
 
-	if (!host->desc) {
+	if (!desc) {
 		/* DMA failed, fall back to PIO */
 		if (ret >= 0)
 			ret = -EIO;
@@ -525,30 +519,22 @@ static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
 		dev_warn(&host->pdev->dev,
 			 "DMA failed: %d, falling back to PIO\n", ret);
 		tmio_mmc_enable_dma(host, false);
-		reset(host);
-		/* Fail this request, let above layers recover */
-		host->mrq->cmd->error = ret;
-		tmio_mmc_finish_request(host);
 	}
 
 	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
-		desc, host->cookie);
-
-	return ret > 0 ? 0 : ret;
+		desc, cookie);
 }
 
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
 	if (data->flags & MMC_DATA_READ) {
 		if (host->chan_rx)
-			return tmio_mmc_start_dma_rx(host);
+			tmio_mmc_start_dma_rx(host);
 	} else {
 		if (host->chan_tx)
-			return tmio_mmc_start_dma_tx(host);
+			tmio_mmc_start_dma_tx(host);
 	}
-
-	return 0;
 }
 
 static void tmio_issue_tasklet_fn(unsigned long priv)
@@ -584,9 +570,6 @@ static bool tmio_mmc_filter(struct dma_chan *chan, void *arg)
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
 				 struct tmio_mmc_data *pdata)
 {
-	host->cookie = -EINVAL;
-	host->desc = NULL;
-
 	/* We can only either use DMA for both Tx and Rx or not use it at all */
 	if (pdata->dma) {
 		dma_cap_mask_t mask;
@@ -632,15 +615,11 @@ static void tmio_mmc_release_dma(struct tmio_mmc_host *host)
 		host->chan_rx = NULL;
 		dma_release_channel(chan);
 	}
-
-	host->cookie = -EINVAL;
-	host->desc = NULL;
 }
 #else
-static int tmio_mmc_start_dma(struct tmio_mmc_host *host,
+static void tmio_mmc_start_dma(struct tmio_mmc_host *host,
 			       struct mmc_data *data)
 {
-	return 0;
 }
 
 static void tmio_mmc_request_dma(struct tmio_mmc_host *host,
@@ -682,7 +661,9 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
 	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
 	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
 
-	return tmio_mmc_start_dma(host, data);
+	tmio_mmc_start_dma(host, data);
+
+	return 0;
 }
 
 /* Process requests from the MMC layer */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 0fedc78e3ea5..0b7d9162c1b5 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -112,9 +112,7 @@ struct tmio_mmc_host {
 	struct tasklet_struct	dma_complete;
 	struct tasklet_struct	dma_issue;
 #ifdef CONFIG_TMIO_MMC_DMA
-	struct dma_async_tx_descriptor *desc;
 	unsigned int		dma_sglen;
-	dma_cookie_t		cookie;
 #endif
 };
 
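The "wrong pointer" the commit message refers to is the cached host->desc field removed above: when a new prep call returns NULL, the field still holds the descriptor from the previous request, so the old !host->desc check never fires. A standalone sketch of that failure mode, with all names illustrative:

#include <stddef.h>
#include <stdio.h>

struct host {
	void *desc;	/* cached across requests, like the removed field */
};

/* Stand-in prep: fails (returns NULL) when asked to. */
static void *prep(int fail)
{
	static int token;
	return fail ? NULL : &token;
}

static void submit(struct host *host, int fail)
{
	void *desc = prep(fail);

	if (desc)
		host->desc = desc;

	/* Buggy check: host->desc is stale from the previous request. */
	if (!host->desc)
		printf("old check: allocation failure detected\n");
	else
		printf("old check: looks fine (desc=%p)\n", host->desc);

	/* Fixed check: test the local result of this prep call. */
	if (!desc)
		printf("new check: allocation failure detected\n");
}

int main(void)
{
	struct host h = { .desc = NULL };
	submit(&h, 0);	/* first request succeeds, caches a descriptor */
	submit(&h, 1);	/* second prep fails; only the new check notices */
	return 0;
}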