 drivers/mmc/host/mmci.c | 187 ++++++++++++++++++++++++++++++-----------------
 1 file changed, 105 insertions(+), 82 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index dee9b73ce293..372e921389c8 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -149,6 +149,24 @@ static struct variant_data variant_ux500v2 = {
 };
 
 /*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+			      struct mmc_data *data)
+{
+	if (!data)
+		return 0;
+
+	if (!is_power_of_2(data->blksz)) {
+		dev_err(mmc_dev(host->mmc),
+			"unsupported block size (%d bytes)\n", data->blksz);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
  * This must be called with host->lock held
  */
 static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
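The helper added above centralizes the power-of-two block-size check that mmci_request() previously open-coded (see the final hunk). A minimal standalone sketch of the same decision follows; is_power_of_2() is re-implemented locally and the harness is illustrative, not driver code.

#include <stdbool.h>
#include <stdio.h>

/* Local stand-in for the kernel's is_power_of_2() helper. */
static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* Mirrors mmci_validate_data(): no data is fine, a power-of-two
 * block size is fine, anything else is rejected with -EINVAL (-22). */
static int validate_blksz(const unsigned int *blksz)
{
	if (!blksz)
		return 0;
	return is_power_of_2(*blksz) ? 0 : -22;
}

int main(void)
{
	unsigned int sizes[] = { 512, 8, 24 };

	printf("no data   -> %d\n", validate_blksz(NULL));
	for (int i = 0; i < 3; i++)
		printf("blksz %3u -> %d\n", sizes[i], validate_blksz(&sizes[i]));
	return 0;
}

Returning 0 for a NULL data pointer is what lets a single call cover both command-only and data requests, which the mmci_request() hunk at the end relies on.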
@@ -370,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
 	host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
+	host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-	struct dma_chan *chan = host->dma_current;
+	struct dma_chan *chan;
 	enum dma_data_direction dir;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
 	u32 status;
 	int i;
 
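mmci_dma_unmap() now derives both the unmap direction and the channel from the request flags rather than from host->dma_current, which is what makes it callable from the post_request and error paths further down. A small sketch of that mapping; the flag values follow <linux/mmc/core.h> of this period (an assumption for the demo) and the harness is illustrative.

#include <stdio.h>

#define MMC_DATA_WRITE	(1 << 8)
#define MMC_DATA_READ	(1 << 9)

enum dma_data_direction { DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2 };

/* Same selection logic as the rewritten mmci_dma_unmap(). */
static void pick_channel(unsigned int flags)
{
	enum dma_data_direction dir;
	const char *chan;

	if (flags & MMC_DATA_READ) {
		dir = DMA_FROM_DEVICE;		/* card to memory */
		chan = "host->dma_rx_channel";
	} else {
		dir = DMA_TO_DEVICE;		/* memory to card */
		chan = "host->dma_tx_channel";
	}
	printf("flags=0x%03x -> dir=%d via %s\n", flags, dir, chan);
}

int main(void)
{
	pick_channel(MMC_DATA_READ);
	pick_channel(MMC_DATA_WRITE);
	return 0;
}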
@@ -392,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
 	 */
 	if (status & MCI_RXDATAAVLBLMASK) {
-		dmaengine_terminate_all(chan);
+		mmci_dma_data_error(host);
 		if (!data->error)
 			data->error = -EIO;
 	}
 
-	if (data->flags & MMC_DATA_WRITE) {
-		dir = DMA_TO_DEVICE;
-	} else {
-		dir = DMA_FROM_DEVICE;
-	}
-
 	if (!data->host_cookie)
-		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+		mmci_dma_unmap(host, data);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -414,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
 		mmci_dma_release(host);
 	}
-}
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
 }
 
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-			      struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+				struct dma_chan **dma_chan,
+				struct dma_async_tx_descriptor **dma_desc)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
@@ -441,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 
-	/* Check if next job is already prepared */
-	if (data->host_cookie && !next &&
-	    host->dma_current && host->dma_desc_current)
-		return 0;
-
-	if (!next) {
-		host->dma_current = NULL;
-		host->dma_desc_current = NULL;
-	}
-
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_DEV_TO_MEM;
 		buffer_dirn = DMA_FROM_DEVICE;
@@ -480,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	if (!desc)
 		goto unmap_exit;
 
-	if (next) {
-		next->dma_chan = chan;
-		next->dma_desc = desc;
-	} else {
-		host->dma_current = chan;
-		host->dma_desc_current = desc;
-	}
+	*dma_chan = chan;
+	*dma_desc = desc;
 
 	return 0;
 
 unmap_exit:
-	if (!next)
-		dmaengine_terminate_all(chan);
 	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	/* Check if next job is already prepared. */
+	if (host->dma_current && host->dma_desc_current)
+		return 0;
+
+	/* No job was prepared, so do it now. */
+	return __mmci_dma_prep_data(host, data, &host->dma_current,
+				    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	struct mmci_host_next *nd = &host->next_data;
+	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
 	int ret;
 	struct mmc_data *data = host->data;
 
-	ret = mmci_dma_prep_data(host, host->data, NULL);
+	ret = mmci_dma_prep_data(host, host->data);
 	if (ret)
 		return ret;
 
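With __mmci_dma_prep_data() publishing its results through caller-supplied pointers, the two inline wrappers above share every line of channel and descriptor setup, and mmci_dma_prep_data() short-circuits when mmci_get_next_data() has already promoted a pre-prepared job. A toy model of that promotion and reuse; all types and values are stand-ins, not driver code.

#include <stdio.h>

struct toy_host {
	int cur_chan, cur_desc;		/* 0 means "nothing prepared" */
	int next_chan, next_desc;
};

/* Worker in the style of __mmci_dma_prep_data(): fills whichever
 * slots the caller points it at, so "current" and "next" share it. */
static int toy_prep(int *chan, int *desc)
{
	static int id = 1;

	*chan = id++;
	*desc = id++;
	return 0;
}

/* Mirrors mmci_dma_prep_data(): reuse an already-promoted job. */
static int toy_prep_current(struct toy_host *h)
{
	if (h->cur_chan && h->cur_desc)
		return 0;
	return toy_prep(&h->cur_chan, &h->cur_desc);
}

int main(void)
{
	struct toy_host h = { 0 };

	toy_prep(&h.next_chan, &h.next_desc);	/* like mmci_dma_prep_next() */
	h.cur_chan = h.next_chan;		/* like mmci_get_next_data() */
	h.cur_desc = h.next_desc;
	toy_prep_current(&h);			/* short-circuits: reused */
	printf("current %d/%d, prepared once\n", h.cur_chan, h.cur_desc);
	return 0;
}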
@@ -532,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct mmci_host_next *next = &host->next_data;
 
-	if (data->host_cookie && data->host_cookie != next->cookie) {
-		pr_warning("[%s] invalid cookie: data->host_cookie %d"
-			   " host->next_data.cookie %d\n",
-			   __func__, data->host_cookie, host->next_data.cookie);
-		data->host_cookie = 0;
-	}
-
-	if (!data->host_cookie)
-		return;
+	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
 	host->dma_desc_current = next->dma_desc;
 	host->dma_current = next->dma_chan;
-
 	next->dma_desc = NULL;
 	next->dma_chan = NULL;
 }
@@ -559,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
 	if (!data)
 		return;
 
-	if (data->host_cookie) {
-		data->host_cookie = 0;
+	BUG_ON(data->host_cookie);
+
+	if (mmci_validate_data(host, data))
 		return;
-	}
 
-	/* if config for dma */
-	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-		if (mmci_dma_prep_data(host, data, nd))
-			data->host_cookie = 0;
-		else
-			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-	}
+	if (!mmci_dma_prep_next(host, data))
+		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
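The cookie expression in mmci_pre_request() reserves 0 for "nothing prepared" and hands out 1 whenever the signed counter has wrapped negative; the kernel builds with -fno-strict-overflow, so the wrap itself is well defined there. A standalone sketch with the wrap made explicit, since plain C leaves signed overflow undefined.

#include <limits.h>
#include <stdio.h>

/* Same policy as "++nd->cookie < 0 ? 1 : nd->cookie": never hand
 * out 0 (reserved for "unprepared") or a negative cookie. */
static int next_cookie(int *counter)
{
	*counter = (*counter == INT_MAX) ? INT_MIN : *counter + 1;
	return *counter < 0 ? 1 : *counter;
}

int main(void)
{
	int counter = INT_MAX - 2;

	for (int i = 0; i < 4; i++)
		printf("cookie = %d\n", next_cookie(&counter));
	return 0;
}

The only invariant the driver relies on is that a prepared request carries a non-zero cookie; zero is the "unprepared" marker tested throughout these hunks.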
@@ -579,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 {
 	struct mmci_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
-	struct dma_chan *chan;
-	enum dma_data_direction dir;
 
-	if (!data)
+	if (!data || !data->host_cookie)
 		return;
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
-		chan = host->dma_rx_channel;
-	} else {
-		dir = DMA_TO_DEVICE;
-		chan = host->dma_tx_channel;
-	}
+	mmci_dma_unmap(host, data);
 
+	if (err) {
+		struct mmci_host_next *next = &host->next_data;
+		struct dma_chan *chan;
+		if (data->flags & MMC_DATA_READ)
+			chan = host->dma_rx_channel;
+		else
+			chan = host->dma_tx_channel;
+		dmaengine_terminate_all(chan);
 
-	/* if config for dma */
-	if (chan) {
-		if (err)
-			dmaengine_terminate_all(chan);
-		if (data->host_cookie)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len, dir);
-		mrq->data->host_cookie = 0;
+		next->dma_desc = NULL;
+		next->dma_chan = NULL;
 	}
 }
 
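mmci_post_request() now keys everything off the cookie: requests that never went through mmci_pre_request() (cookie 0) are left alone, prepared ones are always unmapped, and only on error is the still-queued next job terminated and dropped. A compact model of that guard; everything here is a stand-in for the real mapping state.

#include <stdio.h>

static int mapped;	/* stands in for a DMA-mapped sg list */

static void toy_post_request(int host_cookie, int err)
{
	if (!host_cookie)
		return;			/* never prepared: nothing to undo */

	mapped = 0;			/* always unmap a prepared request */
	if (err)
		printf("err=%d: terminate channel, drop next job\n", err);
}

int main(void)
{
	mapped = 1;			/* as if pre_request mapped buffers */
	toy_post_request(0, 0);		/* unprepared request: no-op */
	printf("unprepared: mapped=%d\n", mapped);
	toy_post_request(7, -5);	/* prepared request that failed */
	printf("prepared:   mapped=%d\n", mapped);
	return 0;
}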
@@ -622,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }
@@ -772,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		u32 remain, success;
 
 		/* Terminate the DMA transfer */
-		if (dma_inprogress(host))
+		if (dma_inprogress(host)) {
 			mmci_dma_data_error(host);
+			mmci_dma_unmap(host, data);
+		}
 
 		/*
 		 * Calculate how far we are into the transfer. Note that
@@ -812,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
 	if (status & MCI_DATAEND || data->error) {
 		if (dma_inprogress(host))
-			mmci_dma_unmap(host, data);
+			mmci_dma_finalize(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)
@@ -849,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	if (!cmd->data || cmd->error) {
 		if (host->data) {
 			/* Terminate the DMA transfer */
-			if (dma_inprogress(host))
+			if (dma_inprogress(host)) {
 				mmci_dma_data_error(host);
+				mmci_dma_unmap(host, host->data);
+			}
 			mmci_stop_data(host);
 		}
 		mmci_request_end(host, cmd->mrq);
@@ -1076,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	WARN_ON(host->mrq != NULL);
 
-	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-			mrq->data->blksz);
-		mrq->cmd->error = -EINVAL;
+	mrq->cmd->error = mmci_validate_data(host, mrq->data);
+	if (mrq->cmd->error) {
 		mmc_request_done(mmc, mrq);
 		return;
 	}
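Since mmci_validate_data() returns 0 when mrq->data is NULL, the single call in mmci_request() covers command-only and data requests alike, and a bad block size now completes the request immediately with the error reported in cmd->error. A last sketch of that early-completion shape; names are illustrative.

#include <stdio.h>

static int toy_validate(const unsigned int *blksz)
{
	if (!blksz)
		return 0;		/* command without data */
	if (*blksz == 0 || (*blksz & (*blksz - 1)))
		return -22;		/* -EINVAL: not a power of two */
	return 0;
}

/* Same shape as the rewritten mmci_request(): validate first, and on
 * failure report completion at once instead of issuing the command. */
static void toy_request(const unsigned int *blksz)
{
	int cmd_error = toy_validate(blksz);

	if (cmd_error) {
		printf("request_done early, cmd->error=%d\n", cmd_error);
		return;
	}
	printf("request issued\n");
}

int main(void)
{
	unsigned int good = 512, bad = 24;

	toy_request(NULL);
	toy_request(&good);
	toy_request(&bad);
	return 0;
}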