author	Guennadi Liakhovetski <g.liakhovetski@gmx.de>	2011-12-25 15:07:52 -0500
committer	Chris Ball <cjb@laptop.org>	2012-01-11 23:58:46 -0500
commit	f985da17f4d368896fb30d94531e4ffaa18e68d8 (patch)
tree	363da7968bab382f54da1e37d6f87cffe42c63b9 /drivers/mmc
parent	ee4b88879f23badd54f5557852745fa28a1570f6 (diff)
mmc: sh_mmcif: process requests asynchronously
This patch converts the sh_mmcif MMC host driver to process requests
asynchronously instead of waiting in its .request() method for completion.
This is achieved by using threaded IRQs.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
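For context, the core of the conversion is the split interrupt handling that
request_threaded_irq() provides. A minimal sketch of the pattern (illustrative
only, with hypothetical demo_* names; the real registration is in the probe
hunk below):

	/* Hard ("top half") handler: runs in hard-IRQ context, must not sleep. */
	static irqreturn_t demo_hard_irq(int irq, void *dev_id)
	{
		/* Read and acknowledge the interrupt source, then defer. */
		return IRQ_WAKE_THREAD;	/* wake the thread handler below */
	}

	/* Threaded ("bottom half") handler: runs in process context, may sleep. */
	static irqreturn_t demo_thread_irq(int irq, void *dev_id)
	{
		/* Heavy work: PIO, waiting for DMA, mmc_request_done(), etc. */
		return IRQ_HANDLED;
	}

	ret = request_threaded_irq(irq, demo_hard_irq, demo_thread_irq, 0,
				   "demo", host);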
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/host/sh_mmcif.c	588
1 file changed, 416 insertions(+), 172 deletions(-)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 8aee127e60b8..9371f3a4939b 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,32 @@
  *
  */
 
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
 #include <linux/bitops.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
@@ -168,9 +194,22 @@ enum mmcif_state {
 	STATE_IOS,
 };
 
+enum mmcif_wait_for {
+	MMCIF_WAIT_FOR_REQUEST,
+	MMCIF_WAIT_FOR_CMD,
+	MMCIF_WAIT_FOR_MREAD,
+	MMCIF_WAIT_FOR_MWRITE,
+	MMCIF_WAIT_FOR_READ,
+	MMCIF_WAIT_FOR_WRITE,
+	MMCIF_WAIT_FOR_READ_END,
+	MMCIF_WAIT_FOR_WRITE_END,
+	MMCIF_WAIT_FOR_STOP,
+};
+
 struct sh_mmcif_host {
 	struct mmc_host *mmc;
 	struct mmc_data *data;
+	struct mmc_request *mrq;
 	struct platform_device *pd;
 	struct sh_dmae_slave dma_slave_tx;
 	struct sh_dmae_slave dma_slave_rx;
@@ -178,11 +217,17 @@ struct sh_mmcif_host {
 	unsigned int clk;
 	int bus_width;
 	bool sd_error;
+	bool dying;
 	long timeout;
 	void __iomem *addr;
-	struct completion intr_wait;
+	u32 *pio_ptr;
 	spinlock_t lock;		/* protect sh_mmcif_host::state */
 	enum mmcif_state state;
+	enum mmcif_wait_for wait_for;
+	struct delayed_work timeout_work;
+	size_t blocksize;
+	int sg_idx;
+	int sg_blkidx;
 	bool power;
 	bool card_present;
 
@@ -468,125 +513,183 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
 	return ret;
 }
 
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
-			struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	struct mmc_data *data = host->mrq->data;
+
+	host->sg_blkidx += host->blocksize;
+
+	/* data->sg->length must be a multiple of host->blocksize? */
+	BUG_ON(host->sg_blkidx > data->sg->length);
+
+	if (host->sg_blkidx == data->sg->length) {
+		host->sg_blkidx = 0;
+		if (++host->sg_idx < data->sg_len)
+			host->pio_ptr = sg_virt(++data->sg);
+	} else {
+		host->pio_ptr = p;
+	}
+
+	if (host->sg_idx == data->sg_len)
+		return false;
+
+	return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
+{
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
+
+	host->wait_for = MMCIF_WAIT_FOR_READ;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf read enable */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
 
 	/* buffer read end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_READ_END;
 
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
-		struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+				struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, j, sec, *p;
-
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
-			/* buf read enable */
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
-
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
-
-			for (i = 0; i < blocksize / 4; i++)
-				*p++ = sh_mmcif_readl(host->addr,
-						      MMCIF_CE_DATA);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+
+	if (!data->sg_len || !data->sg->length)
+		return;
+
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
+
+	host->wait_for = MMCIF_WAIT_FOR_MREAD;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+	return true;
 }
 
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
-					struct mmc_request *mrq)
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
+				  struct mmc_request *mrq)
 {
-	struct mmc_data *data = mrq->data;
-	long time;
-	u32 blocksize, i, *p = sg_virt(data->sg);
+	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+			   BLOCK_SIZE_MASK) + 3;
 
-	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 
 	/* buf write enable */
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	blocksize = (BLOCK_SIZE_MASK &
-			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
-	for (i = 0; i < blocksize / 4; i++)
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = sg_virt(data->sg);
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
+	}
+
+	for (i = 0; i < host->blocksize / 4; i++)
 		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
 
 	/* buffer write end */
 	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+	host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error)
-		return sh_mmcif_error_manage(host);
-
-	return 0;
+	return true;
 }
 
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
-				struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+				 struct mmc_request *mrq)
 {
 	struct mmc_data *data = mrq->data;
-	long time;
-	u32 i, sec, j, blocksize, *p;
 
-	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
-						     MMCIF_CE_BLOCK_SET);
+	if (!data->sg_len || !data->sg->length)
+		return;
 
-	for (j = 0; j < data->sg_len; j++) {
-		p = sg_virt(data->sg);
-		for (sec = 0; sec < data->sg->length / blocksize; sec++) {
-			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
-			/* buf write enable*/
-			time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-				host->timeout);
+	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+		BLOCK_SIZE_MASK;
 
-			if (time <= 0 || host->sd_error)
-				return sh_mmcif_error_manage(host);
+	host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+	host->sg_idx = 0;
+	host->sg_blkidx = 0;
+	host->pio_ptr = sg_virt(data->sg);
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
 
-			for (i = 0; i < blocksize / 4; i++)
-				sh_mmcif_writel(host->addr,
-						MMCIF_CE_DATA, *p++);
-		}
-		if (j < data->sg_len - 1)
-			data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+	struct mmc_data *data = host->mrq->data;
+	u32 *p = host->pio_ptr;
+	int i;
+
+	if (host->sd_error) {
+		data->error = sh_mmcif_error_manage(host);
+		return false;
 	}
-	return 0;
+
+	BUG_ON(!data->sg->length);
+
+	for (i = 0; i < host->blocksize / 4; i++)
+		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+	if (!sh_mmcif_next_block(host, p))
+		return false;
+
+	schedule_delayed_work(&host->timeout_work, host->timeout);
+	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+	return true;
 }
 
 static void sh_mmcif_get_response(struct sh_mmcif_host *host,
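(Aside: sh_mmcif_next_block() above advances the PIO cursor through the
request's scatterlist one block per interrupt. A standalone sketch of the same
bookkeeping, with hypothetical names; note the driver increments data->sg
directly, which assumes a flat sg array, whereas generic code would use
sg_next():)

	/* Illustrative only: walk an sg list one block at a time. */
	struct pio_cursor {
		struct scatterlist *sg;	/* current segment */
		unsigned int sg_len;	/* total number of segments */
		unsigned int sg_idx;	/* index of current segment */
		unsigned int blkidx;	/* byte offset into current segment */
	};

	/* Returns true while more blocks remain to transfer. */
	static bool pio_next_block(struct pio_cursor *c, size_t blocksize)
	{
		c->blkidx += blocksize;

		if (c->blkidx == c->sg->length) {	/* segment exhausted */
			c->blkidx = 0;
			if (++c->sg_idx < c->sg_len)
				c->sg++;	/* flat-array assumption */
		}

		return c->sg_idx < c->sg_len;
	}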
@@ -683,18 +786,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
 }
 
 static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
 			       struct mmc_request *mrq, u32 opc)
 {
 	switch (opc) {
 	case MMC_READ_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_read(host, mrq);
+		sh_mmcif_multi_read(host, mrq);
+		return 0;
 	case MMC_WRITE_MULTIPLE_BLOCK:
-		return sh_mmcif_multi_write(host, mrq);
+		sh_mmcif_multi_write(host, mrq);
+		return 0;
 	case MMC_WRITE_BLOCK:
-		return sh_mmcif_single_write(host, mrq);
+		sh_mmcif_single_write(host, mrq);
+		return 0;
 	case MMC_READ_SINGLE_BLOCK:
 	case MMC_SEND_EXT_CSD:
-		return sh_mmcif_single_read(host, mrq);
+		sh_mmcif_single_read(host, mrq);
+		return 0;
 	default:
 		dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
 		return -EINVAL;
@@ -705,9 +812,8 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 			  struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->cmd;
-	long time;
-	int ret = 0;
-	u32 mask, opc = cmd->opcode;
+	u32 opc = cmd->opcode;
+	u32 mask;
 
 	switch (opc) {
 	/* response busy check */
@@ -738,62 +844,14 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
 	/* set cmd */
 	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-		host->timeout);
-	if (time <= 0) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	if (host->sd_error) {
-		switch (cmd->opcode) {
-		case MMC_ALL_SEND_CID:
-		case MMC_SELECT_CARD:
-		case MMC_APP_CMD:
-			cmd->error = -ETIMEDOUT;
-			break;
-		default:
-			dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
-				cmd->opcode);
-			cmd->error = sh_mmcif_error_manage(host);
-			break;
-		}
-		host->sd_error = false;
-		return;
-	}
-	if (!(cmd->flags & MMC_RSP_PRESENT)) {
-		cmd->error = 0;
-		return;
-	}
-	sh_mmcif_get_response(host, cmd);
-	if (host->data) {
-		if (!host->dma_active) {
-			ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
-		} else {
-			long time =
-				wait_for_completion_interruptible_timeout(&host->dma_complete,
-							host->timeout);
-			if (!time)
-				ret = -ETIMEDOUT;
-			else if (time < 0)
-				ret = time;
-			sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
-					BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
-			host->dma_active = false;
-		}
-		if (ret < 0)
-			mrq->data->bytes_xfered = 0;
-		else
-			mrq->data->bytes_xfered =
-				mrq->data->blocks * mrq->data->blksz;
-	}
-	cmd->error = ret;
+	host->wait_for = MMCIF_WAIT_FOR_CMD;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 			      struct mmc_request *mrq)
 {
 	struct mmc_command *cmd = mrq->stop;
-	long time;
 
 	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
 		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
@@ -805,14 +863,8 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
 		return;
 	}
 
-	time = wait_for_completion_interruptible_timeout(&host->intr_wait,
-			host->timeout);
-	if (time <= 0 || host->sd_error) {
-		cmd->error = sh_mmcif_error_manage(host);
-		return;
-	}
-	sh_mmcif_get_cmd12response(host, cmd);
-	cmd->error = 0;
+	host->wait_for = MMCIF_WAIT_FOR_STOP;
+	schedule_delayed_work(&host->timeout_work, host->timeout);
 }
 
 static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -851,23 +903,11 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
 	default:
 		break;
 	}
+
+	host->mrq = mrq;
 	host->data = mrq->data;
-	if (mrq->data) {
-		if (mrq->data->flags & MMC_DATA_READ) {
-			if (host->chan_rx)
-				sh_mmcif_start_dma_rx(host);
-		} else {
-			if (host->chan_tx)
-				sh_mmcif_start_dma_tx(host);
-		}
-	}
-	sh_mmcif_start_cmd(host, mrq);
-	host->data = NULL;
 
-	if (!mrq->cmd->error && mrq->stop)
-		sh_mmcif_stop_cmd(host, mrq);
-	host->state = STATE_IDLE;
-	mmc_request_done(mmc, mrq);
+	sh_mmcif_start_cmd(host, mrq);
 }
 
 static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
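With this hunk, .request() no longer completes requests itself: it stores the
request, starts the command, and returns. Roughly, the new call flow (an
illustrative aside, not part of the patch):

	/*
	 * mmc core                        sh_mmcif driver
	 * --------                        ---------------
	 * .request()       ----------->  sh_mmcif_request():
	 *                                    host->mrq = mrq;
	 *                                    sh_mmcif_start_cmd();  (returns)
	 * ...IRQs / timeout work drive the state machine...
	 * mmc_request_done() <---------  sh_mmcif_irqt() or mmcif_timeout_work()
	 */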
@@ -942,6 +982,157 @@ static struct mmc_host_ops sh_mmcif_ops = {
 	.get_cd = sh_mmcif_get_cd,
 };
 
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
+{
+	struct mmc_command *cmd = host->mrq->cmd;
+	long time;
+
+	if (host->sd_error) {
+		switch (cmd->opcode) {
+		case MMC_ALL_SEND_CID:
+		case MMC_SELECT_CARD:
+		case MMC_APP_CMD:
+			cmd->error = -ETIMEDOUT;
+			host->sd_error = false;
+			break;
+		default:
+			cmd->error = sh_mmcif_error_manage(host);
+			dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+				cmd->opcode, cmd->error);
+			break;
+		}
+		return false;
+	}
+	if (!(cmd->flags & MMC_RSP_PRESENT)) {
+		cmd->error = 0;
+		return false;
+	}
+
+	sh_mmcif_get_response(host, cmd);
+
+	if (!host->data)
+		return false;
+
+	if (host->mrq->data->flags & MMC_DATA_READ) {
+		if (host->chan_rx)
+			sh_mmcif_start_dma_rx(host);
+	} else {
+		if (host->chan_tx)
+			sh_mmcif_start_dma_tx(host);
+	}
+
+	if (!host->dma_active) {
+		host->data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+		if (!host->data->error)
+			return true;
+		return false;
+	}
+
+	/* Running in the IRQ thread, can sleep */
+	time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+							 host->timeout);
+	if (host->sd_error) {
+		dev_err(host->mmc->parent,
+			"Error IRQ while waiting for DMA completion!\n");
+		/* Woken up by an error IRQ: abort DMA */
+		if (host->data->flags & MMC_DATA_READ)
+			dmaengine_terminate_all(host->chan_rx);
+		else
+			dmaengine_terminate_all(host->chan_tx);
+		host->data->error = sh_mmcif_error_manage(host);
+	} else if (!time) {
+		host->data->error = -ETIMEDOUT;
+	} else if (time < 0) {
+		host->data->error = time;
+	}
+	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+			BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+	host->dma_active = false;
+
+	if (host->data->error)
+		host->data->bytes_xfered = 0;
+
+	return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+	struct sh_mmcif_host *host = dev_id;
+	struct mmc_request *mrq = host->mrq;
+
+	cancel_delayed_work_sync(&host->timeout_work);
+
+	/*
+	 * All handlers return true, if processing continues, and false, if the
+	 * request has to be completed - successfully or not
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_REQUEST:
+		/* We're too late, the timeout has already kicked in */
+		return IRQ_HANDLED;
+	case MMCIF_WAIT_FOR_CMD:
+		if (sh_mmcif_end_cmd(host))
+			/* Wait for data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+		if (sh_mmcif_mread_block(host))
+			/* Wait for more data */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_READ:
+		if (sh_mmcif_read_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_MWRITE:
+		if (sh_mmcif_mwrite_block(host))
+			/* Wait data to write */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_WRITE:
+		if (sh_mmcif_write_block(host))
+			/* Wait for data end */
+			return IRQ_HANDLED;
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		if (host->sd_error) {
+			mrq->stop->error = sh_mmcif_error_manage(host);
+			break;
+		}
+		sh_mmcif_get_cmd12response(host, mrq->stop);
+		mrq->stop->error = 0;
+		break;
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		if (host->sd_error)
+			mrq->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+		host->data = NULL;
+
+		if (!mrq->cmd->error && mrq->data && !mrq->data->error)
+			mrq->data->bytes_xfered =
+				mrq->data->blocks * mrq->data->blksz;
+
+		if (mrq->stop && !mrq->cmd->error && (!mrq->data || !mrq->data->error)) {
+			sh_mmcif_stop_cmd(host, mrq);
+			if (!mrq->stop->error)
+				return IRQ_HANDLED;
+		}
+	}
+
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->state = STATE_IDLE;
+	mmc_request_done(host->mmc, mrq);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
@@ -993,14 +1184,58 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
 		host->sd_error = true;
 		dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
 	}
-	if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
-		complete(&host->intr_wait);
-	else
+	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+		if (!host->dma_active)
+			return IRQ_WAKE_THREAD;
+		else if (host->sd_error)
+			mmcif_dma_complete(host);
+	} else {
 		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+	}
 
 	return IRQ_HANDLED;
 }
 
+static void mmcif_timeout_work(struct work_struct *work)
+{
+	struct delayed_work *d = container_of(work, struct delayed_work, work);
+	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+	struct mmc_request *mrq = host->mrq;
+
+	if (host->dying)
+		/* Don't run after mmc_remove_host() */
+		return;
+
+	/*
+	 * Handle races with cancel_delayed_work(), unless
+	 * cancel_delayed_work_sync() is used
+	 */
+	switch (host->wait_for) {
+	case MMCIF_WAIT_FOR_CMD:
+		mrq->cmd->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_STOP:
+		mrq->stop->error = sh_mmcif_error_manage(host);
+		break;
+	case MMCIF_WAIT_FOR_MREAD:
+	case MMCIF_WAIT_FOR_MWRITE:
+	case MMCIF_WAIT_FOR_READ:
+	case MMCIF_WAIT_FOR_WRITE:
+	case MMCIF_WAIT_FOR_READ_END:
+	case MMCIF_WAIT_FOR_WRITE_END:
+		host->data->error = sh_mmcif_error_manage(host);
+		break;
+	default:
+		BUG();
+	}
+
+	host->state = STATE_IDLE;
+	host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+	host->data = NULL;
+	host->mrq = NULL;
+	mmc_request_done(host->mmc, mrq);
+}
+
 static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 {
 	int ret = 0, irq[2];
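The timeout handling added above is the standard delayed-work watchdog
pattern; in outline (an illustrative sketch, the real calls are in the hunks
above and below):

	/* Once, at probe time: */
	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);

	/* Arm the watchdog whenever a wait state is entered: */
	schedule_delayed_work(&host->timeout_work, host->timeout);

	/* Disarm it when the expected IRQ arrives (start of sh_mmcif_irqt()): */
	cancel_delayed_work_sync(&host->timeout_work);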
@@ -1054,7 +1289,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 	host->clk = clk_get_rate(host->hclk);
 	host->pd = pdev;
 
-	init_completion(&host->intr_wait);
 	spin_lock_init(&host->lock);
 
 	mmc->ops = &sh_mmcif_ops;
@@ -1091,18 +1325,20 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
 
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
-	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
 	if (ret) {
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
 		goto clean_up3;
 	}
-	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+	ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
 	if (ret) {
 		free_irq(irq[0], host);
 		dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
 		goto clean_up3;
 	}
 
+	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
+
 	mmc_detect_change(host->mmc, 0);
 
 	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
@@ -1129,11 +1365,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
 	struct sh_mmcif_host *host = platform_get_drvdata(pdev);
 	int irq[2];
 
+	host->dying = true;
 	pm_runtime_get_sync(&pdev->dev);
 
 	mmc_remove_host(host->mmc);
 	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
 
+	/*
+	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+	 * mmc_remove_host() call above. But swapping order doesn't help either
+	 * (a query on the linux-mmc mailing list didn't bring any replies).
+	 */
+	cancel_delayed_work_sync(&host->timeout_work);
+
 	if (host->addr)
 		iounmap(host->addr);
 