path: root/drivers/mmc/card/block.c
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c | 681
1 file changed, 464 insertions, 217 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e42224559..1ff5486213fb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_RETRY,
+	MMC_BLK_RETRY_SINGLE,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_ABORT,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
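The status codes above are consumed by the retry loop that later hunks add to mmc_blk_issue_rw_rq(). As a rough, self-contained illustration of that dispatch shape (user-space C; the enum names are copied from the patch for readability, and issue_once() is an invented stand-in for submitting one transfer, not kernel code):

#include <stdio.h>

/* Illustrative copy of the patch's per-request status codes. */
enum mmc_blk_status {
	MMC_BLK_SUCCESS = 0,
	MMC_BLK_PARTIAL,
	MMC_BLK_RETRY,
	MMC_BLK_RETRY_SINGLE,
	MMC_BLK_DATA_ERR,
	MMC_BLK_CMD_ERR,
	MMC_BLK_ABORT,
};

/* Stand-in for issuing one transfer; a real driver talks to the host here. */
static enum mmc_blk_status issue_once(int attempt)
{
	return attempt < 2 ? MMC_BLK_RETRY : MMC_BLK_SUCCESS;
}

int main(void)
{
	int retry = 0, attempt = 0;
	enum mmc_blk_status status;

	/* Same shape as the switch in the new mmc_blk_issue_rw_rq():
	 * bounded retries for MMC_BLK_RETRY, immediate stop on abort. */
	for (;;) {
		status = issue_once(attempt++);
		if (status == MMC_BLK_SUCCESS || status == MMC_BLK_PARTIAL) {
			printf("done after %d attempt(s)\n", attempt);
			return 0;
		}
		if (status == MMC_BLK_RETRY && retry++ < 5)
			continue;
		printf("aborting, status %d\n", status);
		return 1;
	}
}

The real driver additionally distinguishes read and write paths and re-prepares the request before resending it.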
@@ -427,14 +437,6 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-	struct mmc_request	mrq;
-	struct mmc_command	sbc;
-	struct mmc_command	cmd;
-	struct mmc_command	stop;
-	struct mmc_data		data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
@@ -525,7 +527,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
 	return result;
 }
 
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+	struct mmc_command cmd = {0};
+	int err;
+
+	cmd.opcode = MMC_STOP_TRANSMISSION;
+	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	err = mmc_wait_for_cmd(card->host, &cmd, 5);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
 {
 	struct mmc_command cmd = {0};
 	int err;
@@ -534,11 +549,141 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
 	if (!mmc_host_is_spi(card->host))
 		cmd.arg = card->rca << 16;
 	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
-	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	err = mmc_wait_for_cmd(card->host, &cmd, retries);
+	if (err == 0)
+		*status = cmd.resp[0];
+	return err;
+}
+
+#define ERR_RETRY	2
+#define ERR_ABORT	1
+#define ERR_CONTINUE	0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+	bool status_valid, u32 status)
+{
+	switch (error) {
+	case -EILSEQ:
+		/* response crc error, retry the r/w cmd */
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "response CRC error",
+			name, status);
+		return ERR_RETRY;
+
+	case -ETIMEDOUT:
+		pr_err("%s: %s sending %s command, card status %#x\n",
+			req->rq_disk->disk_name, "timed out", name, status);
+
+		/* If the status cmd initially failed, retry the r/w cmd */
+		if (!status_valid)
+			return ERR_RETRY;
+
+		/*
+		 * If it was a r/w cmd crc error, or illegal command
+		 * (eg, issued in wrong state) then retry - we should
+		 * have corrected the state problem above.
+		 */
+		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+			return ERR_RETRY;
+
+		/* Otherwise abort the command */
+		return ERR_ABORT;
+
+	default:
+		/* We don't understand the error code the driver gave us */
+		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+		       req->rq_disk->disk_name, error, status);
+		return ERR_ABORT;
+	}
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state.  Essentially, we do this as follows:
+ * - Obtain card status.  If the first attempt to obtain card status fails,
+ *   the status word will reflect the failed status cmd, not the failed
+ *   r/w cmd.  If we fail to obtain card status, it suggests we can no
+ *   longer communicate with the card.
+ * - Check the card state.  If the card received the cmd but there was a
+ *   transient problem with the response, it might still be in a data transfer
+ *   mode.  Try to send it a stop command.  If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ *   transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ *   illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+	struct mmc_blk_request *brq)
+{
+	bool prev_cmd_status_valid = true;
+	u32 status, stop_status = 0;
+	int err, retry;
+
+	/*
+	 * Try to get card status which indicates both the card state
+	 * and why there was no response.  If the first attempt fails,
+	 * we can't be sure the returned status is for the r/w command.
+	 */
+	for (retry = 2; retry >= 0; retry--) {
+		err = get_card_status(card, &status, 0);
+		if (!err)
+			break;
+
+		prev_cmd_status_valid = false;
+		pr_err("%s: error %d sending status command, %sing\n",
+		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+	}
+
+	/* We couldn't get a response from the card.  Give up. */
 	if (err)
-		printk(KERN_ERR "%s: error %d sending status command",
-		       req->rq_disk->disk_name, err);
-	return cmd.resp[0];
+		return ERR_ABORT;
+
+	/*
+	 * Check the current card state.  If it is in some data transfer
+	 * mode, tell it to stop (and hopefully transition back to TRAN.)
+	 */
+	if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+	    R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+		err = send_stop(card, &stop_status);
+		if (err)
+			pr_err("%s: error %d sending stop command\n",
+			       req->rq_disk->disk_name, err);
+
+		/*
+		 * If the stop cmd also timed out, the card is probably
+		 * not present, so abort.  Other errors are bad news too.
+		 */
+		if (err)
+			return ERR_ABORT;
+	}
+
+	/* Check for set block count errors */
+	if (brq->sbc.error)
+		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+				prev_cmd_status_valid, status);
+
+	/* Check for r/w command errors */
+	if (brq->cmd.error)
+		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+				prev_cmd_status_valid, status);
+
+	/* Now for stop errors.  These aren't fatal to the transfer. */
+	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+	       req->rq_disk->disk_name, brq->stop.error,
+	       brq->cmd.resp[0], status);
+
+	/*
+	 * Substitute in our own stop status as this will give the error
+	 * state which happened during the execution of the r/w command.
+	 */
+	if (stop_status) {
+		brq->stop.resp[0] = stop_status;
+		brq->stop.error = 0;
+	}
+	return ERR_CONTINUE;
 }
 
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
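mmc_blk_cmd_recovery() above boils a failed transfer down to ERR_RETRY, ERR_ABORT or ERR_CONTINUE, following the rules spelled out in its comment. A compact stand-alone sketch of that classification step, reusing the same errno codes and R1 bit positions purely for illustration (this is not the kernel implementation):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define ERR_RETRY    2
#define ERR_ABORT    1
#define ERR_CONTINUE 0

/* Simplified stand-ins for the R1 card status bits used by the patch. */
#define R1_COM_CRC_ERROR    (1u << 23)
#define R1_ILLEGAL_COMMAND  (1u << 22)

/*
 * Mirrors the decision order of mmc_blk_cmd_error(): a response CRC error
 * is retried, a timeout is retried only if the card status is unusable or
 * shows a correctable state problem, anything else is aborted.
 */
static int classify_cmd_error(int error, bool status_valid, unsigned status)
{
	switch (error) {
	case -EILSEQ:
		return ERR_RETRY;
	case -ETIMEDOUT:
		if (!status_valid)
			return ERR_RETRY;
		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;
		return ERR_ABORT;
	default:
		return ERR_ABORT;
	}
}

int main(void)
{
	printf("%d\n", classify_cmd_error(-EILSEQ, true, 0));     /* 2: retry */
	printf("%d\n", classify_cmd_error(-ETIMEDOUT, false, 0)); /* 2: retry */
	printf("%d\n", classify_cmd_error(-ETIMEDOUT, true, 0));  /* 1: abort */
	return 0;
}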
@@ -669,240 +814,324 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	}
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS							\
+	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\
+	 R1_ADDRESS_ERROR |	/* Misaligned address */		\
+	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\
+	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\
+	 R1_CC_ERROR |		/* Card controller error */		\
+	 R1_ERROR)		/* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+			     struct mmc_async_req *areq)
 {
-	struct mmc_blk_data *md = mq->data;
-	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request brq;
-	int ret = 1, disable_multi = 0;
+	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+						    mmc_active);
+	struct mmc_blk_request *brq = &mq_mrq->brq;
+	struct request *req = mq_mrq->req;
 
 	/*
-	 * Reliable writes are used to implement Forced Unit Access and
-	 * REQ_META accesses, and are supported only on MMCs.
+	 * sbc.error indicates a problem with the set block count
+	 * command.  No data will have been transferred.
+	 *
+	 * cmd.error indicates a problem with the r/w command.  No
+	 * data will have been transferred.
+	 *
+	 * stop.error indicates a problem with the stop command.  Data
+	 * may have been transferred, or may still be transferring.
 	 */
-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-			  (req->cmd_flags & REQ_META)) &&
-			  (rq_data_dir(req) == WRITE) &&
-			  (md->flags & MMC_BLK_REL_WR);
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+		case ERR_RETRY:
+			return MMC_BLK_RETRY;
+		case ERR_ABORT:
+			return MMC_BLK_ABORT;
+		case ERR_CONTINUE:
+			break;
+		}
+	}
 
-	do {
-		struct mmc_command cmd = {0};
-		u32 readcmd, writecmd, status = 0;
-
-		memset(&brq, 0, sizeof(struct mmc_blk_request));
-		brq.mrq.cmd = &brq.cmd;
-		brq.mrq.data = &brq.data;
-
-		brq.cmd.arg = blk_rq_pos(req);
-		if (!mmc_card_blockaddr(card))
-			brq.cmd.arg <<= 9;
-		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 512;
-		brq.stop.opcode = MMC_STOP_TRANSMISSION;
-		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = blk_rq_sectors(req);
+	/*
+	 * Check for errors relating to the execution of the
+	 * initial command - such as address errors.  No data
+	 * has been transferred.
+	 */
+	if (brq->cmd.resp[0] & CMD_ERRORS) {
+		pr_err("%s: r/w command failed, status = %#x\n",
+			req->rq_disk->disk_name, brq->cmd.resp[0]);
+		return MMC_BLK_ABORT;
+	}
 
-		/*
-		 * The block layer doesn't support all sector count
-		 * restrictions, so we need to be prepared for too big
-		 * requests.
-		 */
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
+	/*
+	 * Everything else is either success, or a data error of some
+	 * kind.  If it was a write, we may have transitioned to
+	 * program mode, and we have to wait for it to complete.
+	 */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		u32 status;
+		do {
+			int err = get_card_status(card, &status, 5);
+			if (err) {
+				printk(KERN_ERR "%s: error %d requesting status\n",
+				       req->rq_disk->disk_name, err);
+				return MMC_BLK_CMD_ERR;
+			}
+			/*
+			 * Some cards mishandle the status bits,
+			 * so make sure to check both the busy
+			 * indication and the card state.
+			 */
+		} while (!(status & R1_READY_FOR_DATA) ||
+			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+	}
 
-		/*
-		 * After a read error, we redo the request one sector at a time
-		 * in order to accurately determine which sectors can be read
-		 * successfully.
-		 */
-		if (disable_multi && brq.data.blocks > 1)
-			brq.data.blocks = 1;
+	if (brq->data.error) {
+		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+		       req->rq_disk->disk_name, brq->data.error,
+		       (unsigned)blk_rq_pos(req),
+		       (unsigned)blk_rq_sectors(req),
+		       brq->cmd.resp[0], brq->stop.resp[0]);
 
-		if (brq.data.blocks > 1 || do_rel_wr) {
-			/* SPI multiblock writes terminate using a special
-			 * token, not a STOP_TRANSMISSION request.
-			 */
-			if (!mmc_host_is_spi(card->host) ||
-			    rq_data_dir(req) == READ)
-				brq.mrq.stop = &brq.stop;
-			readcmd = MMC_READ_MULTIPLE_BLOCK;
-			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-		} else {
-			brq.mrq.stop = NULL;
-			readcmd = MMC_READ_SINGLE_BLOCK;
-			writecmd = MMC_WRITE_BLOCK;
-		}
 		if (rq_data_dir(req) == READ) {
-			brq.cmd.opcode = readcmd;
-			brq.data.flags |= MMC_DATA_READ;
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				return MMC_BLK_RETRY_SINGLE;
+			}
+			return MMC_BLK_DATA_ERR;
 		} else {
-			brq.cmd.opcode = writecmd;
-			brq.data.flags |= MMC_DATA_WRITE;
+			return MMC_BLK_CMD_ERR;
 		}
+	}
 
-		if (do_rel_wr)
-			mmc_apply_rel_rw(&brq, card, req);
+	if (ret == MMC_BLK_SUCCESS &&
+	    blk_rq_bytes(req) != brq->data.bytes_xfered)
+		ret = MMC_BLK_PARTIAL;
 
-		/*
-		 * Pre-defined multi-block transfers are preferable to
-		 * open ended-ones (and necessary for reliable writes).
-		 * However, it is not sufficient to just send CMD23,
-		 * and avoid the final CMD12, as on an error condition
-		 * CMD12 (stop) needs to be sent anyway. This, coupled
-		 * with Auto-CMD23 enhancements provided by some
-		 * hosts, means that the complexity of dealing
-		 * with this is best left to the host. If CMD23 is
-		 * supported by card and host, we'll fill sbc in and let
-		 * the host deal with handling it correctly. This means
-		 * that for hosts that don't expose MMC_CAP_CMD23, no
-		 * change of behavior will be observed.
-		 *
-		 * N.B: Some MMC cards experience perf degradation.
-		 * We'll avoid using CMD23-bounded multiblock writes for
-		 * these, while retaining features like reliable writes.
-		 */
+	return ret;
+}
 
-		if ((md->flags & MMC_BLK_CMD23) &&
-		    mmc_op_multi(brq.cmd.opcode) &&
-		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
-			brq.sbc.arg = brq.data.blocks |
-				(do_rel_wr ? (1 << 31) : 0);
-			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-			brq.mrq.sbc = &brq.sbc;
-		}
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+			       struct mmc_card *card,
+			       int disable_multi,
+			       struct mmc_queue *mq)
+{
+	u32 readcmd, writecmd;
+	struct mmc_blk_request *brq = &mqrq->brq;
+	struct request *req = mqrq->req;
+	struct mmc_blk_data *md = mq->data;
 
-		mmc_set_data_timeout(&brq.data, card);
+	/*
+	 * Reliable writes are used to implement Forced Unit Access and
+	 * REQ_META accesses, and are supported only on MMCs.
+	 */
+	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+			  (req->cmd_flags & REQ_META)) &&
+			  (rq_data_dir(req) == WRITE) &&
+			  (md->flags & MMC_BLK_REL_WR);
 
-		brq.data.sg = mq->sg;
-		brq.data.sg_len = mmc_queue_map_sg(mq);
+	memset(brq, 0, sizeof(struct mmc_blk_request));
+	brq->mrq.cmd = &brq->cmd;
+	brq->mrq.data = &brq->data;
 
-		/*
-		 * Adjust the sg list so it is the same size as the
-		 * request.
+	brq->cmd.arg = blk_rq_pos(req);
+	if (!mmc_card_blockaddr(card))
+		brq->cmd.arg <<= 9;
+	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+	brq->data.blksz = 512;
+	brq->stop.opcode = MMC_STOP_TRANSMISSION;
+	brq->stop.arg = 0;
+	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+	brq->data.blocks = blk_rq_sectors(req);
+
+	/*
+	 * The block layer doesn't support all sector count
+	 * restrictions, so we need to be prepared for too big
+	 * requests.
+	 */
+	if (brq->data.blocks > card->host->max_blk_count)
+		brq->data.blocks = card->host->max_blk_count;
+
+	/*
+	 * After a read error, we redo the request one sector at a time
+	 * in order to accurately determine which sectors can be read
+	 * successfully.
+	 */
+	if (disable_multi && brq->data.blocks > 1)
+		brq->data.blocks = 1;
+
+	if (brq->data.blocks > 1 || do_rel_wr) {
+		/* SPI multiblock writes terminate using a special
+		 * token, not a STOP_TRANSMISSION request.
 		 */
-		if (brq.data.blocks != blk_rq_sectors(req)) {
-			int i, data_size = brq.data.blocks << 9;
-			struct scatterlist *sg;
-
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
-				data_size -= sg->length;
-				if (data_size <= 0) {
-					sg->length += data_size;
-					i++;
-					break;
-				}
-			}
-			brq.data.sg_len = i;
-		}
+		if (!mmc_host_is_spi(card->host) ||
+		    rq_data_dir(req) == READ)
+			brq->mrq.stop = &brq->stop;
+		readcmd = MMC_READ_MULTIPLE_BLOCK;
+		writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+	} else {
+		brq->mrq.stop = NULL;
+		readcmd = MMC_READ_SINGLE_BLOCK;
+		writecmd = MMC_WRITE_BLOCK;
+	}
+	if (rq_data_dir(req) == READ) {
+		brq->cmd.opcode = readcmd;
+		brq->data.flags |= MMC_DATA_READ;
+	} else {
+		brq->cmd.opcode = writecmd;
+		brq->data.flags |= MMC_DATA_WRITE;
+	}
 
-		mmc_queue_bounce_pre(mq);
+	if (do_rel_wr)
+		mmc_apply_rel_rw(brq, card, req);
 
-		mmc_wait_for_req(card->host, &brq.mrq);
+	/*
+	 * Pre-defined multi-block transfers are preferable to
+	 * open ended-ones (and necessary for reliable writes).
+	 * However, it is not sufficient to just send CMD23,
+	 * and avoid the final CMD12, as on an error condition
+	 * CMD12 (stop) needs to be sent anyway. This, coupled
+	 * with Auto-CMD23 enhancements provided by some
+	 * hosts, means that the complexity of dealing
+	 * with this is best left to the host. If CMD23 is
+	 * supported by card and host, we'll fill sbc in and let
+	 * the host deal with handling it correctly. This means
+	 * that for hosts that don't expose MMC_CAP_CMD23, no
+	 * change of behavior will be observed.
+	 *
+	 * N.B: Some MMC cards experience perf degradation.
+	 * We'll avoid using CMD23-bounded multiblock writes for
+	 * these, while retaining features like reliable writes.
+	 */
 
-		mmc_queue_bounce_post(mq);
+	if ((md->flags & MMC_BLK_CMD23) &&
+	    mmc_op_multi(brq->cmd.opcode) &&
+	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+		brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+		brq->sbc.arg = brq->data.blocks |
+			(do_rel_wr ? (1 << 31) : 0);
+		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+		brq->mrq.sbc = &brq->sbc;
+	}
 
-		/*
-		 * Check for errors here, but don't jump to cmd_err
-		 * until later as we need to wait for the card to leave
-		 * programming mode even when things go wrong.
-		 */
-		if (brq.sbc.error || brq.cmd.error ||
-		    brq.data.error || brq.stop.error) {
-			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
-				/* Redo read one sector at a time */
-				printk(KERN_WARNING "%s: retrying using single "
-				       "block read\n", req->rq_disk->disk_name);
-				disable_multi = 1;
-				continue;
-			}
-			status = get_card_status(card, req);
-		}
+	mmc_set_data_timeout(&brq->data, card);
 
-		if (brq.sbc.error) {
-			printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
-			       "command, response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.sbc.error,
-			       brq.sbc.resp[0], status);
-		}
+	brq->data.sg = mqrq->sg;
+	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
 
-		if (brq.cmd.error) {
-			printk(KERN_ERR "%s: error %d sending read/write "
-			       "command, response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.cmd.error,
-			       brq.cmd.resp[0], status);
+	/*
+	 * Adjust the sg list so it is the same size as the
+	 * request.
+	 */
+	if (brq->data.blocks != blk_rq_sectors(req)) {
+		int i, data_size = brq->data.blocks << 9;
+		struct scatterlist *sg;
+
+		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+			data_size -= sg->length;
+			if (data_size <= 0) {
+				sg->length += data_size;
+				i++;
+				break;
+			}
 		}
+		brq->data.sg_len = i;
+	}
 
-		if (brq.data.error) {
-			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
-				/* 'Stop' response contains card status */
-				status = brq.mrq.stop->resp[0];
-			printk(KERN_ERR "%s: error %d transferring data,"
-			       " sector %u, nr %u, card status %#x\n",
-			       req->rq_disk->disk_name, brq.data.error,
-			       (unsigned)blk_rq_pos(req),
-			       (unsigned)blk_rq_sectors(req), status);
-		}
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_err_check;
 
-		if (brq.stop.error) {
-			printk(KERN_ERR "%s: error %d sending stop command, "
-			       "response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.stop.error,
-			       brq.stop.resp[0], status);
-		}
+	mmc_queue_bounce_pre(mqrq);
+}
 
-		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-			do {
-				int err;
-
-				cmd.opcode = MMC_SEND_STATUS;
-				cmd.arg = card->rca << 16;
-				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
-				err = mmc_wait_for_cmd(card->host, &cmd, 5);
-				if (err) {
-					printk(KERN_ERR "%s: error %d requesting status\n",
-					       req->rq_disk->disk_name, err);
-					goto cmd_err;
-				}
-				/*
-				 * Some cards mishandle the status bits,
-				 * so make sure to check both the busy
-				 * indication and the card state.
-				 */
-			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
-				 (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
-			if (cmd.resp[0] & ~0x00000900)
-				printk(KERN_ERR "%s: status = %08x\n",
-				       req->rq_disk->disk_name, cmd.resp[0]);
-			if (mmc_decode_status(cmd.resp))
-				goto cmd_err;
-#endif
-		}
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+	struct mmc_blk_data *md = mq->data;
+	struct mmc_card *card = md->queue.card;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+	int ret = 1, disable_multi = 0, retry = 0;
+	enum mmc_blk_status status;
+	struct mmc_queue_req *mq_rq;
+	struct request *req;
+	struct mmc_async_req *areq;
+
+	if (!rqc && !mq->mqrq_prev->req)
+		return 0;
 
-		if (brq.cmd.error || brq.stop.error || brq.data.error) {
-			if (rq_data_dir(req) == READ) {
+	do {
+		if (rqc) {
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			areq = &mq->mqrq_cur->mmc_active;
+		} else
+			areq = NULL;
+		areq = mmc_start_req(card->host, areq, (int *) &status);
+		if (!areq)
+			return 0;
+
+		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+		brq = &mq_rq->brq;
+		req = mq_rq->req;
+		mmc_queue_bounce_post(mq_rq);
+
+		switch (status) {
+		case MMC_BLK_SUCCESS:
+		case MMC_BLK_PARTIAL:
+			/*
+			 * A block was successfully transferred.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0,
+						brq->data.bytes_xfered);
+			spin_unlock_irq(&md->lock);
+			if (status == MMC_BLK_SUCCESS && ret) {
 				/*
-				 * After an error, we redo I/O one sector at a
-				 * time, so we only reach here after trying to
-				 * read a single sector.
+				 * The blk_end_request has returned non zero
+				 * even though all data is transferred and no
+				 * errors were returned by the host.
+				 * If this happens it's a bug.
 				 */
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, -EIO, brq.data.blksz);
-				spin_unlock_irq(&md->lock);
-				continue;
+				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+				       __func__, blk_rq_bytes(req),
+				       brq->data.bytes_xfered);
+				rqc = NULL;
+				goto cmd_abort;
 			}
+			break;
+		case MMC_BLK_CMD_ERR:
 			goto cmd_err;
+		case MMC_BLK_RETRY_SINGLE:
+			disable_multi = 1;
+			break;
+		case MMC_BLK_RETRY:
+			if (retry++ < 5)
+				break;
+		case MMC_BLK_ABORT:
+			goto cmd_abort;
+		case MMC_BLK_DATA_ERR:
+			/*
+			 * After an error, we redo I/O one sector at a
+			 * time, so we only reach here after trying to
+			 * read a single sector.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, -EIO,
+						brq->data.blksz);
+			spin_unlock_irq(&md->lock);
+			if (!ret)
+				goto start_new_req;
+			break;
 		}
 
-		/*
-		 * A block was successfully transferred.
-		 */
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
+		if (ret) {
+			/*
+			 * In case of an incomplete request
+			 * prepare it again and resend.
+			 */
+			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+		}
 	} while (ret);
 
 	return 1;
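The rewritten mmc_blk_issue_rw_rq() relies on mmc_start_req() to submit the freshly prepared request and hand back the one that has just finished, so preparing request N+1 overlaps the transfer of request N. A toy user-space model of that two-slot hand-off (struct toy_req and toy_start_req() are invented for illustration; the kernel API differs):

#include <stdio.h>

/* Toy model of the current/previous pipelining used by the patch:
 * submitting the new request returns the completed one, so preparation
 * and transfer overlap.  Names here are illustrative only. */

struct toy_req {
	int id;
	int prepared;
};

static struct toy_req *in_flight;

/* Submit 'next' (may be NULL at the end) and hand back the completed one. */
static struct toy_req *toy_start_req(struct toy_req *next)
{
	struct toy_req *done = in_flight;

	in_flight = next;
	if (done)
		printf("completed request %d\n", done->id);
	return done;
}

int main(void)
{
	struct toy_req slots[2] = { { .id = 0 }, { .id = 1 } };
	int i;

	for (i = 0; i < 4; i++) {
		struct toy_req *cur = &slots[i & 1];

		cur->id = i;
		cur->prepared = 1;	/* mmc_blk_rw_rq_prep() analogue */
		toy_start_req(cur);	/* overlaps with the previous transfer */
	}
	toy_start_req(NULL);		/* drain the last in-flight request */
	return 0;
}

The same idea explains the NULL request above: issuing it flushes the last in-flight transfer once the queue runs dry.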
@@ -927,15 +1156,22 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		}
 	} else {
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
+ cmd_abort:
 	spin_lock_irq(&md->lock);
 	while (ret)
 		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
+ start_new_req:
+	if (rqc) {
+		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+	}
+
 	return 0;
 }
 
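The cmd_abort path above depends on the request-completion helper returning nonzero while part of the request is still outstanding, which is why the error path loops. A simplified model of that completion loop, with plain integers standing in for struct request and the block-layer helpers (names are illustrative, not the kernel API):

#include <stdio.h>

struct toy_request {
	int bytes_left;
	int cur_chunk;
};

/* Returns 1 while the request still has bytes outstanding, 0 when done. */
static int toy_end_request(struct toy_request *rq, int error, int nr_bytes)
{
	rq->bytes_left -= nr_bytes;
	if (error)
		printf("failed %d bytes (%d left)\n", nr_bytes, rq->bytes_left);
	return rq->bytes_left > 0;
}

int main(void)
{
	struct toy_request rq = { .bytes_left = 4096, .cur_chunk = 1024 };
	int ret = 1;

	/* Same shape as: while (ret) ret = __blk_end_request(req, -EIO, ...); */
	while (ret)
		ret = toy_end_request(&rq, -5 /* -EIO */, rq.cur_chunk);
	return 0;
}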
@@ -945,26 +1181,37 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 
-	mmc_claim_host(card->host);
+	if (req && !mq->mqrq_prev->req)
+		/* claim host only for the first request */
+		mmc_claim_host(card->host);
+
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
 		ret = 0;
 		goto out;
 	}
 
-	if (req->cmd_flags & REQ_DISCARD) {
+	if (req && req->cmd_flags & REQ_DISCARD) {
+		/* complete ongoing async transfer before issuing discard */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
 		if (req->cmd_flags & REQ_SECURE)
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (req->cmd_flags & REQ_FLUSH) {
+	} else if (req && req->cmd_flags & REQ_FLUSH) {
+		/* complete ongoing async transfer before issuing flush */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	mmc_release_host(card->host);
+	if (!req)
+		/* release host only when there are no more requests */
+		mmc_release_host(card->host);
 	return ret;
 }
 
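With requests now completing asynchronously, mmc_blk_issue_rq() can no longer claim and release the host around every single call: it claims only when nothing is already in flight, and releases on the NULL request issued once the queue is empty. A small sketch of that rule, with invented names and booleans standing in for the real host and queue state:

#include <stdbool.h>
#include <stdio.h>

static bool host_claimed;
static bool prev_req_pending;

/* Models the claim/release rule of the new mmc_blk_issue_rq(). */
static void issue(const char *req)
{
	if (req && !prev_req_pending && !host_claimed) {
		host_claimed = true;		/* claim only for the first request */
		printf("claim host\n");
	}

	if (req) {
		printf("issue %s\n", req);
		prev_req_pending = true;	/* it stays in flight asynchronously */
	} else if (host_claimed) {
		prev_req_pending = false;	/* queue drained, nothing in flight */
		host_claimed = false;		/* release only when no more requests */
		printf("release host\n");
	}
}

int main(void)
{
	issue("req0");	/* claims the host */
	issue("req1");	/* already claimed, no re-claim */
	issue(NULL);	/* queue empty: releases the host */
	return 0;
}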