-rw-r--r--  drivers/mmc/card/block.c | 201
-rw-r--r--  drivers/mmc/core/core.c  |   4
2 files changed, 144 insertions, 61 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 9b9072636687..66c7596c5546 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -94,6 +94,11 @@ struct mmc_blk_data {
 	unsigned int read_only;
 	unsigned int part_type;
 	unsigned int name_idx;
+	unsigned int reset_done;
+#define MMC_BLK_READ		BIT(0)
+#define MMC_BLK_WRITE		BIT(1)
+#define MMC_BLK_DISCARD		BIT(2)
+#define MMC_BLK_SECDISCARD	BIT(3)
 
 	/*
 	 * Only set in main mmc_blk_data associated
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock);
 enum mmc_blk_status {
 	MMC_BLK_SUCCESS = 0,
 	MMC_BLK_PARTIAL,
-	MMC_BLK_RETRY,
-	MMC_BLK_RETRY_SINGLE,
-	MMC_BLK_DATA_ERR,
 	MMC_BLK_CMD_ERR,
+	MMC_BLK_RETRY,
 	MMC_BLK_ABORT,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_ECC_ERR,
 };
 
 module_param(perdev_minors, int, 0444);
@@ -454,7 +459,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
 				 card->ext_csd.part_time);
 		if (ret)
 			return ret;
-}
+	}
 
 	main_md->part_curr = md->part_type;
 	return 0;
@@ -616,7 +621,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
  * Otherwise we don't understand what happened, so abort.
  */
 static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
-	struct mmc_blk_request *brq)
+	struct mmc_blk_request *brq, int *ecc_err)
 {
 	bool prev_cmd_status_valid = true;
 	u32 status, stop_status = 0;
@@ -641,6 +646,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	if (err)
 		return ERR_ABORT;
 
+	/* Flag ECC errors */
+	if ((status & R1_CARD_ECC_FAILED) ||
+	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+		*ecc_err = 1;
+
 	/*
 	 * Check the current card state. If it is in some data transfer
 	 * mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -658,6 +669,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		 */
 		if (err)
 			return ERR_ABORT;
+		if (stop_status & R1_CARD_ECC_FAILED)
+			*ecc_err = 1;
 	}
 
 	/* Check for set block count errors */
@@ -670,6 +683,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
 				prev_cmd_status_valid, status);
 
+	/* Data errors */
+	if (!brq->stop.error)
+		return ERR_CONTINUE;
+
 	/* Now for stop errors. These aren't fatal to the transfer. */
 	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
 	       req->rq_disk->disk_name, brq->stop.error,
@@ -686,12 +703,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
 	return ERR_CONTINUE;
 }
 
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+			 int type)
+{
+	int err;
+
+	if (md->reset_done & type)
+		return -EEXIST;
+
+	md->reset_done |= type;
+	err = mmc_hw_reset(host);
+	/* Ensure we switch back to the correct partition */
+	if (err != -EOPNOTSUPP) {
+		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+		int part_err;
+
+		main_md->part_curr = main_md->part_type;
+		part_err = mmc_blk_part_switch(host->card, md);
+		if (part_err) {
+			/*
+			 * We have failed to get back into the correct
+			 * partition, so we need to abort the whole request.
+			 */
+			return -ENODEV;
+		}
+	}
+	return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+	md->reset_done &= ~type;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_DISCARD;
 
 	if (!mmc_can_erase(card)) {
 		err = -EOPNOTSUPP;
@@ -705,7 +755,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 		arg = MMC_TRIM_ARG;
 	else
 		arg = MMC_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -718,6 +768,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 	}
 	err = mmc_erase(card, from, nr, arg);
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -731,7 +785,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	unsigned int from, nr, arg;
-	int err = 0;
+	int err = 0, type = MMC_BLK_SECDISCARD;
 
 	if (!mmc_can_secure_erase_trim(card)) {
 		err = -EOPNOTSUPP;
@@ -745,7 +799,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		arg = MMC_SECURE_TRIM1_ARG;
 	else
 		arg = MMC_SECURE_ERASE_ARG;
-
+retry:
 	if (card->quirks & MMC_QUIRK_INAND_CMD38) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 INAND_CMD38_ARG_EXT_CSD,
@@ -769,6 +823,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
 		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
 	}
 out:
+	if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+		goto retry;
+	if (!err)
+		mmc_blk_reset_success(md, type);
 	spin_lock_irq(&md->lock);
 	__blk_end_request(req, err, blk_rq_bytes(req));
 	spin_unlock_irq(&md->lock);
@@ -825,11 +883,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 static int mmc_blk_err_check(struct mmc_card *card,
 			     struct mmc_async_req *areq)
 {
-	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
 	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
 						    mmc_active);
 	struct mmc_blk_request *brq = &mq_mrq->brq;
 	struct request *req = mq_mrq->req;
+	int ecc_err = 0;
 
 	/*
 	 * sbc.error indicates a problem with the set block count
@@ -841,8 +899,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
 	 * stop.error indicates a problem with the stop command. Data
 	 * may have been transferred, or may still be transferring.
 	 */
-	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
-		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+	    brq->data.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
 		case ERR_RETRY:
 			return MMC_BLK_RETRY;
 		case ERR_ABORT:
@@ -894,23 +953,21 @@ static int mmc_blk_err_check(struct mmc_card *card,
 		       brq->cmd.resp[0], brq->stop.resp[0]);
 
 		if (rq_data_dir(req) == READ) {
-			if (brq->data.blocks > 1) {
-				/* Redo read one sector at a time */
-				pr_warning("%s: retrying using single block read\n",
-					   req->rq_disk->disk_name);
-				return MMC_BLK_RETRY_SINGLE;
-			}
+			if (ecc_err)
+				return MMC_BLK_ECC_ERR;
 			return MMC_BLK_DATA_ERR;
 		} else {
 			return MMC_BLK_CMD_ERR;
 		}
 	}
 
-	if (ret == MMC_BLK_SUCCESS &&
-	    blk_rq_bytes(req) != brq->data.bytes_xfered)
-		ret = MMC_BLK_PARTIAL;
+	if (!brq->data.bytes_xfered)
+		return MMC_BLK_RETRY;
 
-	return ret;
+	if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+		return MMC_BLK_PARTIAL;
+
+	return MMC_BLK_SUCCESS;
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1049,12 +1106,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 	mmc_queue_bounce_pre(mqrq);
 }
 
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+			   struct mmc_blk_request *brq, struct request *req,
+			   int ret)
+{
+	/*
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * as reported by the controller (which might be less than
+	 * the real number of written sectors, but never more).
+	 */
+	if (mmc_card_sd(card)) {
+		u32 blocks;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0, blocks << 9);
+			spin_unlock_irq(&md->lock);
+		}
+	} else {
+		spin_lock_irq(&md->lock);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+		spin_unlock_irq(&md->lock);
+	}
+	return ret;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
-	int ret = 1, disable_multi = 0, retry = 0;
+	int ret = 1, disable_multi = 0, retry = 0, type;
 	enum mmc_blk_status status;
 	struct mmc_queue_req *mq_rq;
 	struct request *req;
@@ -1076,6 +1162,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
 		brq = &mq_rq->brq;
 		req = mq_rq->req;
+		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
 		mmc_queue_bounce_post(mq_rq);
 
 		switch (status) {
@@ -1084,17 +1171,17 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			/*
 			 * A block was successfully transferred.
 			 */
+			mmc_blk_reset_success(md, type);
 			spin_lock_irq(&md->lock);
 			ret = __blk_end_request(req, 0,
 						brq->data.bytes_xfered);
 			spin_unlock_irq(&md->lock);
+			/*
+			 * If the blk_end_request function returns non-zero even
+			 * though all data has been transferred and no errors
+			 * were returned by the host controller, it's a bug.
+			 */
 			if (status == MMC_BLK_SUCCESS && ret) {
-				/*
-				 * The blk_end_request has returned non zero
-				 * even though all data is transfered and no
-				 * erros returned by host.
-				 * If this happen it's a bug.
-				 */
 				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
 				       __func__, blk_rq_bytes(req),
 				       brq->data.bytes_xfered);
@@ -1103,16 +1190,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 			}
 			break;
 		case MMC_BLK_CMD_ERR:
-			goto cmd_err;
-		case MMC_BLK_RETRY_SINGLE:
-			disable_multi = 1;
-			break;
+			ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
+			goto cmd_abort;
 		case MMC_BLK_RETRY:
 			if (retry++ < 5)
 				break;
+			/* Fall through */
 		case MMC_BLK_ABORT:
+			if (!mmc_blk_reset(md, card->host, type))
+				break;
 			goto cmd_abort;
-		case MMC_BLK_DATA_ERR:
+		case MMC_BLK_DATA_ERR: {
+			int err;
+
+			err = mmc_blk_reset(md, card->host, type);
+			if (!err)
+				break;
+			if (err == -ENODEV)
+				goto cmd_abort;
+			/* Fall through */
+		}
+		case MMC_BLK_ECC_ERR:
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				disable_multi = 1;
+				break;
+			}
 			/*
 			 * After an error, we redo I/O one sector at a
 			 * time, so we only reach here after trying to
@@ -1129,7 +1236,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 		if (ret) {
 			/*
-			 * In case of a none complete request
+			 * In case of a incomplete request
 			 * prepare it again and resend.
 			 */
 			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
@@ -1139,30 +1246,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 
 	return 1;
 
- cmd_err:
-	/*
-	 * If this is an SD card and we're writing, we can first
-	 * mark the known good sectors as ok.
-	 *
-	 * If the card is not SD, we can still ok written sectors
-	 * as reported by the controller (which might be less than
-	 * the real number of written sectors, but never more).
-	 */
-	if (mmc_card_sd(card)) {
-		u32 blocks;
-
-		blocks = mmc_sd_num_wr_blocks(card);
-		if (blocks != (u32)-1) {
-			spin_lock_irq(&md->lock);
-			ret = __blk_end_request(req, 0, blocks << 9);
-			spin_unlock_irq(&md->lock);
-		}
-	} else {
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
-	}
-
  cmd_abort:
 	spin_lock_irq(&md->lock);
 	while (ret)
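Editor's note: the heart of the block.c change is the mmc_blk_reset()/mmc_blk_reset_success() pair added above. Each request type (read, write, discard, secure discard) gets at most one hardware reset attempt; a later success for that type clears its bit in reset_done and re-arms it. The following stand-alone user-space sketch is not part of the commit: it only models that gating behaviour, and try_hw_reset() is a stub standing in for mmc_hw_reset().

/*
 * Illustrative sketch only (not kernel code): shows how a reset_done
 * bitmask allows one hardware reset per request type until a later
 * success clears the bit again.
 */
#include <stdio.h>

#define BLK_READ	(1u << 0)
#define BLK_WRITE	(1u << 1)
#define BLK_DISCARD	(1u << 2)
#define BLK_SECDISCARD	(1u << 3)

struct blk_data {
	unsigned int reset_done;	/* types that already used their one reset */
};

static int try_hw_reset(void)
{
	puts("issuing hardware reset");
	return 0;			/* pretend the reset succeeded */
}

/* Mirrors mmc_blk_reset(): refuse a second reset for the same type. */
static int blk_reset(struct blk_data *md, unsigned int type)
{
	if (md->reset_done & type)
		return -1;		/* already tried for this type */
	md->reset_done |= type;
	return try_hw_reset();
}

/* Mirrors mmc_blk_reset_success(): a clean completion re-arms the type. */
static void blk_reset_success(struct blk_data *md, unsigned int type)
{
	md->reset_done &= ~type;
}

int main(void)
{
	struct blk_data md = { 0 };

	/* First write error: a reset is allowed. */
	printf("first reset: %d\n", blk_reset(&md, BLK_WRITE));
	/* Second write error before any success: the reset is refused. */
	printf("second reset: %d\n", blk_reset(&md, BLK_WRITE));
	/* A write completes cleanly, so the next error may reset again. */
	blk_reset_success(&md, BLK_WRITE);
	printf("after success: %d\n", blk_reset(&md, BLK_WRITE));
	return 0;
}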
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index da6bd95fa4bb..9698d8a2e166 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1529,7 +1529,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 	if (err) {
 		printk(KERN_ERR "mmc_erase: group start error %d, "
 		       "status %#x\n", err, cmd.resp[0]);
-		err = -EINVAL;
+		err = -EIO;
 		goto out;
 	}
 
@@ -1544,7 +1544,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 	if (err) {
 		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
 		       err, cmd.resp[0]);
-		err = -EINVAL;
+		err = -EIO;
 		goto out;
 	}
 
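Editor's note: the two-line core.c change exists to feed the retry logic added in block.c. The discard and secure-discard paths only reset and retry when mmc_erase() reports -EIO, so erase group errors must no longer surface as -EINVAL. The stand-alone sketch below is not part of the commit; do_erase() and do_reset() are stubs, not kernel API, and it only mirrors the retry/out structure of mmc_blk_issue_discard_rq().

/*
 * Illustrative sketch only: an erase that fails with -EIO is retried
 * once after a reset; any other errno would skip the retry entirely,
 * which is why mmc_do_erase() now returns -EIO instead of -EINVAL.
 */
#include <errno.h>
#include <stdio.h>

static int attempts;

static int do_erase(void)
{
	/* Fail the first attempt the way mmc_do_erase() now reports it. */
	return ++attempts == 1 ? -EIO : 0;
}

static int do_reset(void)
{
	puts("hardware reset");
	return 0;
}

static int issue_discard(void)
{
	int err;
	int reset_done = 0;	/* one reset allowed, as in mmc_blk_reset() */

retry:
	err = do_erase();
	if (err == -EIO && !reset_done) {
		reset_done = 1;
		if (!do_reset())
			goto retry;
	}
	return err;
}

int main(void)
{
	int err = issue_discard();

	printf("discard result: %d after %d attempt(s)\n", err, attempts);
	return 0;
}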