author    Per Forlin <per.forlin@linaro.org>    2011-07-01 12:55:30 -0400
committer Chris Ball <cjb@laptop.org>           2011-07-20 17:21:14 -0400
commit    d78d4a8ad53f345dd3c0bb5f8d377baa523739f7 (patch)
tree      3d4c7d9b7cf5599059689d0fa3991cc52f180700 /drivers/mmc/card/block.c
parent    54d49d77628bed77e5491b8a2a1158a492843a19 (diff)
mmc: block: move error path in issue_rw_rq to a separate function.
Break out code without functional changes. This simplifies the code and
makes way for handling two parallel requests.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  220
1 file changed, 131 insertions, 89 deletions
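The commit message says the split "makes way for handling two parallel requests": once error interpretation is a side-effect-free function that returns a status value, the issuing loop shrinks to a dispatch switch, and a later caller can run the check on a finished request while the next one is already being prepared. Below is a minimal, self-contained sketch of that pattern only, not the kernel code; every name in it (blk_status, fake_request, check_request) is hypothetical and the error conditions are invented for illustration.

	#include <stdio.h>

	enum blk_status {
		BLK_SUCCESS = 0,
		BLK_PARTIAL,
		BLK_RETRY,
		BLK_ABORT,
	};

	struct fake_request {
		int cmd_error;          /* problem with the command itself */
		int data_error;         /* problem during the data transfer */
		unsigned int requested; /* bytes asked for */
		unsigned int xfered;    /* bytes actually transferred */
	};

	/* All error interpretation lives here, away from the issuing loop. */
	static enum blk_status check_request(const struct fake_request *rq)
	{
		if (rq->cmd_error)
			return BLK_ABORT;
		if (rq->data_error)
			return BLK_RETRY;
		if (rq->xfered != rq->requested)
			return BLK_PARTIAL;
		return BLK_SUCCESS;
	}

	int main(void)
	{
		struct fake_request rq = { .requested = 4096, .xfered = 2048 };
		int retry = 0;

		/* The issuing loop only dispatches on the returned status. */
		for (;;) {
			switch (check_request(&rq)) {
			case BLK_SUCCESS:
				puts("request completed");
				return 0;
			case BLK_PARTIAL:
				puts("partial transfer, resubmitting the remainder");
				rq.xfered = rq.requested; /* pretend the resubmission succeeds */
				continue;
			case BLK_RETRY:
				if (retry++ < 5)
					continue;
				/* fall through after too many retries */
			case BLK_ABORT:
				puts("aborting request");
				return 1;
			}
		}
	}

The point mirrored here is only the control-flow change: because the checker does not touch the queue, its caller is free to overlap the check of one request with the preparation of the next, which is what the actual patch below sets up.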
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index a0a76f48b253..7ed2c68e8763 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -106,6 +106,16 @@ struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+enum mmc_blk_status {
+	MMC_BLK_SUCCESS = 0,
+	MMC_BLK_PARTIAL,
+	MMC_BLK_RETRY,
+	MMC_BLK_RETRY_SINGLE,
+	MMC_BLK_DATA_ERR,
+	MMC_BLK_CMD_ERR,
+	MMC_BLK_ABORT,
+};
+
 module_param(perdev_minors, int, 0444);
 MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
 
@@ -812,6 +822,95 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
	 R1_CC_ERROR |		/* Card controller error */		\
	 R1_ERROR)		/* General/unknown error */
 
+int mmc_blk_err_check(struct mmc_blk_request *brq,
+		      struct request *req,
+		      struct mmc_card *card,
+		      struct mmc_blk_data *md)
+{
+	int ret = MMC_BLK_SUCCESS;
+
+	/*
+	 * sbc.error indicates a problem with the set block count
+	 * command. No data will have been transferred.
+	 *
+	 * cmd.error indicates a problem with the r/w command. No
+	 * data will have been transferred.
+	 *
+	 * stop.error indicates a problem with the stop command. Data
+	 * may have been transferred, or may still be transferring.
+	 */
+	if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+		switch (mmc_blk_cmd_recovery(card, req, brq)) {
+		case ERR_RETRY:
+			return MMC_BLK_RETRY;
+		case ERR_ABORT:
+			return MMC_BLK_ABORT;
+		case ERR_CONTINUE:
+			break;
+		}
+	}
+
+	/*
+	 * Check for errors relating to the execution of the
+	 * initial command - such as address errors. No data
+	 * has been transferred.
+	 */
+	if (brq->cmd.resp[0] & CMD_ERRORS) {
+		pr_err("%s: r/w command failed, status = %#x\n",
+		       req->rq_disk->disk_name, brq->cmd.resp[0]);
+		return MMC_BLK_ABORT;
+	}
+
+	/*
+	 * Everything else is either success, or a data error of some
+	 * kind. If it was a write, we may have transitioned to
+	 * program mode, which we have to wait for it to complete.
+	 */
+	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+		u32 status;
+		do {
+			int err = get_card_status(card, &status, 5);
+			if (err) {
+				printk(KERN_ERR "%s: error %d requesting status\n",
+				       req->rq_disk->disk_name, err);
+				return MMC_BLK_CMD_ERR;
+			}
+			/*
+			 * Some cards mishandle the status bits,
+			 * so make sure to check both the busy
+			 * indication and the card state.
+			 */
+		} while (!(status & R1_READY_FOR_DATA) ||
+			 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+	}
+
+	if (brq->data.error) {
+		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+		       req->rq_disk->disk_name, brq->data.error,
+		       (unsigned)blk_rq_pos(req),
+		       (unsigned)blk_rq_sectors(req),
+		       brq->cmd.resp[0], brq->stop.resp[0]);
+
+		if (rq_data_dir(req) == READ) {
+			if (brq->data.blocks > 1) {
+				/* Redo read one sector at a time */
+				pr_warning("%s: retrying using single block read\n",
+					   req->rq_disk->disk_name);
+				return MMC_BLK_RETRY_SINGLE;
+			}
+			return MMC_BLK_DATA_ERR;
+		} else {
+			return MMC_BLK_CMD_ERR;
+		}
+	}
+
+	if (ret == MMC_BLK_SUCCESS &&
+	    blk_rq_bytes(req) != brq->data.bytes_xfered)
+		ret = MMC_BLK_PARTIAL;
+
+	return ret;
+}
+
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
			       struct mmc_card *card,
			       int disable_multi,
@@ -948,6 +1047,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
 	int ret = 1, disable_multi = 0, retry = 0;
+	enum mmc_blk_status status;
 
 	do {
 		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, disable_multi, mq);
@@ -955,99 +1055,41 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 
 		mmc_queue_bounce_post(mq->mqrq_cur);
 
-		/*
-		 * sbc.error indicates a problem with the set block count
-		 * command. No data will have been transferred.
-		 *
-		 * cmd.error indicates a problem with the r/w command. No
-		 * data will have been transferred.
-		 *
-		 * stop.error indicates a problem with the stop command. Data
-		 * may have been transferred, or may still be transferring.
-		 */
-		if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
-			switch (mmc_blk_cmd_recovery(card, req, brq)) {
-			case ERR_RETRY:
-				if (retry++ < 5)
-					continue;
-			case ERR_ABORT:
-				goto cmd_abort;
-			case ERR_CONTINUE:
-				break;
-			}
-		}
-
-		/*
-		 * Check for errors relating to the execution of the
-		 * initial command - such as address errors. No data
-		 * has been transferred.
-		 */
-		if (brq->cmd.resp[0] & CMD_ERRORS) {
-			pr_err("%s: r/w command failed, status = %#x\n",
-				req->rq_disk->disk_name, brq->cmd.resp[0]);
+		status = mmc_blk_err_check(brq, req, card, md);
+		switch (status) {
+		case MMC_BLK_SUCCESS:
+		case MMC_BLK_PARTIAL:
+			/*
+			 * A block was successfully transferred.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, 0,
+						brq->data.bytes_xfered);
+			spin_unlock_irq(&md->lock);
+			break;
+		case MMC_BLK_CMD_ERR:
+			goto cmd_err;
+		case MMC_BLK_RETRY_SINGLE:
+			disable_multi = 1;
+			break;
+		case MMC_BLK_RETRY:
+			if (retry++ < 5)
+				break;
+		case MMC_BLK_ABORT:
 			goto cmd_abort;
+		case MMC_BLK_DATA_ERR:
+			/*
+			 * After an error, we redo I/O one sector at a
+			 * time, so we only reach here after trying to
+			 * read a single sector.
+			 */
+			spin_lock_irq(&md->lock);
+			ret = __blk_end_request(req, -EIO,
+						brq->data.blksz);
+			spin_unlock_irq(&md->lock);
+			break;
 		}
 
-		/*
-		 * Everything else is either success, or a data error of some
-		 * kind. If it was a write, we may have transitioned to
-		 * program mode, which we have to wait for it to complete.
-		 */
-		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
-			u32 status;
-			do {
-				int err = get_card_status(card, &status, 5);
-				if (err) {
-					printk(KERN_ERR "%s: error %d requesting status\n",
-					       req->rq_disk->disk_name, err);
-					goto cmd_err;
-				}
-				/*
-				 * Some cards mishandle the status bits,
-				 * so make sure to check both the busy
-				 * indication and the card state.
-				 */
-			} while (!(status & R1_READY_FOR_DATA) ||
-				 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
-		}
-
-		if (brq->data.error) {
-			pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
-				req->rq_disk->disk_name, brq->data.error,
-				(unsigned)blk_rq_pos(req),
-				(unsigned)blk_rq_sectors(req),
-				brq->cmd.resp[0], brq->stop.resp[0]);
-
-			if (rq_data_dir(req) == READ) {
-				if (brq->data.blocks > 1) {
-					/* Redo read one sector at a time */
-					pr_warning("%s: retrying using single block read\n",
-						req->rq_disk->disk_name);
-					disable_multi = 1;
-					continue;
-				}
-
-				/*
-				 * After an error, we redo I/O one sector at a
-				 * time, so we only reach here after trying to
-				 * read a single sector.
-				 */
-				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, -EIO,
-						brq->data.blksz);
-				spin_unlock_irq(&md->lock);
-				continue;
-			} else {
-				goto cmd_err;
-			}
-		}
-
-		/*
-		 * A block was successfully transferred.
-		 */
-		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
-		spin_unlock_irq(&md->lock);
 	} while (ret);
 
 	return 1;