diff options
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r-- | drivers/mmc/card/block.c | 310 |
1 files changed, 212 insertions, 98 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 4c1a648d00fc..a1cb21f95302 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -94,6 +94,11 @@ struct mmc_blk_data { | |||
94 | unsigned int read_only; | 94 | unsigned int read_only; |
95 | unsigned int part_type; | 95 | unsigned int part_type; |
96 | unsigned int name_idx; | 96 | unsigned int name_idx; |
97 | unsigned int reset_done; | ||
98 | #define MMC_BLK_READ BIT(0) | ||
99 | #define MMC_BLK_WRITE BIT(1) | ||
100 | #define MMC_BLK_DISCARD BIT(2) | ||
101 | #define MMC_BLK_SECDISCARD BIT(3) | ||
97 | 102 | ||
98 | /* | 103 | /* |
99 | * Only set in main mmc_blk_data associated | 104 | * Only set in main mmc_blk_data associated |
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock); | |||
109 | enum mmc_blk_status { | 114 | enum mmc_blk_status { |
110 | MMC_BLK_SUCCESS = 0, | 115 | MMC_BLK_SUCCESS = 0, |
111 | MMC_BLK_PARTIAL, | 116 | MMC_BLK_PARTIAL, |
112 | MMC_BLK_RETRY, | ||
113 | MMC_BLK_RETRY_SINGLE, | ||
114 | MMC_BLK_DATA_ERR, | ||
115 | MMC_BLK_CMD_ERR, | 117 | MMC_BLK_CMD_ERR, |
118 | MMC_BLK_RETRY, | ||
116 | MMC_BLK_ABORT, | 119 | MMC_BLK_ABORT, |
120 | MMC_BLK_DATA_ERR, | ||
121 | MMC_BLK_ECC_ERR, | ||
117 | }; | 122 | }; |
118 | 123 | ||
119 | module_param(perdev_minors, int, 0444); | 124 | module_param(perdev_minors, int, 0444); |
@@ -291,7 +296,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, | |||
291 | struct mmc_card *card; | 296 | struct mmc_card *card; |
292 | struct mmc_command cmd = {0}; | 297 | struct mmc_command cmd = {0}; |
293 | struct mmc_data data = {0}; | 298 | struct mmc_data data = {0}; |
294 | struct mmc_request mrq = {0}; | 299 | struct mmc_request mrq = {NULL}; |
295 | struct scatterlist sg; | 300 | struct scatterlist sg; |
296 | int err; | 301 | int err; |
297 | 302 | ||
@@ -442,19 +447,24 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, | |||
442 | { | 447 | { |
443 | int ret; | 448 | int ret; |
444 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); | 449 | struct mmc_blk_data *main_md = mmc_get_drvdata(card); |
450 | |||
445 | if (main_md->part_curr == md->part_type) | 451 | if (main_md->part_curr == md->part_type) |
446 | return 0; | 452 | return 0; |
447 | 453 | ||
448 | if (mmc_card_mmc(card)) { | 454 | if (mmc_card_mmc(card)) { |
449 | card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | 455 | u8 part_config = card->ext_csd.part_config; |
450 | card->ext_csd.part_config |= md->part_type; | 456 | |
457 | part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; | ||
458 | part_config |= md->part_type; | ||
451 | 459 | ||
452 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 460 | ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
453 | EXT_CSD_PART_CONFIG, card->ext_csd.part_config, | 461 | EXT_CSD_PART_CONFIG, part_config, |
454 | card->ext_csd.part_time); | 462 | card->ext_csd.part_time); |
455 | if (ret) | 463 | if (ret) |
456 | return ret; | 464 | return ret; |
457 | } | 465 | |
466 | card->ext_csd.part_config = part_config; | ||
467 | } | ||
458 | 468 | ||
459 | main_md->part_curr = md->part_type; | 469 | main_md->part_curr = md->part_type; |
460 | return 0; | 470 | return 0; |
@@ -466,7 +476,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) | |||
466 | u32 result; | 476 | u32 result; |
467 | __be32 *blocks; | 477 | __be32 *blocks; |
468 | 478 | ||
469 | struct mmc_request mrq = {0}; | 479 | struct mmc_request mrq = {NULL}; |
470 | struct mmc_command cmd = {0}; | 480 | struct mmc_command cmd = {0}; |
471 | struct mmc_data data = {0}; | 481 | struct mmc_data data = {0}; |
472 | unsigned int timeout_us; | 482 | unsigned int timeout_us; |
@@ -616,7 +626,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, | |||
616 | * Otherwise we don't understand what happened, so abort. | 626 | * Otherwise we don't understand what happened, so abort. |
617 | */ | 627 | */ |
618 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | 628 | static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, |
619 | struct mmc_blk_request *brq) | 629 | struct mmc_blk_request *brq, int *ecc_err) |
620 | { | 630 | { |
621 | bool prev_cmd_status_valid = true; | 631 | bool prev_cmd_status_valid = true; |
622 | u32 status, stop_status = 0; | 632 | u32 status, stop_status = 0; |
@@ -641,6 +651,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
641 | if (err) | 651 | if (err) |
642 | return ERR_ABORT; | 652 | return ERR_ABORT; |
643 | 653 | ||
654 | /* Flag ECC errors */ | ||
655 | if ((status & R1_CARD_ECC_FAILED) || | ||
656 | (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || | ||
657 | (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) | ||
658 | *ecc_err = 1; | ||
659 | |||
644 | /* | 660 | /* |
645 | * Check the current card state. If it is in some data transfer | 661 | * Check the current card state. If it is in some data transfer |
646 | * mode, tell it to stop (and hopefully transition back to TRAN.) | 662 | * mode, tell it to stop (and hopefully transition back to TRAN.) |
@@ -658,6 +674,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
658 | */ | 674 | */ |
659 | if (err) | 675 | if (err) |
660 | return ERR_ABORT; | 676 | return ERR_ABORT; |
677 | if (stop_status & R1_CARD_ECC_FAILED) | ||
678 | *ecc_err = 1; | ||
661 | } | 679 | } |
662 | 680 | ||
663 | /* Check for set block count errors */ | 681 | /* Check for set block count errors */ |
@@ -670,6 +688,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
670 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, | 688 | return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, |
671 | prev_cmd_status_valid, status); | 689 | prev_cmd_status_valid, status); |
672 | 690 | ||
691 | /* Data errors */ | ||
692 | if (!brq->stop.error) | ||
693 | return ERR_CONTINUE; | ||
694 | |||
673 | /* Now for stop errors. These aren't fatal to the transfer. */ | 695 | /* Now for stop errors. These aren't fatal to the transfer. */ |
674 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", | 696 | pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", |
675 | req->rq_disk->disk_name, brq->stop.error, | 697 | req->rq_disk->disk_name, brq->stop.error, |
@@ -686,12 +708,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, | |||
686 | return ERR_CONTINUE; | 708 | return ERR_CONTINUE; |
687 | } | 709 | } |
688 | 710 | ||
711 | static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, | ||
712 | int type) | ||
713 | { | ||
714 | int err; | ||
715 | |||
716 | if (md->reset_done & type) | ||
717 | return -EEXIST; | ||
718 | |||
719 | md->reset_done |= type; | ||
720 | err = mmc_hw_reset(host); | ||
721 | /* Ensure we switch back to the correct partition */ | ||
722 | if (err != -EOPNOTSUPP) { | ||
723 | struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); | ||
724 | int part_err; | ||
725 | |||
726 | main_md->part_curr = main_md->part_type; | ||
727 | part_err = mmc_blk_part_switch(host->card, md); | ||
728 | if (part_err) { | ||
729 | /* | ||
730 | * We have failed to get back into the correct | ||
731 | * partition, so we need to abort the whole request. | ||
732 | */ | ||
733 | return -ENODEV; | ||
734 | } | ||
735 | } | ||
736 | return err; | ||
737 | } | ||
738 | |||
739 | static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) | ||
740 | { | ||
741 | md->reset_done &= ~type; | ||
742 | } | ||
743 | |||
689 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | 744 | static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) |
690 | { | 745 | { |
691 | struct mmc_blk_data *md = mq->data; | 746 | struct mmc_blk_data *md = mq->data; |
692 | struct mmc_card *card = md->queue.card; | 747 | struct mmc_card *card = md->queue.card; |
693 | unsigned int from, nr, arg; | 748 | unsigned int from, nr, arg; |
694 | int err = 0; | 749 | int err = 0, type = MMC_BLK_DISCARD; |
695 | 750 | ||
696 | if (!mmc_can_erase(card)) { | 751 | if (!mmc_can_erase(card)) { |
697 | err = -EOPNOTSUPP; | 752 | err = -EOPNOTSUPP; |
@@ -701,11 +756,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | |||
701 | from = blk_rq_pos(req); | 756 | from = blk_rq_pos(req); |
702 | nr = blk_rq_sectors(req); | 757 | nr = blk_rq_sectors(req); |
703 | 758 | ||
704 | if (mmc_can_trim(card)) | 759 | if (mmc_can_discard(card)) |
760 | arg = MMC_DISCARD_ARG; | ||
761 | else if (mmc_can_trim(card)) | ||
705 | arg = MMC_TRIM_ARG; | 762 | arg = MMC_TRIM_ARG; |
706 | else | 763 | else |
707 | arg = MMC_ERASE_ARG; | 764 | arg = MMC_ERASE_ARG; |
708 | 765 | retry: | |
709 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 766 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
710 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 767 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
711 | INAND_CMD38_ARG_EXT_CSD, | 768 | INAND_CMD38_ARG_EXT_CSD, |
@@ -718,6 +775,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) | |||
718 | } | 775 | } |
719 | err = mmc_erase(card, from, nr, arg); | 776 | err = mmc_erase(card, from, nr, arg); |
720 | out: | 777 | out: |
778 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) | ||
779 | goto retry; | ||
780 | if (!err) | ||
781 | mmc_blk_reset_success(md, type); | ||
721 | spin_lock_irq(&md->lock); | 782 | spin_lock_irq(&md->lock); |
722 | __blk_end_request(req, err, blk_rq_bytes(req)); | 783 | __blk_end_request(req, err, blk_rq_bytes(req)); |
723 | spin_unlock_irq(&md->lock); | 784 | spin_unlock_irq(&md->lock); |
@@ -731,13 +792,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
731 | struct mmc_blk_data *md = mq->data; | 792 | struct mmc_blk_data *md = mq->data; |
732 | struct mmc_card *card = md->queue.card; | 793 | struct mmc_card *card = md->queue.card; |
733 | unsigned int from, nr, arg; | 794 | unsigned int from, nr, arg; |
734 | int err = 0; | 795 | int err = 0, type = MMC_BLK_SECDISCARD; |
735 | 796 | ||
736 | if (!mmc_can_secure_erase_trim(card)) { | 797 | if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { |
737 | err = -EOPNOTSUPP; | 798 | err = -EOPNOTSUPP; |
738 | goto out; | 799 | goto out; |
739 | } | 800 | } |
740 | 801 | ||
802 | /* The sanitize operation is supported at v4.5 only */ | ||
803 | if (mmc_can_sanitize(card)) { | ||
804 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | ||
805 | EXT_CSD_SANITIZE_START, 1, 0); | ||
806 | goto out; | ||
807 | } | ||
808 | |||
741 | from = blk_rq_pos(req); | 809 | from = blk_rq_pos(req); |
742 | nr = blk_rq_sectors(req); | 810 | nr = blk_rq_sectors(req); |
743 | 811 | ||
@@ -745,7 +813,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
745 | arg = MMC_SECURE_TRIM1_ARG; | 813 | arg = MMC_SECURE_TRIM1_ARG; |
746 | else | 814 | else |
747 | arg = MMC_SECURE_ERASE_ARG; | 815 | arg = MMC_SECURE_ERASE_ARG; |
748 | 816 | retry: | |
749 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { | 817 | if (card->quirks & MMC_QUIRK_INAND_CMD38) { |
750 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, | 818 | err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, |
751 | INAND_CMD38_ARG_EXT_CSD, | 819 | INAND_CMD38_ARG_EXT_CSD, |
@@ -769,6 +837,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, | |||
769 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); | 837 | err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); |
770 | } | 838 | } |
771 | out: | 839 | out: |
840 | if (err == -EIO && !mmc_blk_reset(md, card->host, type)) | ||
841 | goto retry; | ||
842 | if (!err) | ||
843 | mmc_blk_reset_success(md, type); | ||
772 | spin_lock_irq(&md->lock); | 844 | spin_lock_irq(&md->lock); |
773 | __blk_end_request(req, err, blk_rq_bytes(req)); | 845 | __blk_end_request(req, err, blk_rq_bytes(req)); |
774 | spin_unlock_irq(&md->lock); | 846 | spin_unlock_irq(&md->lock); |
@@ -779,16 +851,18 @@ out: | |||
779 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) | 851 | static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) |
780 | { | 852 | { |
781 | struct mmc_blk_data *md = mq->data; | 853 | struct mmc_blk_data *md = mq->data; |
854 | struct mmc_card *card = md->queue.card; | ||
855 | int ret = 0; | ||
856 | |||
857 | ret = mmc_flush_cache(card); | ||
858 | if (ret) | ||
859 | ret = -EIO; | ||
782 | 860 | ||
783 | /* | ||
784 | * No-op, only service this because we need REQ_FUA for reliable | ||
785 | * writes. | ||
786 | */ | ||
787 | spin_lock_irq(&md->lock); | 861 | spin_lock_irq(&md->lock); |
788 | __blk_end_request_all(req, 0); | 862 | __blk_end_request_all(req, ret); |
789 | spin_unlock_irq(&md->lock); | 863 | spin_unlock_irq(&md->lock); |
790 | 864 | ||
791 | return 1; | 865 | return ret ? 0 : 1; |
792 | } | 866 | } |
793 | 867 | ||
794 | /* | 868 | /* |
@@ -825,11 +899,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, | |||
825 | static int mmc_blk_err_check(struct mmc_card *card, | 899 | static int mmc_blk_err_check(struct mmc_card *card, |
826 | struct mmc_async_req *areq) | 900 | struct mmc_async_req *areq) |
827 | { | 901 | { |
828 | enum mmc_blk_status ret = MMC_BLK_SUCCESS; | ||
829 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, | 902 | struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, |
830 | mmc_active); | 903 | mmc_active); |
831 | struct mmc_blk_request *brq = &mq_mrq->brq; | 904 | struct mmc_blk_request *brq = &mq_mrq->brq; |
832 | struct request *req = mq_mrq->req; | 905 | struct request *req = mq_mrq->req; |
906 | int ecc_err = 0; | ||
833 | 907 | ||
834 | /* | 908 | /* |
835 | * sbc.error indicates a problem with the set block count | 909 | * sbc.error indicates a problem with the set block count |
@@ -841,8 +915,9 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
841 | * stop.error indicates a problem with the stop command. Data | 915 | * stop.error indicates a problem with the stop command. Data |
842 | * may have been transferred, or may still be transferring. | 916 | * may have been transferred, or may still be transferring. |
843 | */ | 917 | */ |
844 | if (brq->sbc.error || brq->cmd.error || brq->stop.error) { | 918 | if (brq->sbc.error || brq->cmd.error || brq->stop.error || |
845 | switch (mmc_blk_cmd_recovery(card, req, brq)) { | 919 | brq->data.error) { |
920 | switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { | ||
846 | case ERR_RETRY: | 921 | case ERR_RETRY: |
847 | return MMC_BLK_RETRY; | 922 | return MMC_BLK_RETRY; |
848 | case ERR_ABORT: | 923 | case ERR_ABORT: |
@@ -873,7 +948,7 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
873 | do { | 948 | do { |
874 | int err = get_card_status(card, &status, 5); | 949 | int err = get_card_status(card, &status, 5); |
875 | if (err) { | 950 | if (err) { |
876 | printk(KERN_ERR "%s: error %d requesting status\n", | 951 | pr_err("%s: error %d requesting status\n", |
877 | req->rq_disk->disk_name, err); | 952 | req->rq_disk->disk_name, err); |
878 | return MMC_BLK_CMD_ERR; | 953 | return MMC_BLK_CMD_ERR; |
879 | } | 954 | } |
@@ -894,23 +969,21 @@ static int mmc_blk_err_check(struct mmc_card *card, | |||
894 | brq->cmd.resp[0], brq->stop.resp[0]); | 969 | brq->cmd.resp[0], brq->stop.resp[0]); |
895 | 970 | ||
896 | if (rq_data_dir(req) == READ) { | 971 | if (rq_data_dir(req) == READ) { |
897 | if (brq->data.blocks > 1) { | 972 | if (ecc_err) |
898 | /* Redo read one sector at a time */ | 973 | return MMC_BLK_ECC_ERR; |
899 | pr_warning("%s: retrying using single block read\n", | ||
900 | req->rq_disk->disk_name); | ||
901 | return MMC_BLK_RETRY_SINGLE; | ||
902 | } | ||
903 | return MMC_BLK_DATA_ERR; | 974 | return MMC_BLK_DATA_ERR; |
904 | } else { | 975 | } else { |
905 | return MMC_BLK_CMD_ERR; | 976 | return MMC_BLK_CMD_ERR; |
906 | } | 977 | } |
907 | } | 978 | } |
908 | 979 | ||
909 | if (ret == MMC_BLK_SUCCESS && | 980 | if (!brq->data.bytes_xfered) |
910 | blk_rq_bytes(req) != brq->data.bytes_xfered) | 981 | return MMC_BLK_RETRY; |
911 | ret = MMC_BLK_PARTIAL; | ||
912 | 982 | ||
913 | return ret; | 983 | if (blk_rq_bytes(req) != brq->data.bytes_xfered) |
984 | return MMC_BLK_PARTIAL; | ||
985 | |||
986 | return MMC_BLK_SUCCESS; | ||
914 | } | 987 | } |
915 | 988 | ||
916 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | 989 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
@@ -957,13 +1030,20 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
957 | if (brq->data.blocks > card->host->max_blk_count) | 1030 | if (brq->data.blocks > card->host->max_blk_count) |
958 | brq->data.blocks = card->host->max_blk_count; | 1031 | brq->data.blocks = card->host->max_blk_count; |
959 | 1032 | ||
960 | /* | 1033 | if (brq->data.blocks > 1) { |
961 | * After a read error, we redo the request one sector at a time | 1034 | /* |
962 | * in order to accurately determine which sectors can be read | 1035 | * After a read error, we redo the request one sector |
963 | * successfully. | 1036 | * at a time in order to accurately determine which |
964 | */ | 1037 | * sectors can be read successfully. |
965 | if (disable_multi && brq->data.blocks > 1) | 1038 | */ |
966 | brq->data.blocks = 1; | 1039 | if (disable_multi) |
1040 | brq->data.blocks = 1; | ||
1041 | |||
1042 | /* Some controllers can't do multiblock reads due to hw bugs */ | ||
1043 | if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && | ||
1044 | rq_data_dir(req) == READ) | ||
1045 | brq->data.blocks = 1; | ||
1046 | } | ||
967 | 1047 | ||
968 | if (brq->data.blocks > 1 || do_rel_wr) { | 1048 | if (brq->data.blocks > 1 || do_rel_wr) { |
969 | /* SPI multiblock writes terminate using a special | 1049 | /* SPI multiblock writes terminate using a special |
@@ -1049,12 +1129,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | |||
1049 | mmc_queue_bounce_pre(mqrq); | 1129 | mmc_queue_bounce_pre(mqrq); |
1050 | } | 1130 | } |
1051 | 1131 | ||
1132 | static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | ||
1133 | struct mmc_blk_request *brq, struct request *req, | ||
1134 | int ret) | ||
1135 | { | ||
1136 | /* | ||
1137 | * If this is an SD card and we're writing, we can first | ||
1138 | * mark the known good sectors as ok. | ||
1139 | * | ||
1140 | * If the card is not SD, we can still ok written sectors | ||
1141 | * as reported by the controller (which might be less than | ||
1142 | * the real number of written sectors, but never more). | ||
1143 | */ | ||
1144 | if (mmc_card_sd(card)) { | ||
1145 | u32 blocks; | ||
1146 | |||
1147 | blocks = mmc_sd_num_wr_blocks(card); | ||
1148 | if (blocks != (u32)-1) { | ||
1149 | spin_lock_irq(&md->lock); | ||
1150 | ret = __blk_end_request(req, 0, blocks << 9); | ||
1151 | spin_unlock_irq(&md->lock); | ||
1152 | } | ||
1153 | } else { | ||
1154 | spin_lock_irq(&md->lock); | ||
1155 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1156 | spin_unlock_irq(&md->lock); | ||
1157 | } | ||
1158 | return ret; | ||
1159 | } | ||
1160 | |||
1052 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | 1161 | static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) |
1053 | { | 1162 | { |
1054 | struct mmc_blk_data *md = mq->data; | 1163 | struct mmc_blk_data *md = mq->data; |
1055 | struct mmc_card *card = md->queue.card; | 1164 | struct mmc_card *card = md->queue.card; |
1056 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; | 1165 | struct mmc_blk_request *brq = &mq->mqrq_cur->brq; |
1057 | int ret = 1, disable_multi = 0, retry = 0; | 1166 | int ret = 1, disable_multi = 0, retry = 0, type; |
1058 | enum mmc_blk_status status; | 1167 | enum mmc_blk_status status; |
1059 | struct mmc_queue_req *mq_rq; | 1168 | struct mmc_queue_req *mq_rq; |
1060 | struct request *req; | 1169 | struct request *req; |
@@ -1076,6 +1185,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1076 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); | 1185 | mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); |
1077 | brq = &mq_rq->brq; | 1186 | brq = &mq_rq->brq; |
1078 | req = mq_rq->req; | 1187 | req = mq_rq->req; |
1188 | type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | ||
1079 | mmc_queue_bounce_post(mq_rq); | 1189 | mmc_queue_bounce_post(mq_rq); |
1080 | 1190 | ||
1081 | switch (status) { | 1191 | switch (status) { |
@@ -1084,18 +1194,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1084 | /* | 1194 | /* |
1085 | * A block was successfully transferred. | 1195 | * A block was successfully transferred. |
1086 | */ | 1196 | */ |
1197 | mmc_blk_reset_success(md, type); | ||
1087 | spin_lock_irq(&md->lock); | 1198 | spin_lock_irq(&md->lock); |
1088 | ret = __blk_end_request(req, 0, | 1199 | ret = __blk_end_request(req, 0, |
1089 | brq->data.bytes_xfered); | 1200 | brq->data.bytes_xfered); |
1090 | spin_unlock_irq(&md->lock); | 1201 | spin_unlock_irq(&md->lock); |
1202 | /* | ||
1203 | * If the blk_end_request function returns non-zero even | ||
1204 | * though all data has been transferred and no errors | ||
1205 | * were returned by the host controller, it's a bug. | ||
1206 | */ | ||
1091 | if (status == MMC_BLK_SUCCESS && ret) { | 1207 | if (status == MMC_BLK_SUCCESS && ret) { |
1092 | /* | 1208 | pr_err("%s BUG rq_tot %d d_xfer %d\n", |
1093 | * The blk_end_request has returned non zero | ||
1094 | * even though all data is transfered and no | ||
1095 | * erros returned by host. | ||
1096 | * If this happen it's a bug. | ||
1097 | */ | ||
1098 | printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n", | ||
1099 | __func__, blk_rq_bytes(req), | 1209 | __func__, blk_rq_bytes(req), |
1100 | brq->data.bytes_xfered); | 1210 | brq->data.bytes_xfered); |
1101 | rqc = NULL; | 1211 | rqc = NULL; |
@@ -1103,16 +1213,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1103 | } | 1213 | } |
1104 | break; | 1214 | break; |
1105 | case MMC_BLK_CMD_ERR: | 1215 | case MMC_BLK_CMD_ERR: |
1106 | goto cmd_err; | 1216 | ret = mmc_blk_cmd_err(md, card, brq, req, ret); |
1107 | case MMC_BLK_RETRY_SINGLE: | 1217 | if (!mmc_blk_reset(md, card->host, type)) |
1108 | disable_multi = 1; | 1218 | break; |
1109 | break; | 1219 | goto cmd_abort; |
1110 | case MMC_BLK_RETRY: | 1220 | case MMC_BLK_RETRY: |
1111 | if (retry++ < 5) | 1221 | if (retry++ < 5) |
1112 | break; | 1222 | break; |
1223 | /* Fall through */ | ||
1113 | case MMC_BLK_ABORT: | 1224 | case MMC_BLK_ABORT: |
1225 | if (!mmc_blk_reset(md, card->host, type)) | ||
1226 | break; | ||
1114 | goto cmd_abort; | 1227 | goto cmd_abort; |
1115 | case MMC_BLK_DATA_ERR: | 1228 | case MMC_BLK_DATA_ERR: { |
1229 | int err; | ||
1230 | |||
1231 | err = mmc_blk_reset(md, card->host, type); | ||
1232 | if (!err) | ||
1233 | break; | ||
1234 | if (err == -ENODEV) | ||
1235 | goto cmd_abort; | ||
1236 | /* Fall through */ | ||
1237 | } | ||
1238 | case MMC_BLK_ECC_ERR: | ||
1239 | if (brq->data.blocks > 1) { | ||
1240 | /* Redo read one sector at a time */ | ||
1241 | pr_warning("%s: retrying using single block read\n", | ||
1242 | req->rq_disk->disk_name); | ||
1243 | disable_multi = 1; | ||
1244 | break; | ||
1245 | } | ||
1116 | /* | 1246 | /* |
1117 | * After an error, we redo I/O one sector at a | 1247 | * After an error, we redo I/O one sector at a |
1118 | * time, so we only reach here after trying to | 1248 | * time, so we only reach here after trying to |
@@ -1129,7 +1259,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1129 | 1259 | ||
1130 | if (ret) { | 1260 | if (ret) { |
1131 | /* | 1261 | /* |
1132 | * In case of a none complete request | 1262 | * In case of an incomplete request |
1133 | * prepare it again and resend. | 1263 | * prepare it again and resend. |
1134 | */ | 1264 | */ |
1135 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); | 1265 | mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); |
@@ -1139,30 +1269,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) | |||
1139 | 1269 | ||
1140 | return 1; | 1270 | return 1; |
1141 | 1271 | ||
1142 | cmd_err: | ||
1143 | /* | ||
1144 | * If this is an SD card and we're writing, we can first | ||
1145 | * mark the known good sectors as ok. | ||
1146 | * | ||
1147 | * If the card is not SD, we can still ok written sectors | ||
1148 | * as reported by the controller (which might be less than | ||
1149 | * the real number of written sectors, but never more). | ||
1150 | */ | ||
1151 | if (mmc_card_sd(card)) { | ||
1152 | u32 blocks; | ||
1153 | |||
1154 | blocks = mmc_sd_num_wr_blocks(card); | ||
1155 | if (blocks != (u32)-1) { | ||
1156 | spin_lock_irq(&md->lock); | ||
1157 | ret = __blk_end_request(req, 0, blocks << 9); | ||
1158 | spin_unlock_irq(&md->lock); | ||
1159 | } | ||
1160 | } else { | ||
1161 | spin_lock_irq(&md->lock); | ||
1162 | ret = __blk_end_request(req, 0, brq->data.bytes_xfered); | ||
1163 | spin_unlock_irq(&md->lock); | ||
1164 | } | ||
1165 | |||
1166 | cmd_abort: | 1272 | cmd_abort: |
1167 | spin_lock_irq(&md->lock); | 1273 | spin_lock_irq(&md->lock); |
1168 | while (ret) | 1274 | while (ret) |
@@ -1190,6 +1296,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
1190 | 1296 | ||
1191 | ret = mmc_blk_part_switch(card, md); | 1297 | ret = mmc_blk_part_switch(card, md); |
1192 | if (ret) { | 1298 | if (ret) { |
1299 | if (req) { | ||
1300 | spin_lock_irq(&md->lock); | ||
1301 | __blk_end_request_all(req, -EIO); | ||
1302 | spin_unlock_irq(&md->lock); | ||
1303 | } | ||
1193 | ret = 0; | 1304 | ret = 0; |
1194 | goto out; | 1305 | goto out; |
1195 | } | 1306 | } |
@@ -1374,32 +1485,35 @@ static int mmc_blk_alloc_part(struct mmc_card *card, | |||
1374 | 1485 | ||
1375 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, | 1486 | string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, |
1376 | cap_str, sizeof(cap_str)); | 1487 | cap_str, sizeof(cap_str)); |
1377 | printk(KERN_INFO "%s: %s %s partition %u %s\n", | 1488 | pr_info("%s: %s %s partition %u %s\n", |
1378 | part_md->disk->disk_name, mmc_card_id(card), | 1489 | part_md->disk->disk_name, mmc_card_id(card), |
1379 | mmc_card_name(card), part_md->part_type, cap_str); | 1490 | mmc_card_name(card), part_md->part_type, cap_str); |
1380 | return 0; | 1491 | return 0; |
1381 | } | 1492 | } |
1382 | 1493 | ||
1494 | /* MMC Physical partitions consist of two boot partitions and | ||
1495 | * up to four general purpose partitions. | ||
1496 | * For each partition enabled in EXT_CSD a block device will be allocated | ||
1497 | * to provide access to the partition. | ||
1498 | */ | ||
1499 | |||
1383 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) | 1500 | static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) |
1384 | { | 1501 | { |
1385 | int ret = 0; | 1502 | int idx, ret = 0; |
1386 | 1503 | ||
1387 | if (!mmc_card_mmc(card)) | 1504 | if (!mmc_card_mmc(card)) |
1388 | return 0; | 1505 | return 0; |
1389 | 1506 | ||
1390 | if (card->ext_csd.boot_size) { | 1507 | for (idx = 0; idx < card->nr_parts; idx++) { |
1391 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0, | 1508 | if (card->part[idx].size) { |
1392 | card->ext_csd.boot_size >> 9, | 1509 | ret = mmc_blk_alloc_part(card, md, |
1393 | true, | 1510 | card->part[idx].part_cfg, |
1394 | "boot0"); | 1511 | card->part[idx].size >> 9, |
1395 | if (ret) | 1512 | card->part[idx].force_ro, |
1396 | return ret; | 1513 | card->part[idx].name); |
1397 | ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1, | 1514 | if (ret) |
1398 | card->ext_csd.boot_size >> 9, | 1515 | return ret; |
1399 | true, | 1516 | } |
1400 | "boot1"); | ||
1401 | if (ret) | ||
1402 | return ret; | ||
1403 | } | 1517 | } |
1404 | 1518 | ||
1405 | return ret; | 1519 | return ret; |
@@ -1415,7 +1529,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) | |||
1415 | mmc_release_host(card->host); | 1529 | mmc_release_host(card->host); |
1416 | 1530 | ||
1417 | if (err) { | 1531 | if (err) { |
1418 | printk(KERN_ERR "%s: unable to set block size to 512: %d\n", | 1532 | pr_err("%s: unable to set block size to 512: %d\n", |
1419 | md->disk->disk_name, err); | 1533 | md->disk->disk_name, err); |
1420 | return -EINVAL; | 1534 | return -EINVAL; |
1421 | } | 1535 | } |
@@ -1517,7 +1631,7 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
1517 | 1631 | ||
1518 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, | 1632 | string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, |
1519 | cap_str, sizeof(cap_str)); | 1633 | cap_str, sizeof(cap_str)); |
1520 | printk(KERN_INFO "%s: %s %s %s %s\n", | 1634 | pr_info("%s: %s %s %s %s\n", |
1521 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), | 1635 | md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), |
1522 | cap_str, md->read_only ? "(ro)" : ""); | 1636 | cap_str, md->read_only ? "(ro)" : ""); |
1523 | 1637 | ||