author     Per Forlin <per.forlin@linaro.org>    2011-07-01 12:55:29 -0400
committer  Chris Ball <cjb@laptop.org>           2011-07-20 17:21:13 -0400
commit     54d49d77628bed77e5491b8a2a1158a492843a19 (patch)
tree       6001701b192b9ef07f3826b96b8e67147a48f8b6 /drivers/mmc/card/block.c
parent     97868a2bdfc2fc79a987b64f1611034b56a3f8c4 (diff)
mmc: block: add a block request prepare function
Break out code from mmc_blk_issue_rw_rq to create a block request prepare
function. This doesn't change any functionality; it helps when handling
more than one active block request.
Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--   drivers/mmc/card/block.c   218
1 file changed, 114 insertions(+), 104 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 88bcc4e0be21..a0a76f48b253 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -812,12 +812,15 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
          R1_CC_ERROR |          /* Card controller error */             \
          R1_ERROR)              /* General/unknown error */
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+                               struct mmc_card *card,
+                               int disable_multi,
+                               struct mmc_queue *mq)
 {
+        u32 readcmd, writecmd;
+        struct mmc_blk_request *brq = &mqrq->brq;
+        struct request *req = mqrq->req;
         struct mmc_blk_data *md = mq->data;
-        struct mmc_card *card = md->queue.card;
-        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
-        int ret = 1, disable_multi = 0, retry = 0;
 
         /*
          * Reliable writes are used to implement Forced Unit Access and
@@ -828,119 +831,126 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
                   (rq_data_dir(req) == WRITE) &&
                   (md->flags & MMC_BLK_REL_WR);
 
-        do {
-                u32 readcmd, writecmd;
-
-                memset(brq, 0, sizeof(struct mmc_blk_request));
-                brq->mrq.cmd = &brq->cmd;
-                brq->mrq.data = &brq->data;
-
-                brq->cmd.arg = blk_rq_pos(req);
-                if (!mmc_card_blockaddr(card))
-                        brq->cmd.arg <<= 9;
-                brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-                brq->data.blksz = 512;
-                brq->stop.opcode = MMC_STOP_TRANSMISSION;
-                brq->stop.arg = 0;
-                brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-                brq->data.blocks = blk_rq_sectors(req);
-
-                /*
-                 * The block layer doesn't support all sector count
-                 * restrictions, so we need to be prepared for too big
-                 * requests.
-                 */
-                if (brq->data.blocks > card->host->max_blk_count)
-                        brq->data.blocks = card->host->max_blk_count;
+        memset(brq, 0, sizeof(struct mmc_blk_request));
+        brq->mrq.cmd = &brq->cmd;
+        brq->mrq.data = &brq->data;
 
-                /*
-                 * After a read error, we redo the request one sector at a time
-                 * in order to accurately determine which sectors can be read
-                 * successfully.
-                 */
-                if (disable_multi && brq->data.blocks > 1)
-                        brq->data.blocks = 1;
+        brq->cmd.arg = blk_rq_pos(req);
+        if (!mmc_card_blockaddr(card))
+                brq->cmd.arg <<= 9;
+        brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+        brq->data.blksz = 512;
+        brq->stop.opcode = MMC_STOP_TRANSMISSION;
+        brq->stop.arg = 0;
+        brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+        brq->data.blocks = blk_rq_sectors(req);
 
-                if (brq->data.blocks > 1 || do_rel_wr) {
-                        /* SPI multiblock writes terminate using a special
-                         * token, not a STOP_TRANSMISSION request.
-                         */
-                        if (!mmc_host_is_spi(card->host) ||
-                            rq_data_dir(req) == READ)
-                                brq->mrq.stop = &brq->stop;
-                        readcmd = MMC_READ_MULTIPLE_BLOCK;
-                        writecmd = MMC_WRITE_MULTIPLE_BLOCK;
-                } else {
-                        brq->mrq.stop = NULL;
-                        readcmd = MMC_READ_SINGLE_BLOCK;
-                        writecmd = MMC_WRITE_BLOCK;
-                }
-                if (rq_data_dir(req) == READ) {
-                        brq->cmd.opcode = readcmd;
-                        brq->data.flags |= MMC_DATA_READ;
-                } else {
-                        brq->cmd.opcode = writecmd;
-                        brq->data.flags |= MMC_DATA_WRITE;
-                }
+        /*
+         * The block layer doesn't support all sector count
+         * restrictions, so we need to be prepared for too big
+         * requests.
+         */
+        if (brq->data.blocks > card->host->max_blk_count)
+                brq->data.blocks = card->host->max_blk_count;
 
-                if (do_rel_wr)
-                        mmc_apply_rel_rw(brq, card, req);
+        /*
+         * After a read error, we redo the request one sector at a time
+         * in order to accurately determine which sectors can be read
+         * successfully.
+         */
+        if (disable_multi && brq->data.blocks > 1)
+                brq->data.blocks = 1;
 
-                /*
-                 * Pre-defined multi-block transfers are preferable to
-                 * open ended-ones (and necessary for reliable writes).
-                 * However, it is not sufficient to just send CMD23,
-                 * and avoid the final CMD12, as on an error condition
-                 * CMD12 (stop) needs to be sent anyway. This, coupled
-                 * with Auto-CMD23 enhancements provided by some
-                 * hosts, means that the complexity of dealing
-                 * with this is best left to the host. If CMD23 is
-                 * supported by card and host, we'll fill sbc in and let
-                 * the host deal with handling it correctly. This means
-                 * that for hosts that don't expose MMC_CAP_CMD23, no
-                 * change of behavior will be observed.
-                 *
-                 * N.B: Some MMC cards experience perf degradation.
-                 * We'll avoid using CMD23-bounded multiblock writes for
-                 * these, while retaining features like reliable writes.
+        if (brq->data.blocks > 1 || do_rel_wr) {
+                /* SPI multiblock writes terminate using a special
+                 * token, not a STOP_TRANSMISSION request.
                  */
+                if (!mmc_host_is_spi(card->host) ||
+                    rq_data_dir(req) == READ)
+                        brq->mrq.stop = &brq->stop;
+                readcmd = MMC_READ_MULTIPLE_BLOCK;
+                writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+        } else {
+                brq->mrq.stop = NULL;
+                readcmd = MMC_READ_SINGLE_BLOCK;
+                writecmd = MMC_WRITE_BLOCK;
+        }
+        if (rq_data_dir(req) == READ) {
+                brq->cmd.opcode = readcmd;
+                brq->data.flags |= MMC_DATA_READ;
+        } else {
+                brq->cmd.opcode = writecmd;
+                brq->data.flags |= MMC_DATA_WRITE;
+        }
 
-                if ((md->flags & MMC_BLK_CMD23) &&
-                    mmc_op_multi(brq->cmd.opcode) &&
-                    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-                        brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
-                        brq->sbc.arg = brq->data.blocks |
-                                (do_rel_wr ? (1 << 31) : 0);
-                        brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-                        brq->mrq.sbc = &brq->sbc;
-                }
+        if (do_rel_wr)
+                mmc_apply_rel_rw(brq, card, req);
 
-                mmc_set_data_timeout(&brq->data, card);
+        /*
+         * Pre-defined multi-block transfers are preferable to
+         * open ended-ones (and necessary for reliable writes).
+         * However, it is not sufficient to just send CMD23,
+         * and avoid the final CMD12, as on an error condition
+         * CMD12 (stop) needs to be sent anyway. This, coupled
+         * with Auto-CMD23 enhancements provided by some
+         * hosts, means that the complexity of dealing
+         * with this is best left to the host. If CMD23 is
+         * supported by card and host, we'll fill sbc in and let
+         * the host deal with handling it correctly. This means
+         * that for hosts that don't expose MMC_CAP_CMD23, no
+         * change of behavior will be observed.
+         *
+         * N.B: Some MMC cards experience perf degradation.
+         * We'll avoid using CMD23-bounded multiblock writes for
+         * these, while retaining features like reliable writes.
+         */
 
-                brq->data.sg = mq->mqrq_cur->sg;
-                brq->data.sg_len = mmc_queue_map_sg(mq, mq->mqrq_cur);
+        if ((md->flags & MMC_BLK_CMD23) &&
+            mmc_op_multi(brq->cmd.opcode) &&
+            (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+                brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+                brq->sbc.arg = brq->data.blocks |
+                        (do_rel_wr ? (1 << 31) : 0);
+                brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+                brq->mrq.sbc = &brq->sbc;
+        }
 
-                /*
-                 * Adjust the sg list so it is the same size as the
-                 * request.
-                 */
-                if (brq->data.blocks != blk_rq_sectors(req)) {
-                        int i, data_size = brq->data.blocks << 9;
-                        struct scatterlist *sg;
-
-                        for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
-                                data_size -= sg->length;
-                                if (data_size <= 0) {
-                                        sg->length += data_size;
-                                        i++;
-                                        break;
-                                }
+        mmc_set_data_timeout(&brq->data, card);
+
+        brq->data.sg = mqrq->sg;
+        brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+        /*
+         * Adjust the sg list so it is the same size as the
+         * request.
+         */
+        if (brq->data.blocks != blk_rq_sectors(req)) {
+                int i, data_size = brq->data.blocks << 9;
+                struct scatterlist *sg;
+
+                for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+                        data_size -= sg->length;
+                        if (data_size <= 0) {
+                                sg->length += data_size;
+                                i++;
+                                break;
                         }
-                        brq->data.sg_len = i;
                 }
+                brq->data.sg_len = i;
+        }
 
-                mmc_queue_bounce_pre(mq->mqrq_cur);
+        mmc_queue_bounce_pre(mqrq);
+}
 
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+{
+        struct mmc_blk_data *md = mq->data;
+        struct mmc_card *card = md->queue.card;
+        struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+        int ret = 1, disable_multi = 0, retry = 0;
+
+        do {
+                mmc_blk_rw_rq_prep(mq->mqrq_cur, card, disable_multi, mq);
                 mmc_wait_for_req(card->host, &brq->mrq);
 
                 mmc_queue_bounce_post(mq->mqrq_cur);
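For context, the point of having request preparation in its own function is that an issue loop can set up the descriptor for request N+1 while request N is still being transferred. The sketch below is illustrative only and is not kernel code: every name in it (struct blk_desc, prep_desc, start_transfer, wait_transfer) is a hypothetical stand-in for the mqrq/brq preparation and mmc_wait_for_req steps shown in the diff above.

/*
 * Minimal sketch of the prepare/issue split: keep two descriptors so the
 * next request can be prepared while the current one is "in flight".
 * All identifiers here are invented for illustration.
 */
#include <stdio.h>
#include <string.h>

struct blk_desc {
        unsigned int sector;
        unsigned int nr_sectors;
        int write;
};

/* Pure setup, no waiting: mirrors the role of a "prep" helper. */
static void prep_desc(struct blk_desc *d, unsigned int sector,
                      unsigned int nr_sectors, int write)
{
        memset(d, 0, sizeof(*d));
        d->sector = sector;
        d->nr_sectors = nr_sectors;
        d->write = write;
}

/* Stand-ins for starting and completing a hardware transfer. */
static void start_transfer(const struct blk_desc *d)
{
        printf("start %s of %u sectors at %u\n",
               d->write ? "write" : "read", d->nr_sectors, d->sector);
}

static void wait_transfer(const struct blk_desc *d)
{
        printf("done  %s at %u\n", d->write ? "write" : "read", d->sector);
}

int main(void)
{
        struct blk_desc desc[2];
        unsigned int sectors[] = { 0, 8, 16, 24 };
        size_t n = sizeof(sectors) / sizeof(sectors[0]);
        int cur = 0;

        prep_desc(&desc[cur], sectors[0], 8, 0);
        for (size_t i = 0; i < n; i++) {
                start_transfer(&desc[cur]);
                /* Overlap: prepare the following request before waiting. */
                if (i + 1 < n)
                        prep_desc(&desc[cur ^ 1], sectors[i + 1], 8, 0);
                wait_transfer(&desc[cur]);
                cur ^= 1;
        }
        return 0;
}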