author		Per Forlin <per.forlin@linaro.org>	2011-07-09 17:12:36 -0400
committer	Chris Ball <cjb@laptop.org>		2011-07-20 17:21:13 -0400
commit		97868a2bdfc2fc79a987b64f1611034b56a3f8c4 (patch)
tree		e2d76b7706ae6a4d999265e225654b814fa35ad3 /drivers/mmc/card/block.c
parent		bf043330362b1ccb0c0611b8fc394e06ba8498b0 (diff)
mmc: block: add member in mmc queue struct to hold request data
The way the request data is organized in the mmc queue struct allows only one request to be processed at a time. This patch adds a new struct to hold mmc queue request data such as the sg list, request, blk request and bounce buffers, and updates the functions that depend on the mmc queue struct. This prepares for using multiple active requests in one mmc queue.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r-- | drivers/mmc/card/block.c | 109
1 file changed, 51 insertions(+), 58 deletions(-)
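For orientation before the diff: this patch moves the per-request state into a container hung off the mmc queue. Below is a minimal sketch of that container, reconstructed only from what the diff dereferences (mq->mqrq_cur->brq, mq->mqrq_cur->sg) and the bounce buffers named in the commit message; the type name mmc_queue_req, the array size, and every member not visible in this diff are assumptions, not taken from the patch itself.

struct mmc_queue_req {
	struct request		*req;		/* blk request being served (assumed name) */
	struct mmc_blk_request	brq;		/* mrq/sbc/cmd/stop/data, moved out of block.c */
	struct scatterlist	*sg;		/* sg list, previously mq->sg */
	char			*bounce_buf;	/* bounce buffer (assumed name) */
	struct scatterlist	*bounce_sg;	/* sg list covering the bounce buffer (assumed name) */
	unsigned int		bounce_sg_len;	/* entries in bounce_sg (assumed name) */
};

struct mmc_queue {
	/* ... existing members ... */
	struct mmc_queue_req	mqrq[1];	/* request slots; size is an assumption */
	struct mmc_queue_req	*mqrq_cur;	/* slot used by the request being issued */
};

With the request state in a slot rather than on the stack of mmc_blk_issue_rw_rq(), a later change can add a second slot and alternate mqrq_cur between them, which is the "multiple active requests" this patch prepares for.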
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bee21063748b..88bcc4e0be21 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -427,14 +427,6 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-	struct mmc_request	mrq;
-	struct mmc_command	sbc;
-	struct mmc_command	cmd;
-	struct mmc_command	stop;
-	struct mmc_data		data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
@@ -824,7 +816,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request brq;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
 	int ret = 1, disable_multi = 0, retry = 0;
 
 	/*
@@ -839,60 +831,60 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	do {
 		u32 readcmd, writecmd;
 
-		memset(&brq, 0, sizeof(struct mmc_blk_request));
-		brq.mrq.cmd = &brq.cmd;
-		brq.mrq.data = &brq.data;
+		memset(brq, 0, sizeof(struct mmc_blk_request));
+		brq->mrq.cmd = &brq->cmd;
+		brq->mrq.data = &brq->data;
 
-		brq.cmd.arg = blk_rq_pos(req);
+		brq->cmd.arg = blk_rq_pos(req);
 		if (!mmc_card_blockaddr(card))
-			brq.cmd.arg <<= 9;
-		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 512;
-		brq.stop.opcode = MMC_STOP_TRANSMISSION;
-		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = blk_rq_sectors(req);
+			brq->cmd.arg <<= 9;
+		brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+		brq->data.blksz = 512;
+		brq->stop.opcode = MMC_STOP_TRANSMISSION;
+		brq->stop.arg = 0;
+		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		brq->data.blocks = blk_rq_sectors(req);
 
 		/*
 		 * The block layer doesn't support all sector count
 		 * restrictions, so we need to be prepared for too big
 		 * requests.
 		 */
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
+		if (brq->data.blocks > card->host->max_blk_count)
+			brq->data.blocks = card->host->max_blk_count;
 
 		/*
 		 * After a read error, we redo the request one sector at a time
 		 * in order to accurately determine which sectors can be read
 		 * successfully.
 		 */
-		if (disable_multi && brq.data.blocks > 1)
-			brq.data.blocks = 1;
+		if (disable_multi && brq->data.blocks > 1)
+			brq->data.blocks = 1;
 
-		if (brq.data.blocks > 1 || do_rel_wr) {
+		if (brq->data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
 			 */
 			if (!mmc_host_is_spi(card->host) ||
 			    rq_data_dir(req) == READ)
-				brq.mrq.stop = &brq.stop;
+				brq->mrq.stop = &brq->stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
 		} else {
-			brq.mrq.stop = NULL;
+			brq->mrq.stop = NULL;
 			readcmd = MMC_READ_SINGLE_BLOCK;
 			writecmd = MMC_WRITE_BLOCK;
 		}
 		if (rq_data_dir(req) == READ) {
-			brq.cmd.opcode = readcmd;
-			brq.data.flags |= MMC_DATA_READ;
+			brq->cmd.opcode = readcmd;
+			brq->data.flags |= MMC_DATA_READ;
 		} else {
-			brq.cmd.opcode = writecmd;
-			brq.data.flags |= MMC_DATA_WRITE;
+			brq->cmd.opcode = writecmd;
+			brq->data.flags |= MMC_DATA_WRITE;
 		}
 
 		if (do_rel_wr)
-			mmc_apply_rel_rw(&brq, card, req);
+			mmc_apply_rel_rw(brq, card, req);
 
 		/*
 		 * Pre-defined multi-block transfers are preferable to
@@ -914,29 +906,29 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 */
 
 		if ((md->flags & MMC_BLK_CMD23) &&
-		    mmc_op_multi(brq.cmd.opcode) &&
+		    mmc_op_multi(brq->cmd.opcode) &&
 		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
-			brq.sbc.arg = brq.data.blocks |
+			brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq->sbc.arg = brq->data.blocks |
 				(do_rel_wr ? (1 << 31) : 0);
-			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-			brq.mrq.sbc = &brq.sbc;
+			brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq->mrq.sbc = &brq->sbc;
 		}
 
-		mmc_set_data_timeout(&brq.data, card);
+		mmc_set_data_timeout(&brq->data, card);
 
-		brq.data.sg = mq->sg;
-		brq.data.sg_len = mmc_queue_map_sg(mq);
+		brq->data.sg = mq->mqrq_cur->sg;
+		brq->data.sg_len = mmc_queue_map_sg(mq, mq->mqrq_cur);
 
 		/*
 		 * Adjust the sg list so it is the same size as the
 		 * request.
 		 */
-		if (brq.data.blocks != blk_rq_sectors(req)) {
-			int i, data_size = brq.data.blocks << 9;
+		if (brq->data.blocks != blk_rq_sectors(req)) {
+			int i, data_size = brq->data.blocks << 9;
 			struct scatterlist *sg;
 
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+			for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
 				data_size -= sg->length;
 				if (data_size <= 0) {
 					sg->length += data_size;
@@ -944,14 +936,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 					break;
 				}
 			}
-			brq.data.sg_len = i;
+			brq->data.sg_len = i;
 		}
 
-		mmc_queue_bounce_pre(mq);
+		mmc_queue_bounce_pre(mq->mqrq_cur);
 
-		mmc_wait_for_req(card->host, &brq.mrq);
+		mmc_wait_for_req(card->host, &brq->mrq);
 
-		mmc_queue_bounce_post(mq);
+		mmc_queue_bounce_post(mq->mqrq_cur);
 
 		/*
 		 * sbc.error indicates a problem with the set block count
@@ -963,8 +955,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * stop.error indicates a problem with the stop command. Data
 		 * may have been transferred, or may still be transferring.
 		 */
-		if (brq.sbc.error || brq.cmd.error || brq.stop.error) {
-			switch (mmc_blk_cmd_recovery(card, req, &brq)) {
+		if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+			switch (mmc_blk_cmd_recovery(card, req, brq)) {
 			case ERR_RETRY:
 				if (retry++ < 5)
 					continue;
@@ -980,9 +972,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * initial command - such as address errors. No data
 		 * has been transferred.
 		 */
-		if (brq.cmd.resp[0] & CMD_ERRORS) {
+		if (brq->cmd.resp[0] & CMD_ERRORS) {
 			pr_err("%s: r/w command failed, status = %#x\n",
-			       req->rq_disk->disk_name, brq.cmd.resp[0]);
+			       req->rq_disk->disk_name, brq->cmd.resp[0]);
 			goto cmd_abort;
 		}
 
@@ -1009,15 +1001,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 				 (R1_CURRENT_STATE(status) == R1_STATE_PRG));
 		}
 
-		if (brq.data.error) {
+		if (brq->data.error) {
 			pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.data.error,
+			       req->rq_disk->disk_name, brq->data.error,
 			       (unsigned)blk_rq_pos(req),
 			       (unsigned)blk_rq_sectors(req),
-			       brq.cmd.resp[0], brq.stop.resp[0]);
+			       brq->cmd.resp[0], brq->stop.resp[0]);
 
 			if (rq_data_dir(req) == READ) {
-				if (brq.data.blocks > 1) {
+				if (brq->data.blocks > 1) {
 					/* Redo read one sector at a time */
 					pr_warning("%s: retrying using single block read\n",
 						req->rq_disk->disk_name);
@@ -1031,7 +1023,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 				 * read a single sector.
 				 */
 				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, -EIO, brq.data.blksz);
+				ret = __blk_end_request(req, -EIO,
+							brq->data.blksz);
 				spin_unlock_irq(&md->lock);
 				continue;
 			} else {
@@ -1043,7 +1036,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * A block was successfully transferred.
 		 */
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
@@ -1069,7 +1062,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		}
 	} else {
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
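The changed call sites above (mmc_queue_map_sg(mq, mq->mqrq_cur), mmc_queue_bounce_pre(mq->mqrq_cur), mmc_queue_bounce_post(mq->mqrq_cur)) imply that the queue helpers in queue.c now take the per-request container. Plausible prototypes inferred from those calls alone; parameter names and the exact return type are assumptions:

unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq);	/* sg_len is unsigned int in struct mmc_data */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq);
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq);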