author    Per Forlin <per.forlin@linaro.org>    2011-07-09 17:12:36 -0400
committer Chris Ball <cjb@laptop.org>           2011-07-20 17:21:13 -0400
commit    97868a2bdfc2fc79a987b64f1611034b56a3f8c4 (patch)
tree      e2d76b7706ae6a4d999265e225654b814fa35ad3 /drivers
parent    bf043330362b1ccb0c0611b8fc394e06ba8498b0 (diff)
mmc: block: add member in mmc queue struct to hold request data
The way the request data is organized in the mmc queue struct only allows
processing of one request at a time. This patch adds a new struct to hold mmc
queue request data such as the sg list, request, blk request and bounce
buffers, and updates any functions depending on the mmc queue struct. This
prepares for using multiple active requests in one mmc queue.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/mmc/card/block.c  109
-rw-r--r--  drivers/mmc/card/queue.c  129
-rw-r--r--  drivers/mmc/card/queue.h   31
3 files changed, 141 insertions(+), 128 deletions(-)
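For orientation before the hunks themselves: the sketch below (not part of the
patch) shows how the issue path reaches its per-request data once this change
is in place. It only uses names introduced by the hunks that follow;
example_issue() is a hypothetical caller added for illustration, and command
setup and error handling are elided.

/* Illustration only -- not part of the patch. Command setup and error
 * handling as done in mmc_blk_issue_rw_rq() are elided. */
static void example_issue(struct mmc_queue *mq)
{
	/* mqrq_cur points at the active slot in mq->mqrq[] */
	struct mmc_queue_req *mqrq = mq->mqrq_cur;
	/* the commands, data and sg list for this request live in the slot */
	struct mmc_blk_request *brq = &mqrq->brq;

	memset(brq, 0, sizeof(struct mmc_blk_request));
	brq->mrq.cmd = &brq->cmd;
	brq->mrq.data = &brq->data;
	/* ... opcode, arg, flags, blksz, blocks set up as in block.c ... */

	brq->data.sg = mqrq->sg;
	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);

	mmc_queue_bounce_pre(mqrq);		/* copy into bounce buffer on writes */
	mmc_wait_for_req(mq->card->host, &brq->mrq);
	mmc_queue_bounce_post(mqrq);		/* copy out of bounce buffer on reads */
}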
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bee21063748..88bcc4e0be2 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -427,14 +427,6 @@ static const struct block_device_operations mmc_bdops = {
 #endif
 };
 
-struct mmc_blk_request {
-	struct mmc_request	mrq;
-	struct mmc_command	sbc;
-	struct mmc_command	cmd;
-	struct mmc_command	stop;
-	struct mmc_data		data;
-};
-
 static inline int mmc_blk_part_switch(struct mmc_card *card,
 				      struct mmc_blk_data *md)
 {
@@ -824,7 +816,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
-	struct mmc_blk_request brq;
+	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
 	int ret = 1, disable_multi = 0, retry = 0;
 
 	/*
@@ -839,60 +831,60 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	do {
 		u32 readcmd, writecmd;
 
-		memset(&brq, 0, sizeof(struct mmc_blk_request));
-		brq.mrq.cmd = &brq.cmd;
-		brq.mrq.data = &brq.data;
+		memset(brq, 0, sizeof(struct mmc_blk_request));
+		brq->mrq.cmd = &brq->cmd;
+		brq->mrq.data = &brq->data;
 
-		brq.cmd.arg = blk_rq_pos(req);
+		brq->cmd.arg = blk_rq_pos(req);
 		if (!mmc_card_blockaddr(card))
-			brq.cmd.arg <<= 9;
-		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
-		brq.data.blksz = 512;
-		brq.stop.opcode = MMC_STOP_TRANSMISSION;
-		brq.stop.arg = 0;
-		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-		brq.data.blocks = blk_rq_sectors(req);
+			brq->cmd.arg <<= 9;
+		brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+		brq->data.blksz = 512;
+		brq->stop.opcode = MMC_STOP_TRANSMISSION;
+		brq->stop.arg = 0;
+		brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+		brq->data.blocks = blk_rq_sectors(req);
 
 		/*
 		 * The block layer doesn't support all sector count
 		 * restrictions, so we need to be prepared for too big
 		 * requests.
 		 */
-		if (brq.data.blocks > card->host->max_blk_count)
-			brq.data.blocks = card->host->max_blk_count;
+		if (brq->data.blocks > card->host->max_blk_count)
+			brq->data.blocks = card->host->max_blk_count;
 
 		/*
 		 * After a read error, we redo the request one sector at a time
 		 * in order to accurately determine which sectors can be read
 		 * successfully.
 		 */
-		if (disable_multi && brq.data.blocks > 1)
-			brq.data.blocks = 1;
+		if (disable_multi && brq->data.blocks > 1)
+			brq->data.blocks = 1;
 
-		if (brq.data.blocks > 1 || do_rel_wr) {
+		if (brq->data.blocks > 1 || do_rel_wr) {
 			/* SPI multiblock writes terminate using a special
 			 * token, not a STOP_TRANSMISSION request.
 			 */
 			if (!mmc_host_is_spi(card->host) ||
 			    rq_data_dir(req) == READ)
-				brq.mrq.stop = &brq.stop;
+				brq->mrq.stop = &brq->stop;
 			readcmd = MMC_READ_MULTIPLE_BLOCK;
 			writecmd = MMC_WRITE_MULTIPLE_BLOCK;
 		} else {
-			brq.mrq.stop = NULL;
+			brq->mrq.stop = NULL;
 			readcmd = MMC_READ_SINGLE_BLOCK;
 			writecmd = MMC_WRITE_BLOCK;
 		}
 		if (rq_data_dir(req) == READ) {
-			brq.cmd.opcode = readcmd;
-			brq.data.flags |= MMC_DATA_READ;
+			brq->cmd.opcode = readcmd;
+			brq->data.flags |= MMC_DATA_READ;
 		} else {
-			brq.cmd.opcode = writecmd;
-			brq.data.flags |= MMC_DATA_WRITE;
+			brq->cmd.opcode = writecmd;
+			brq->data.flags |= MMC_DATA_WRITE;
 		}
 
 		if (do_rel_wr)
-			mmc_apply_rel_rw(&brq, card, req);
+			mmc_apply_rel_rw(brq, card, req);
 
 		/*
 		 * Pre-defined multi-block transfers are preferable to
@@ -914,29 +906,29 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 */
 
 		if ((md->flags & MMC_BLK_CMD23) &&
-		    mmc_op_multi(brq.cmd.opcode) &&
+		    mmc_op_multi(brq->cmd.opcode) &&
 		    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
-			brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
-			brq.sbc.arg = brq.data.blocks |
+			brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+			brq->sbc.arg = brq->data.blocks |
 				(do_rel_wr ? (1 << 31) : 0);
-			brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
-			brq.mrq.sbc = &brq.sbc;
+			brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+			brq->mrq.sbc = &brq->sbc;
 		}
 
-		mmc_set_data_timeout(&brq.data, card);
+		mmc_set_data_timeout(&brq->data, card);
 
-		brq.data.sg = mq->sg;
-		brq.data.sg_len = mmc_queue_map_sg(mq);
+		brq->data.sg = mq->mqrq_cur->sg;
+		brq->data.sg_len = mmc_queue_map_sg(mq, mq->mqrq_cur);
 
 		/*
 		 * Adjust the sg list so it is the same size as the
 		 * request.
 		 */
-		if (brq.data.blocks != blk_rq_sectors(req)) {
-			int i, data_size = brq.data.blocks << 9;
+		if (brq->data.blocks != blk_rq_sectors(req)) {
+			int i, data_size = brq->data.blocks << 9;
 			struct scatterlist *sg;
 
-			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
+			for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
 				data_size -= sg->length;
 				if (data_size <= 0) {
 					sg->length += data_size;
@@ -944,14 +936,14 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 					break;
 				}
 			}
-			brq.data.sg_len = i;
+			brq->data.sg_len = i;
 		}
 
-		mmc_queue_bounce_pre(mq);
+		mmc_queue_bounce_pre(mq->mqrq_cur);
 
-		mmc_wait_for_req(card->host, &brq.mrq);
+		mmc_wait_for_req(card->host, &brq->mrq);
 
-		mmc_queue_bounce_post(mq);
+		mmc_queue_bounce_post(mq->mqrq_cur);
 
 		/*
 		 * sbc.error indicates a problem with the set block count
@@ -963,8 +955,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * stop.error indicates a problem with the stop command. Data
 		 * may have been transferred, or may still be transferring.
 		 */
-		if (brq.sbc.error || brq.cmd.error || brq.stop.error) {
-			switch (mmc_blk_cmd_recovery(card, req, &brq)) {
+		if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
+			switch (mmc_blk_cmd_recovery(card, req, brq)) {
 			case ERR_RETRY:
 				if (retry++ < 5)
 					continue;
@@ -980,9 +972,9 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * initial command - such as address errors. No data
 		 * has been transferred.
 		 */
-		if (brq.cmd.resp[0] & CMD_ERRORS) {
+		if (brq->cmd.resp[0] & CMD_ERRORS) {
 			pr_err("%s: r/w command failed, status = %#x\n",
-				req->rq_disk->disk_name, brq.cmd.resp[0]);
+				req->rq_disk->disk_name, brq->cmd.resp[0]);
 			goto cmd_abort;
 		}
 
@@ -1009,15 +1001,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 				(R1_CURRENT_STATE(status) == R1_STATE_PRG));
 		}
 
-		if (brq.data.error) {
+		if (brq->data.error) {
 			pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
-			       req->rq_disk->disk_name, brq.data.error,
+			       req->rq_disk->disk_name, brq->data.error,
 			       (unsigned)blk_rq_pos(req),
 			       (unsigned)blk_rq_sectors(req),
-			       brq.cmd.resp[0], brq.stop.resp[0]);
+			       brq->cmd.resp[0], brq->stop.resp[0]);
 
 			if (rq_data_dir(req) == READ) {
-				if (brq.data.blocks > 1) {
+				if (brq->data.blocks > 1) {
 					/* Redo read one sector at a time */
 					pr_warning("%s: retrying using single block read\n",
 						   req->rq_disk->disk_name);
@@ -1031,7 +1023,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 				 * read a single sector.
 				 */
 				spin_lock_irq(&md->lock);
-				ret = __blk_end_request(req, -EIO, brq.data.blksz);
+				ret = __blk_end_request(req, -EIO,
+						brq->data.blksz);
 				spin_unlock_irq(&md->lock);
 				continue;
 			} else {
@@ -1043,7 +1036,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		 * A block was successfully transferred.
 		 */
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	} while (ret);
 
@@ -1069,7 +1062,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		}
 	} else {
 		spin_lock_irq(&md->lock);
-		ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
+		ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index defc11b4572..9122ff5f39c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -56,7 +56,7 @@ static int mmc_queue_thread(void *d)
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		req = blk_fetch_request(q);
-		mq->req = req;
+		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
 		if (!req) {
@@ -97,10 +97,25 @@ static void mmc_request(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->req)
+	if (!mq->mqrq_cur->req)
 		wake_up_process(mq->thread);
 }
 
+struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
 static void mmc_queue_setup_discard(struct request_queue *q,
 				    struct mmc_card *card)
 {
@@ -137,6 +152,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
 		limit = *mmc_dev(host)->dma_mask;
@@ -146,8 +162,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (!mq->queue)
 		return -ENOMEM;
 
+	memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+	mq->mqrq_cur = mqrq_cur;
 	mq->queue->queuedata = mq;
-	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
@@ -168,53 +185,44 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		bouncesz = host->max_blk_count * 512;
 
 		if (bouncesz > 512) {
-			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-			if (!mq->bounce_buf) {
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
 				printk(KERN_WARNING "%s: unable to "
-					"allocate bounce buffer\n",
+					"allocate bounce cur buffer\n",
 					mmc_card_name(card));
 			}
 		}
 
-		if (mq->bounce_buf) {
+		if (mqrq_cur->bounce_buf) {
 			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
 			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
 			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mq->sg = kmalloc(sizeof(struct scatterlist),
-				GFP_KERNEL);
-			if (!mq->sg) {
-				ret = -ENOMEM;
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->sg, 1);
 
-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
-				bouncesz / 512, GFP_KERNEL);
-			if (!mq->bounce_sg) {
-				ret = -ENOMEM;
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->bounce_sg, bouncesz / 512);
+
 		}
 	}
 #endif
 
-	if (!mq->bounce_buf) {
+	if (!mqrq_cur->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
 		blk_queue_max_hw_sectors(mq->queue,
 			min(host->max_blk_count, host->max_req_size / 512));
 		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_segs, GFP_KERNEL);
-		if (!mq->sg) {
-			ret = -ENOMEM;
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
 			goto cleanup_queue;
-		}
-		sg_init_table(mq->sg, host->max_segs);
+
 	}
 
 	sema_init(&mq->thread_sem, 1);
@@ -229,16 +237,15 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 
 	return 0;
  free_bounce_sg:
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+
  cleanup_queue:
-	if (mq->sg)
-		kfree(mq->sg);
-	mq->sg = NULL;
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -247,6 +254,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -260,16 +268,14 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	blk_start_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
 
-	kfree(mq->sg);
-	mq->sg = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
 
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
 
 	mq->card = NULL;
 }
@@ -322,27 +328,27 @@ void mmc_queue_resume(struct mmc_queue *mq)
 /*
  * Prepare the sg list(s) to be handed of to the host driver
  */
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	unsigned int sg_len;
 	size_t buflen;
 	struct scatterlist *sg;
 	int i;
 
-	if (!mq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+	if (!mqrq->bounce_buf)
+		return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
 
-	BUG_ON(!mq->bounce_sg);
+	BUG_ON(!mqrq->bounce_sg);
 
-	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+	sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
-	mq->bounce_sg_len = sg_len;
+	mqrq->bounce_sg_len = sg_len;
 
 	buflen = 0;
-	for_each_sg(mq->bounce_sg, sg, sg_len, i)
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
 		buflen += sg->length;
 
-	sg_init_one(mq->sg, mq->bounce_buf, buflen);
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
 
 	return 1;
 }
@@ -351,31 +357,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
  * If writing, bounce the data to the buffer before the request
  * is sent to the host driver
  */
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mq->req) != WRITE)
+	if (rq_data_dir(mqrq->req) != WRITE)
 		return;
 
-	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
 
 /*
  * If reading, bounce the data from the buffer after the request
  * has been handled by the host driver
  */
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (rq_data_dir(mq->req) != READ)
+	if (rq_data_dir(mqrq->req) != READ)
 		return;
 
-	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
-		mq->bounce_buf, mq->sg[0].length);
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
-
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8dc9c..c1a69ac6fff 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,33 @@
 struct request;
 struct task_struct;
 
+struct mmc_blk_request {
+	struct mmc_request	mrq;
+	struct mmc_command	sbc;
+	struct mmc_command	cmd;
+	struct mmc_command	stop;
+	struct mmc_data		data;
+};
+
+struct mmc_queue_req {
+	struct request		*req;
+	struct mmc_blk_request	brq;
+	struct scatterlist	*sg;
+	char			*bounce_buf;
+	struct scatterlist	*bounce_sg;
+	unsigned int		bounce_sg_len;
+};
+
 struct mmc_queue {
 	struct mmc_card		*card;
 	struct task_struct	*thread;
 	struct semaphore	thread_sem;
 	unsigned int		flags;
-	struct request		*req;
 	int			(*issue_fn)(struct mmc_queue *, struct request *);
 	void			*data;
 	struct request_queue	*queue;
-	struct scatterlist	*sg;
-	char			*bounce_buf;
-	struct scatterlist	*bounce_sg;
-	unsigned int		bounce_sg_len;
+	struct mmc_queue_req	mqrq[1];
+	struct mmc_queue_req	*mqrq_cur;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +39,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
 
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+				     struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 #endif