author     Per Forlin <per.forlin@linaro.org>    2011-07-01 12:55:33 -0400
committer  Chris Ball <cjb@laptop.org>           2011-07-20 17:21:15 -0400
commit     ee8a43a51c7681f19fe23b6b936e1d8094a8b7d1 (patch)
tree       8d4e11c82e24b2538c87fb655e499e7d7f9b99ca /drivers/mmc/card
parent     04296b7bfda45295a568b4b312e03828fae801dc (diff)
mmc: block: add handling for two parallel block requests in issue_rw_rq
Change mmc_blk_issue_rw_rq() to become asynchronous.
The execution flow looks like this:
* The mmc-queue calls issue_rw_rq(), which sends the request
to the host and returns to the mmc-queue.
* The mmc-queue calls issue_rw_rq() again with a new request.
* This new request is prepared in issue_rw_rq(), then it waits for
the active request to complete before pushing it to the host.
* When the mmc-queue is empty it will call issue_rw_rq() with a NULL
req to finish off the active request without starting a new request.
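
In rough outline, the function becomes a small pipeline around mmc_start_req().
The following is an illustrative paraphrase only, not the code added by this
patch (the helper name issue_rw_rq_sketch is invented for clarity; the real
implementation is mmc_blk_issue_rw_rq() in the diff below):

/*
 * Simplified sketch of the asynchronous flow described above.
 * Not compilable on its own; see mmc_blk_issue_rw_rq() below.
 */
static int issue_rw_rq_sketch(struct mmc_queue *mq, struct request *rqc)
{
	struct mmc_blk_data *md = mq->data;
	struct mmc_card *card = md->queue.card;
	struct mmc_async_req *areq = NULL;
	enum mmc_blk_status status;

	/* Nothing in flight and no new request: nothing to do. */
	if (!rqc && !mq->mqrq_prev->req)
		return 0;

	/* Prepare the new request (if any) while the previous one runs. */
	if (rqc) {
		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
		areq = &mq->mqrq_cur->mmc_active;
	}

	/*
	 * mmc_start_req() waits for the active request (if any) to finish,
	 * starts the prepared one, and hands back the completed request.
	 * A NULL areq only drains the pipeline.
	 */
	areq = mmc_start_req(card->host, areq, (int *)&status);
	if (!areq)
		return 0;	/* new request started, nothing completed yet */

	/* ... check status and __blk_end_request() the completed request ... */
	return 1;
}
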
Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/block.c | 86
-rw-r--r--  drivers/mmc/card/queue.c | 17
-rw-r--r--  drivers/mmc/card/queue.h |  1
3 files changed, 84 insertions, 20 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7ed2c68e8763..38d01492a52b 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -822,12 +822,14 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
 	 R1_CC_ERROR |		/* Card controller error */		\
 	 R1_ERROR)		/* General/unknown error */
 
-int mmc_blk_err_check(struct mmc_blk_request *brq,
-		      struct request *req,
-		      struct mmc_card *card,
-		      struct mmc_blk_data *md)
+static int mmc_blk_err_check(struct mmc_card *card,
+			     struct mmc_async_req *areq)
 {
-	int ret = MMC_BLK_SUCCESS;
+	enum mmc_blk_status ret = MMC_BLK_SUCCESS;
+	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+						    mmc_active);
+	struct mmc_blk_request *brq = &mq_mrq->brq;
+	struct request *req = mq_mrq->req;
 
 	/*
 	 * sbc.error indicates a problem with the set block count
@@ -1038,24 +1040,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
 		brq->data.sg_len = i;
 	}
 
+	mqrq->mmc_active.mrq = &brq->mrq;
+	mqrq->mmc_active.err_check = mmc_blk_err_check;
+
 	mmc_queue_bounce_pre(mqrq);
 }
 
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
 {
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 	struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
 	int ret = 1, disable_multi = 0, retry = 0;
 	enum mmc_blk_status status;
+	struct mmc_queue_req *mq_rq;
+	struct request *req;
+	struct mmc_async_req *areq;
 
-	do {
-		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, disable_multi, mq);
-		mmc_wait_for_req(card->host, &brq->mrq);
+	if (!rqc && !mq->mqrq_prev->req)
+		return 0;
 
-		mmc_queue_bounce_post(mq->mqrq_cur);
+	do {
+		if (rqc) {
+			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+			areq = &mq->mqrq_cur->mmc_active;
+		} else
+			areq = NULL;
+		areq = mmc_start_req(card->host, areq, (int *) &status);
+		if (!areq)
+			return 0;
+
+		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+		brq = &mq_rq->brq;
+		req = mq_rq->req;
+		mmc_queue_bounce_post(mq_rq);
 
-		status = mmc_blk_err_check(brq, req, card, md);
 		switch (status) {
 		case MMC_BLK_SUCCESS:
 		case MMC_BLK_PARTIAL:
@@ -1066,6 +1085,19 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			ret = __blk_end_request(req, 0,
 						brq->data.bytes_xfered);
 			spin_unlock_irq(&md->lock);
+			if (status == MMC_BLK_SUCCESS && ret) {
+				/*
+				 * __blk_end_request() has returned non-zero
+				 * even though all data was transferred and
+				 * the host reported no errors.
+				 * If this happens it is a bug.
+				 */
+				printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+				       __func__, blk_rq_bytes(req),
+				       brq->data.bytes_xfered);
+				rqc = NULL;
+				goto cmd_abort;
+			}
 			break;
 		case MMC_BLK_CMD_ERR:
 			goto cmd_err;
@@ -1087,9 +1119,19 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 			ret = __blk_end_request(req, -EIO,
 						brq->data.blksz);
 			spin_unlock_irq(&md->lock);
+			if (!ret)
+				goto start_new_req;
 			break;
 		}
 
+		if (ret) {
+			/*
+			 * The request was not fully completed:
+			 * prepare it again and resend.
+			 */
+			mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+			mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+		}
 	} while (ret);
 
 	return 1;
@@ -1124,6 +1166,12 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 	ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
 	spin_unlock_irq(&md->lock);
 
+ start_new_req:
+	if (rqc) {
+		mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+		mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+	}
+
 	return 0;
 }
 
@@ -1133,26 +1181,34 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 	struct mmc_blk_data *md = mq->data;
 	struct mmc_card *card = md->queue.card;
 
-	mmc_claim_host(card->host);
+	if (req && !mq->mqrq_prev->req)
+		/* claim host only for the first request */
+		mmc_claim_host(card->host);
+
 	ret = mmc_blk_part_switch(card, md);
 	if (ret) {
 		ret = 0;
 		goto out;
 	}
 
-	if (req->cmd_flags & REQ_DISCARD) {
+	if (req && req->cmd_flags & REQ_DISCARD) {
+		/* complete ongoing async transfer before issuing discard */
+		if (card->host->areq)
+			mmc_blk_issue_rw_rq(mq, NULL);
 		if (req->cmd_flags & REQ_SECURE)
 			ret = mmc_blk_issue_secdiscard_rq(mq, req);
 		else
 			ret = mmc_blk_issue_discard_rq(mq, req);
-	} else if (req->cmd_flags & REQ_FLUSH) {
+	} else if (req && req->cmd_flags & REQ_FLUSH) {
 		ret = mmc_blk_issue_flush(mq, req);
 	} else {
 		ret = mmc_blk_issue_rw_rq(mq, req);
 	}
 
 out:
-	mmc_release_host(card->host);
+	if (!req)
+		/* release host only when there are no more requests */
+		mmc_release_host(card->host);
 	return ret;
 }
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index a38d310f5030..45fb362e3f01 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,6 +52,7 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
+		struct mmc_queue_req *tmp;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -59,7 +60,10 @@ static int mmc_queue_thread(void *d)
 		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (!req) {
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			mq->issue_fn(mq, req);
+		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
 				break;
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d)
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
-			continue;
 		}
-		set_current_state(TASK_RUNNING);
 
-		mq->issue_fn(mq, req);
+		/* Current request becomes previous request and vice versa. */
+		mq->mqrq_prev->brq.mrq.data = NULL;
+		mq->mqrq_prev->req = NULL;
+		tmp = mq->mqrq_prev;
+		mq->mqrq_prev = mq->mqrq_cur;
+		mq->mqrq_cur = tmp;
 	} while (1);
 	up(&mq->thread_sem);
 
@@ -97,7 +104,7 @@ static void mmc_request(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->mqrq_cur->req)
+	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 1a637d2e2ca6..d2a1eb4b9f9f 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -19,6 +19,7 @@ struct mmc_queue_req {
 	char			*bounce_buf;
 	struct scatterlist	*bounce_sg;
 	unsigned int		bounce_sg_len;
+	struct mmc_async_req	mmc_active;
 };
 
 struct mmc_queue {