author     Per Forlin <per.forlin@linaro.org>    2011-07-01 12:55:31 -0400
committer  Chris Ball <cjb@laptop.org>           2011-07-20 17:21:15 -0400
commit     04296b7bfda45295a568b4b312e03828fae801dc (patch)
tree       d7e61107bab3cb25d3f881506056ba2754eb2284 /drivers/mmc
parent     d78d4a8ad53f345dd3c0bb5f8d377baa523739f7 (diff)
mmc: queue: add a second mmc queue request member
Add an additional mmc queue request instance to make way for two active
block requests. One request may be active while the other request is
being prepared.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
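To make the intent concrete, below is a minimal userspace C sketch of the double-buffering pattern the second mmc_queue_req slot enables: one slot is "current" (being issued) while the other is prepared for the next request, and the roles swap between requests. All names here (sketch_req, slots, prepared, and so on) are illustrative assumptions, not code from this patch or from the MMC core.

/*
 * Illustrative userspace sketch (not kernel code) of the idea behind
 * mq->mqrq[2]: prepare one request slot while the other is active,
 * then swap the roles between block requests.
 */
#include <stdio.h>

struct sketch_req {
        int id;
        int prepared;           /* stands in for sg / bounce-buffer setup */
};

int main(void)
{
        struct sketch_req slots[2] = { { 0, 0 }, { 0, 0 } };
        struct sketch_req *cur = &slots[0];     /* request being issued   */
        struct sketch_req *prev = &slots[1];    /* request being prepared */

        for (int id = 1; id <= 4; id++) {
                /* Prepare the next request in the idle slot ... */
                prev->id = id;
                prev->prepared = 1;

                /* ... while, conceptually, the other slot is on the bus. */
                if (cur->prepared)
                        printf("completing request %d\n", cur->id);

                /* Swap roles: the freshly prepared request becomes current. */
                struct sketch_req *tmp = cur;
                cur = prev;
                prev = tmp;
        }

        /* Drain the last prepared request. */
        if (cur->prepared)
                printf("completing request %d\n", cur->id);
        return 0;
}

The real patch keeps both mmc_queue_req instances fully allocated at init time (scatterlists and optional bounce buffers), so nothing needs to be allocated on the fast path; the sketch only models the role swap.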
Diffstat (limited to 'drivers/mmc')
 -rw-r--r--  drivers/mmc/card/queue.c | 44
 -rw-r--r--  drivers/mmc/card/queue.h |  3
 2 files changed, 44 insertions, 3 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9122ff5f39c8..a38d310f5030 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -153,6 +153,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         u64 limit = BLK_BOUNCE_HIGH;
         int ret;
         struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
         if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                 limit = *mmc_dev(host)->dma_mask;
@@ -163,7 +164,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                 return -ENOMEM;
 
         memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+        memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
         mq->mqrq_cur = mqrq_cur;
+        mq->mqrq_prev = mqrq_prev;
         mq->queue->queuedata = mq;
 
         blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -191,9 +194,17 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
191 "allocate bounce cur buffer\n", 194 "allocate bounce cur buffer\n",
192 mmc_card_name(card)); 195 mmc_card_name(card));
193 } 196 }
197 mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
198 if (!mqrq_prev->bounce_buf) {
199 printk(KERN_WARNING "%s: unable to "
200 "allocate bounce prev buffer\n",
201 mmc_card_name(card));
202 kfree(mqrq_cur->bounce_buf);
203 mqrq_cur->bounce_buf = NULL;
204 }
194 } 205 }
195 206
196 if (mqrq_cur->bounce_buf) { 207 if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
197 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); 208 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
198 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); 209 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
199 blk_queue_max_segments(mq->queue, bouncesz / 512); 210 blk_queue_max_segments(mq->queue, bouncesz / 512);
@@ -208,11 +219,19 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                         if (ret)
                                 goto cleanup_queue;
 
+                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+                        if (ret)
+                                goto cleanup_queue;
+
+                        mqrq_prev->bounce_sg =
+                                mmc_alloc_sg(bouncesz / 512, &ret);
+                        if (ret)
+                                goto cleanup_queue;
                 }
         }
 #endif
 
-        if (!mqrq_cur->bounce_buf) {
+        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                 blk_queue_bounce_limit(mq->queue, limit);
                 blk_queue_max_hw_sectors(mq->queue,
                         min(host->max_blk_count, host->max_req_size / 512));
@@ -223,6 +242,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                 if (ret)
                         goto cleanup_queue;
 
+
+                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+                if (ret)
+                        goto cleanup_queue;
         }
 
         sema_init(&mq->thread_sem, 1);
@@ -239,6 +262,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
  free_bounce_sg:
         kfree(mqrq_cur->bounce_sg);
         mqrq_cur->bounce_sg = NULL;
+        kfree(mqrq_prev->bounce_sg);
+        mqrq_prev->bounce_sg = NULL;
 
  cleanup_queue:
         kfree(mqrq_cur->sg);
@@ -246,6 +271,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
         kfree(mqrq_cur->bounce_buf);
         mqrq_cur->bounce_buf = NULL;
 
+        kfree(mqrq_prev->sg);
+        mqrq_prev->sg = NULL;
+        kfree(mqrq_prev->bounce_buf);
+        mqrq_prev->bounce_buf = NULL;
+
         blk_cleanup_queue(mq->queue);
         return ret;
 }
@@ -255,6 +285,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         struct request_queue *q = mq->queue;
         unsigned long flags;
         struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
         /* Make sure the queue isn't suspended, as that will deadlock */
         mmc_queue_resume(mq);
@@ -277,6 +308,15 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
         kfree(mqrq_cur->bounce_buf);
         mqrq_cur->bounce_buf = NULL;
 
+        kfree(mqrq_prev->bounce_sg);
+        mqrq_prev->bounce_sg = NULL;
+
+        kfree(mqrq_prev->sg);
+        mqrq_prev->sg = NULL;
+
+        kfree(mqrq_prev->bounce_buf);
+        mqrq_prev->bounce_buf = NULL;
+
         mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index c1a69ac6fff0..1a637d2e2ca6 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -29,8 +29,9 @@ struct mmc_queue {
         int                     (*issue_fn)(struct mmc_queue *, struct request *);
         void                    *data;
         struct request_queue    *queue;
-        struct mmc_queue_req    mqrq[1];
+        struct mmc_queue_req    mqrq[2];
         struct mmc_queue_req    *mqrq_cur;
+        struct mmc_queue_req    *mqrq_prev;
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,