aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/core
diff options
context:
space:
mode:
authorKonstantin Dorfman <kdorfman@codeaurora.org>2013-01-14 14:28:17 -0500
committerChris Ball <cjb@laptop.org>2013-02-11 13:28:49 -0500
commit2220eedfd7aea69008173a224975e10284fbe854 (patch)
tree71906c2638d2e0052b15a40e71310f6984f3ee8f /drivers/mmc/core
parent369d321ed1baa7748e770aaaae4d8effad699633 (diff)
mmc: fix async request mechanism for sequential read scenarios
When current request is running on the bus and if next request fetched by mmcqd is NULL, mmc context (mmcqd thread) gets blocked until the current request completes. This means that if new request comes in while the mmcqd thread is blocked, this new request can not be prepared in parallel to current ongoing request. This may result in delaying the new request execution and increase its latency. This change allows waking up the MMC thread on new request arrival. Now once the MMC thread is woken up, a new request can be fetched and prepared in parallel to the current running request which means this new request can be started immediately after the current running request completes. With this change read throughput is improved by 16%. Signed-off-by: Konstantin Dorfman <kdorfman@codeaurora.org> Reviewed-by: Seungwon Jeon <tgih.jun@samsung.com> Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/core')
-rw-r--r--drivers/mmc/core/bus.c1
-rw-r--r--drivers/mmc/core/core.c121
-rw-r--r--drivers/mmc/core/core.h1
3 files changed, 120 insertions, 3 deletions
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 420cb6753c1e..e219c97a02a4 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -321,6 +321,7 @@ int mmc_add_card(struct mmc_card *card)
321#ifdef CONFIG_DEBUG_FS 321#ifdef CONFIG_DEBUG_FS
322 mmc_add_card_debugfs(card); 322 mmc_add_card_debugfs(card);
323#endif 323#endif
324 mmc_init_context_info(card->host);
324 325
325 ret = device_add(&card->dev); 326 ret = device_add(&card->dev);
326 if (ret) 327 if (ret)
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index aaed7687cf09..8b3a1222e665 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -319,11 +319,44 @@ out:
319} 319}
320EXPORT_SYMBOL(mmc_start_bkops); 320EXPORT_SYMBOL(mmc_start_bkops);
321 321
322/*
323 * mmc_wait_data_done() - done callback for data request
324 * @mrq: done data request
325 *
326 * Wakes up mmc context, passed as a callback to host controller driver
327 */
328static void mmc_wait_data_done(struct mmc_request *mrq)
329{
330 mrq->host->context_info.is_done_rcv = true;
331 wake_up_interruptible(&mrq->host->context_info.wait);
332}
333
322static void mmc_wait_done(struct mmc_request *mrq) 334static void mmc_wait_done(struct mmc_request *mrq)
323{ 335{
324 complete(&mrq->completion); 336 complete(&mrq->completion);
325} 337}
326 338
339/*
340 *__mmc_start_data_req() - starts data request
341 * @host: MMC host to start the request
342 * @mrq: data request to start
343 *
344 * Sets the done callback to be called when request is completed by the card.
345 * Starts data mmc request execution
346 */
347static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq)
348{
349 mrq->done = mmc_wait_data_done;
350 mrq->host = host;
351 if (mmc_card_removed(host->card)) {
352 mrq->cmd->error = -ENOMEDIUM;
353 return -ENOMEDIUM;
354 }
355 mmc_start_request(host, mrq);
356
357 return 0;
358}
359
327static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) 360static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
328{ 361{
329 init_completion(&mrq->completion); 362 init_completion(&mrq->completion);
@@ -337,6 +370,62 @@ static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
337 return 0; 370 return 0;
338} 371}
339 372
373/*
374 * mmc_wait_for_data_req_done() - wait for request completed
375 * @host: MMC host to prepare the command.
376 * @mrq: MMC request to wait for
377 *
378 * Blocks MMC context till host controller will ack end of data request
379 * execution or new request notification arrives from the block layer.
380 * Handles command retries.
381 *
382 * Returns enum mmc_blk_status after checking errors.
383 */
384static int mmc_wait_for_data_req_done(struct mmc_host *host,
385 struct mmc_request *mrq,
386 struct mmc_async_req *next_req)
387{
388 struct mmc_command *cmd;
389 struct mmc_context_info *context_info = &host->context_info;
390 int err;
391 unsigned long flags;
392
393 while (1) {
394 wait_event_interruptible(context_info->wait,
395 (context_info->is_done_rcv ||
396 context_info->is_new_req));
397 spin_lock_irqsave(&context_info->lock, flags);
398 context_info->is_waiting_last_req = false;
399 spin_unlock_irqrestore(&context_info->lock, flags);
400 if (context_info->is_done_rcv) {
401 context_info->is_done_rcv = false;
402 context_info->is_new_req = false;
403 cmd = mrq->cmd;
404 if (!cmd->error || !cmd->retries ||
405 mmc_card_removed(host->card)) {
406 err = host->areq->err_check(host->card,
407 host->areq);
408 break; /* return err */
409 } else {
410 pr_info("%s: req failed (CMD%u): %d, retrying...\n",
411 mmc_hostname(host),
412 cmd->opcode, cmd->error);
413 cmd->retries--;
414 cmd->error = 0;
415 host->ops->request(host, mrq);
416 continue; /* wait for done/new event again */
417 }
418 } else if (context_info->is_new_req) {
419 context_info->is_new_req = false;
420 if (!next_req) {
421 err = MMC_BLK_NEW_REQUEST;
422 break; /* return err */
423 }
424 }
425 }
426 return err;
427}
428
340static void mmc_wait_for_req_done(struct mmc_host *host, 429static void mmc_wait_for_req_done(struct mmc_host *host,
341 struct mmc_request *mrq) 430 struct mmc_request *mrq)
342{ 431{
@@ -426,8 +515,17 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
426 mmc_pre_req(host, areq->mrq, !host->areq); 515 mmc_pre_req(host, areq->mrq, !host->areq);
427 516
428 if (host->areq) { 517 if (host->areq) {
429 mmc_wait_for_req_done(host, host->areq->mrq); 518 err = mmc_wait_for_data_req_done(host, host->areq->mrq,
430 err = host->areq->err_check(host->card, host->areq); 519 areq);
520 if (err == MMC_BLK_NEW_REQUEST) {
521 if (error)
522 *error = err;
523 /*
524 * The previous request was not completed,
525 * nothing to return
526 */
527 return NULL;
528 }
431 /* 529 /*
432 * Check BKOPS urgency for each R1 response 530 * Check BKOPS urgency for each R1 response
433 */ 531 */
@@ -439,7 +537,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
439 } 537 }
440 538
441 if (!err && areq) 539 if (!err && areq)
442 start_err = __mmc_start_req(host, areq->mrq); 540 start_err = __mmc_start_data_req(host, areq->mrq);
443 541
444 if (host->areq) 542 if (host->areq)
445 mmc_post_req(host, host->areq->mrq, 0); 543 mmc_post_req(host, host->areq->mrq, 0);
@@ -2581,6 +2679,23 @@ int mmc_pm_notify(struct notifier_block *notify_block,
2581} 2679}
2582#endif 2680#endif
2583 2681
2682/**
2683 * mmc_init_context_info() - init synchronization context
2684 * @host: mmc host
2685 *
2686 * Init struct context_info needed to implement asynchronous
2687 * request mechanism, used by mmc core, host driver and mmc requests
2688 * supplier.
2689 */
2690void mmc_init_context_info(struct mmc_host *host)
2691{
2692 spin_lock_init(&host->context_info.lock);
2693 host->context_info.is_new_req = false;
2694 host->context_info.is_done_rcv = false;
2695 host->context_info.is_waiting_last_req = false;
2696 init_waitqueue_head(&host->context_info.wait);
2697}
2698
2584static int __init mmc_init(void) 2699static int __init mmc_init(void)
2585{ 2700{
2586 int ret; 2701 int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 3bdafbca354f..0272b3284b5e 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -76,5 +76,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);
76void mmc_add_card_debugfs(struct mmc_card *card); 76void mmc_add_card_debugfs(struct mmc_card *card);
77void mmc_remove_card_debugfs(struct mmc_card *card); 77void mmc_remove_card_debugfs(struct mmc_card *card);
78 78
79void mmc_init_context_info(struct mmc_host *host);
79#endif 80#endif
80 81