aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc/card
diff options
context:
space:
mode:
authorKonstantin Dorfman <kdorfman@codeaurora.org>2013-01-14 14:28:17 -0500
committerChris Ball <cjb@laptop.org>2013-02-11 13:28:49 -0500
commit2220eedfd7aea69008173a224975e10284fbe854 (patch)
tree71906c2638d2e0052b15a40e71310f6984f3ee8f /drivers/mmc/card
parent369d321ed1baa7748e770aaaae4d8effad699633 (diff)
mmc: fix async request mechanism for sequential read scenarios
When current request is running on the bus and if next request fetched by mmcqd is NULL, mmc context (mmcqd thread) gets blocked until the current request completes. This means that if new request comes in while the mmcqd thread is blocked, this new request cannot be prepared in parallel to current ongoing request. This may result in delaying the new request execution and increase its latency. This change allows to wake up the MMC thread on new request arrival. Now once the MMC thread is woken up, a new request can be fetched and prepared in parallel to the current running request which means this new request can be started immediately after the current running request completes. With this change read throughput is improved by 16%. Signed-off-by: Konstantin Dorfman <kdorfman@codeaurora.org> Reviewed-by: Seungwon Jeon <tgih.jun@samsung.com> Signed-off-by: Chris Ball <cjb@laptop.org>
Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--drivers/mmc/card/block.c30
-rw-r--r--drivers/mmc/card/queue.c22
-rw-r--r--drivers/mmc/card/queue.h3
3 files changed, 40 insertions, 15 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 21056b9ef0a0..f79b4688e471 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -113,17 +113,6 @@ struct mmc_blk_data {
113 113
114static DEFINE_MUTEX(open_lock); 114static DEFINE_MUTEX(open_lock);
115 115
116enum mmc_blk_status {
117 MMC_BLK_SUCCESS = 0,
118 MMC_BLK_PARTIAL,
119 MMC_BLK_CMD_ERR,
120 MMC_BLK_RETRY,
121 MMC_BLK_ABORT,
122 MMC_BLK_DATA_ERR,
123 MMC_BLK_ECC_ERR,
124 MMC_BLK_NOMEDIUM,
125};
126
127module_param(perdev_minors, int, 0444); 116module_param(perdev_minors, int, 0444);
128MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); 117MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
129 118
@@ -1364,8 +1353,11 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1364 } else 1353 } else
1365 areq = NULL; 1354 areq = NULL;
1366 areq = mmc_start_req(card->host, areq, (int *) &status); 1355 areq = mmc_start_req(card->host, areq, (int *) &status);
1367 if (!areq) 1356 if (!areq) {
1357 if (status == MMC_BLK_NEW_REQUEST)
1358 mq->flags |= MMC_QUEUE_NEW_REQUEST;
1368 return 0; 1359 return 0;
1360 }
1369 1361
1370 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); 1362 mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
1371 brq = &mq_rq->brq; 1363 brq = &mq_rq->brq;
@@ -1438,6 +1430,10 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1438 break; 1430 break;
1439 case MMC_BLK_NOMEDIUM: 1431 case MMC_BLK_NOMEDIUM:
1440 goto cmd_abort; 1432 goto cmd_abort;
1433 default:
1434 pr_err("%s: Unhandled return value (%d)",
1435 req->rq_disk->disk_name, status);
1436 goto cmd_abort;
1441 } 1437 }
1442 1438
1443 if (ret) { 1439 if (ret) {
@@ -1472,6 +1468,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1472 int ret; 1468 int ret;
1473 struct mmc_blk_data *md = mq->data; 1469 struct mmc_blk_data *md = mq->data;
1474 struct mmc_card *card = md->queue.card; 1470 struct mmc_card *card = md->queue.card;
1471 struct mmc_host *host = card->host;
1472 unsigned long flags;
1475 1473
1476 if (req && !mq->mqrq_prev->req) 1474 if (req && !mq->mqrq_prev->req)
1477 /* claim host only for the first request */ 1475 /* claim host only for the first request */
@@ -1486,6 +1484,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1486 goto out; 1484 goto out;
1487 } 1485 }
1488 1486
1487 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
1489 if (req && req->cmd_flags & REQ_DISCARD) { 1488 if (req && req->cmd_flags & REQ_DISCARD) {
1490 /* complete ongoing async transfer before issuing discard */ 1489 /* complete ongoing async transfer before issuing discard */
1491 if (card->host->areq) 1490 if (card->host->areq)
@@ -1501,11 +1500,16 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
1501 mmc_blk_issue_rw_rq(mq, NULL); 1500 mmc_blk_issue_rw_rq(mq, NULL);
1502 ret = mmc_blk_issue_flush(mq, req); 1501 ret = mmc_blk_issue_flush(mq, req);
1503 } else { 1502 } else {
1503 if (!req && host->areq) {
1504 spin_lock_irqsave(&host->context_info.lock, flags);
1505 host->context_info.is_waiting_last_req = true;
1506 spin_unlock_irqrestore(&host->context_info.lock, flags);
1507 }
1504 ret = mmc_blk_issue_rw_rq(mq, req); 1508 ret = mmc_blk_issue_rw_rq(mq, req);
1505 } 1509 }
1506 1510
1507out: 1511out:
1508 if (!req) 1512 if (!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST))
1509 /* release host only when there are no more requests */ 1513 /* release host only when there are no more requests */
1510 mmc_release_host(card->host); 1514 mmc_release_host(card->host);
1511 return ret; 1515 return ret;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index d630d9861e7b..5e0971016ac5 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -22,7 +22,6 @@
22 22
23#define MMC_QUEUE_BOUNCESZ 65536 23#define MMC_QUEUE_BOUNCESZ 65536
24 24
25#define MMC_QUEUE_SUSPENDED (1 << 0)
26 25
27#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) 26#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
28 27
@@ -72,6 +71,10 @@ static int mmc_queue_thread(void *d)
72 set_current_state(TASK_RUNNING); 71 set_current_state(TASK_RUNNING);
73 cmd_flags = req ? req->cmd_flags : 0; 72 cmd_flags = req ? req->cmd_flags : 0;
74 mq->issue_fn(mq, req); 73 mq->issue_fn(mq, req);
74 if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
75 mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
76 continue; /* fetch again */
77 }
75 78
76 /* 79 /*
77 * Current request becomes previous request 80 * Current request becomes previous request
@@ -113,6 +116,8 @@ static void mmc_request_fn(struct request_queue *q)
113{ 116{
114 struct mmc_queue *mq = q->queuedata; 117 struct mmc_queue *mq = q->queuedata;
115 struct request *req; 118 struct request *req;
119 unsigned long flags;
120 struct mmc_context_info *cntx;
116 121
117 if (!mq) { 122 if (!mq) {
118 while ((req = blk_fetch_request(q)) != NULL) { 123 while ((req = blk_fetch_request(q)) != NULL) {
@@ -122,7 +127,20 @@ static void mmc_request_fn(struct request_queue *q)
122 return; 127 return;
123 } 128 }
124 129
125 if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) 130 cntx = &mq->card->host->context_info;
131 if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
132 /*
133 * New MMC request arrived when MMC thread may be
134 * blocked on the previous request to be complete
135 * with no current request fetched
136 */
137 spin_lock_irqsave(&cntx->lock, flags);
138 if (cntx->is_waiting_last_req) {
139 cntx->is_new_req = true;
140 wake_up_interruptible(&cntx->wait);
141 }
142 spin_unlock_irqrestore(&cntx->lock, flags);
143 } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
126 wake_up_process(mq->thread); 144 wake_up_process(mq->thread);
127} 145}
128 146
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index d2a1eb4b9f9f..e20c27b2b8b4 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -27,6 +27,9 @@ struct mmc_queue {
27 struct task_struct *thread; 27 struct task_struct *thread;
28 struct semaphore thread_sem; 28 struct semaphore thread_sem;
29 unsigned int flags; 29 unsigned int flags;
30#define MMC_QUEUE_SUSPENDED (1 << 0)
31#define MMC_QUEUE_NEW_REQUEST (1 << 1)
32
30 int (*issue_fn)(struct mmc_queue *, struct request *); 33 int (*issue_fn)(struct mmc_queue *, struct request *);
31 void *data; 34 void *data;
32 struct request_queue *queue; 35 struct request_queue *queue;