path: root/drivers/mmc/core/queue.c
author	Adrian Hunter <adrian.hunter@intel.com>	2017-03-13 08:36:35 -0400
committer	Ulf Hansson <ulf.hansson@linaro.org>	2017-04-24 15:42:00 -0400
commit	cdf8a6fb48882651049e468e6b16956fb83db86c (patch)
tree	4637e5764e797b5e97b3327433b4c9d9d2927405 /drivers/mmc/core/queue.c
parent	8ddfe07e18c9c82f7567d3cfbd68d8b59764d015 (diff)
mmc: block: Introduce queue semantics
Change from viewing the requests in progress as 'current' and 'previous', to viewing them as a queue. The current request is allocated to the first free slot. The presence of incomplete requests is determined from the count (mq->qcnt) of entries in the queue. Non-read-write requests (i.e. discards and flushes) are not added to the queue at all and require no special handling. Also no special handling is needed for the MMC_BLK_NEW_REQUEST case.

As well as allowing an arbitrarily sized queue, the queue thread function is significantly simpler.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
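For readers unfamiliar with the slot scheme the patch introduces, below is a minimal userspace sketch (illustrative only, not the kernel code) of how mmc_queue_req_find()/mmc_queue_req_free() track in-flight requests with a slot bitmap (qslots), a count (qcnt) and a depth (qdepth). The sketch_* names are invented stand-ins for the kernel structures, and the kernel's ffz()/test_bit()/__set_bit() helpers are replaced with plain bit arithmetic.

/*
 * Sketch of first-free-slot allocation: each in-flight request occupies
 * the first free entry of mqrq[], tracked by a bitmap and a count.
 */
#include <assert.h>
#include <stdio.h>

struct sketch_req { int id; };          /* stand-in for struct request */

struct sketch_mqrq {                    /* stand-in for struct mmc_queue_req */
	struct sketch_req *req;
	int task_id;
};

struct sketch_mq {                      /* stand-in for struct mmc_queue */
	struct sketch_mqrq *mqrq;
	unsigned long qslots;           /* bitmap of occupied slots */
	int qcnt;                       /* number of requests in flight */
	int qdepth;
};

/* index of first zero bit, i.e. first free slot (kernel ffz() equivalent) */
static int first_free_slot(unsigned long slots)
{
	int i = 0;

	while (slots & (1UL << i))
		i++;
	return i;
}

static struct sketch_mqrq *req_find(struct sketch_mq *mq, struct sketch_req *req)
{
	int i = first_free_slot(mq->qslots);

	if (i >= mq->qdepth)
		return NULL;            /* all slots busy: queue is full */
	mq->mqrq[i].req = req;
	mq->qslots |= 1UL << i;
	mq->qcnt++;
	return &mq->mqrq[i];
}

static void req_free(struct sketch_mq *mq, struct sketch_mqrq *mqrq)
{
	mqrq->req = NULL;
	mq->qslots &= ~(1UL << mqrq->task_id);
	mq->qcnt--;
}

int main(void)
{
	struct sketch_mqrq slots[2] = { { .task_id = 0 }, { .task_id = 1 } };
	struct sketch_mq mq = { .mqrq = slots, .qdepth = 2 };
	struct sketch_req a = { 1 }, b = { 2 }, c = { 3 };

	struct sketch_mqrq *ma = req_find(&mq, &a);
	struct sketch_mqrq *mb = req_find(&mq, &b);

	assert(ma && mb && mq.qcnt == 2);
	assert(!req_find(&mq, &c));     /* both slots in use */

	req_free(&mq, ma);              /* completing 'a' frees slot 0 ... */
	assert(req_find(&mq, &c) == ma);/* ... so 'c' reuses the first free slot */

	printf("in flight: %d\n", mq.qcnt);
	return 0;
}

In the patch itself the "is anything still in progress?" test simply becomes mq->qcnt, which is what replaces the old mq->mqrq_prev->req checks in mmc_queue_thread() below.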
Diffstat (limited to 'drivers/mmc/core/queue.c')
-rw-r--r--	drivers/mmc/core/queue.c	| 75
1 file changed, 47 insertions(+), 28 deletions(-)
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 493eb10ce580..4a2045527b62 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -40,6 +40,35 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
 	return BLKPREP_OK;
 }
 
+struct mmc_queue_req *mmc_queue_req_find(struct mmc_queue *mq,
+					 struct request *req)
+{
+	struct mmc_queue_req *mqrq;
+	int i = ffz(mq->qslots);
+
+	if (i >= mq->qdepth)
+		return NULL;
+
+	mqrq = &mq->mqrq[i];
+	WARN_ON(mqrq->req || mq->qcnt >= mq->qdepth ||
+		test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = req;
+	mq->qcnt += 1;
+	__set_bit(mqrq->task_id, &mq->qslots);
+
+	return mqrq;
+}
+
+void mmc_queue_req_free(struct mmc_queue *mq,
+			struct mmc_queue_req *mqrq)
+{
+	WARN_ON(!mqrq->req || mq->qcnt < 1 ||
+		!test_bit(mqrq->task_id, &mq->qslots));
+	mqrq->req = NULL;
+	mq->qcnt -= 1;
+	__clear_bit(mqrq->task_id, &mq->qslots);
+}
+
 static int mmc_queue_thread(void *d)
 {
 	struct mmc_queue *mq = d;
@@ -50,7 +79,7 @@ static int mmc_queue_thread(void *d)
 
 	down(&mq->thread_sem);
 	do {
-		struct request *req = NULL;
+		struct request *req;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -63,38 +92,17 @@ static int mmc_queue_thread(void *d)
 			 * Dispatch queue is empty so set flags for
 			 * mmc_request_fn() to wake us up.
 			 */
-			if (mq->mqrq_prev->req)
+			if (mq->qcnt)
 				cntx->is_waiting_last_req = true;
 			else
 				mq->asleep = true;
 		}
-		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (req || mq->mqrq_prev->req) {
-			bool req_is_special = mmc_req_is_special(req);
-
+		if (req || mq->qcnt) {
 			set_current_state(TASK_RUNNING);
 			mmc_blk_issue_rq(mq, req);
 			cond_resched();
-			if (mq->new_request) {
-				mq->new_request = false;
-				continue; /* fetch again */
-			}
-
-			/*
-			 * Current request becomes previous request
-			 * and vice versa.
-			 * In case of special requests, current request
-			 * has been finished. Do not assign it to previous
-			 * request.
-			 */
-			if (req_is_special)
-				mq->mqrq_cur->req = NULL;
-
-			mq->mqrq_prev->brq.mrq.data = NULL;
-			mq->mqrq_prev->req = NULL;
-			swap(mq->mqrq_prev, mq->mqrq_cur);
 		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
@@ -177,6 +185,20 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 		queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
+static struct mmc_queue_req *mmc_queue_alloc_mqrqs(int qdepth)
+{
+	struct mmc_queue_req *mqrq;
+	int i;
+
+	mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
+	if (mqrq) {
+		for (i = 0; i < qdepth; i++)
+			mqrq[i].task_id = i;
+	}
+
+	return mqrq;
+}
+
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
 static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
 					unsigned int bouncesz)
@@ -279,12 +301,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 		return -ENOMEM;
 
 	mq->qdepth = 2;
-	mq->mqrq = kcalloc(mq->qdepth, sizeof(struct mmc_queue_req),
-			   GFP_KERNEL);
+	mq->mqrq = mmc_queue_alloc_mqrqs(mq->qdepth);
 	if (!mq->mqrq)
 		goto blk_cleanup;
-	mq->mqrq_cur = &mq->mqrq[0];
-	mq->mqrq_prev = &mq->mqrq[1];
 	mq->queue->queuedata = mq;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);