path: root/drivers/mmc/card/queue.c
author		Per Forlin <per.forlin@linaro.org>	2011-07-01 12:55:33 -0400
committer	Chris Ball <cjb@laptop.org>	2011-07-20 17:21:15 -0400
commit		ee8a43a51c7681f19fe23b6b936e1d8094a8b7d1 (patch)
tree		8d4e11c82e24b2538c87fb655e499e7d7f9b99ca /drivers/mmc/card/queue.c
parent		04296b7bfda45295a568b4b312e03828fae801dc (diff)
mmc: block: add handling for two parallel block requests in issue_rw_rq
Change mmc_blk_issue_rw_rq() to become asynchronous. The execution flow looks like this:

* The mmc-queue calls issue_rw_rq(), which sends the request to the host and returns back to the mmc-queue.
* The mmc-queue calls issue_rw_rq() again with a new request.
* This new request is prepared in issue_rw_rq(), then it waits for the active request to complete before pushing it to the host.
* When the mmc-queue is empty it will call issue_rw_rq() with a NULL req to finish off the active request without starting a new request.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Venkatraman S <svenkatr@ti.com>
Tested-by: Sourav Poddar <sourav.poddar@ti.com>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
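For readers unfamiliar with this flow, the pipelining described above can be modeled with a small, self-contained sketch. This is an illustration only: the two-slot (current/previous) bookkeeping mirrors the idea of the patch, but the helper names prepare_req(), start_req() and wait_for_req_done() are hypothetical stand-ins, not functions from this patch or the mmc core.

	#include <stdio.h>
	#include <stdbool.h>

	/* Hypothetical model of one request slot. */
	struct slot {
		int id;
		bool busy;	/* request has been handed to the "host" */
	};

	static void prepare_req(struct slot *s)       { printf("prepare  req %d\n", s->id); }
	static void start_req(struct slot *s)         { s->busy = true;  printf("start    req %d\n", s->id); }
	static void wait_for_req_done(struct slot *s) { s->busy = false; printf("complete req %d\n", s->id); }

	/*
	 * Models the asynchronous issue path: prepare the new request while
	 * the previous one may still be running, wait for the previous one,
	 * then start the new one and return without waiting for it.
	 * next == NULL means "finish off the active request only".
	 */
	static void issue_rw_rq(struct slot *next, struct slot *prev)
	{
		if (next)
			prepare_req(next);		/* CPU work overlaps the active transfer */
		if (prev && prev->busy)
			wait_for_req_done(prev);	/* complete what was issued last call */
		if (next)
			start_req(next);		/* hand over and return immediately */
	}

	int main(void)
	{
		struct slot a = { .id = 1 }, b = { .id = 2 };

		issue_rw_rq(&a, &b);	/* first request: nothing to wait for */
		issue_rw_rq(&b, &a);	/* req 2 is prepared while req 1 runs */
		issue_rw_rq(NULL, &b);	/* queue empty: finish off the active request */
		return 0;
	}

Running the sketch prints the interleaving the commit message describes: request 2 is prepared before request 1 completes, and the final NULL call only waits for the last active request.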
Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--	drivers/mmc/card/queue.c	| 17 ++++++++++++-----
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index a38d310f5030..45fb362e3f01 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -52,6 +52,7 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
+		struct mmc_queue_req *tmp;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
@@ -59,7 +60,10 @@ static int mmc_queue_thread(void *d)
 		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (!req) {
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			mq->issue_fn(mq, req);
+		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
 				break;
@@ -67,11 +71,14 @@ static int mmc_queue_thread(void *d)
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
-			continue;
 		}
-		set_current_state(TASK_RUNNING);
 
-		mq->issue_fn(mq, req);
+		/* Current request becomes previous request and vice versa. */
+		mq->mqrq_prev->brq.mrq.data = NULL;
+		mq->mqrq_prev->req = NULL;
+		tmp = mq->mqrq_prev;
+		mq->mqrq_prev = mq->mqrq_cur;
+		mq->mqrq_cur = tmp;
 	} while (1);
 	up(&mq->thread_sem);
 
@@ -97,7 +104,7 @@ static void mmc_request(struct request_queue *q)
 		return;
 	}
 
-	if (!mq->mqrq_cur->req)
+	if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
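A note on the final hunk, as a sketch rather than code from the patch: after the swap at the bottom of the queue-thread loop, the slot last handed to the host is always reachable through mqrq_prev and mqrq_cur is empty, so the thread can only be asleep when both slots are idle. The hypothetical helper below restates the new wake-up condition in mmc_request() in that form.

	#include <stdbool.h>

	/*
	 * Illustrative only: whenever either slot holds a request the queue
	 * thread is still inside its loop and will fetch newly queued work
	 * itself on the next iteration, so wake_up_process() is only needed
	 * when both slots are empty.
	 */
	static bool queue_thread_needs_wakeup(bool cur_busy, bool prev_busy)
	{
		return !cur_busy && !prev_busy;
	}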