author		Tejun Heo <tj@kernel.org>		2009-05-07 22:54:15 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:52:17 -0400
commit		296b2f6ae654581adc27f0d6f0af454c7f3d06ee (patch)
tree		8fab2b91741336d41e559a839b547d7ac3090524 /drivers/mmc
parent		fb3ac7f6b811eac8e0aafa3df1c16ed872e898a8 (diff)
block: convert to dequeueing model (easy ones)
plat-omap/mailbox, floppy, viocd, mspro_block, i2o_block and
mmc/card/queue are already pretty close to the dequeueing model and can be
converted with simple changes. Convert them.
While at it,
* xen-blkfront: !fs check moved downwards to share dequeue call with
normal path.
* mspro_block: __blk_end_request(..., blk_rq_cur_bytes()) converted to
__blk_end_request_cur()
* mmc/card/queue: loop of __blk_end_request() converted to
__blk_end_request_all()
[ Impact: dequeue in-flight request ]
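
For context: before this change a driver could peek at the head request with
elv_next_request() and leave it on the queue while working on it; in the
dequeueing model the driver takes the request off the queue as soon as it
picks it up, so the block layer sees it as in flight. Below is a minimal
sketch of the pattern against the 2.6.30-era block API, not code from this
patch; example_request_fn() and process_one_request() are hypothetical names,
and elv_next_request()/blkdev_dequeue_request() were later folded into
blk_fetch_request() and friends.

#include <linux/blkdev.h>

/* Hypothetical driver hook; returns 0 on success or -EIO on failure. */
static int process_one_request(struct request *req);

/* A queue's request_fn is entered with q->queue_lock held. */
static void example_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		/* Dequeueing model: remove the request from the queue
		 * before processing it, as this patch does for mmc. */
		blkdev_dequeue_request(req);

		/* Complete every remaining byte in a single call. */
		__blk_end_request_all(req, process_one_request(req));
	}
}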
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--	drivers/mmc/card/queue.c	12
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c67..4b70f1e28347 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -54,8 +54,11 @@ static int mmc_queue_thread(void *d)
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!blk_queue_plugged(q))
+		if (!blk_queue_plugged(q)) {
 			req = elv_next_request(q);
+			if (req)
+				blkdev_dequeue_request(req);
+		}
 		mq->req = req;
 		spin_unlock_irq(q->queue_lock);
 
@@ -88,15 +91,12 @@ static void mmc_request(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
-	int ret;
 
 	if (!mq) {
 		printk(KERN_ERR "MMC: killing requests for dead queue\n");
 		while ((req = elv_next_request(q)) != NULL) {
-			do {
-				ret = __blk_end_request(req, -EIO,
-							blk_rq_cur_bytes(req));
-			} while (ret);
+			blkdev_dequeue_request(req);
+			__blk_end_request_all(req, -EIO);
 		}
 		return;
 	}
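
The dead-queue hunk above works because __blk_end_request() completes up to
nr_bytes of the request and returns true while unfinished sectors remain,
which is why the old code had to spin chunk by chunk, whereas
__blk_end_request_all() finishes the whole request at once. A small sketch of
the equivalent error path; fail_dead_queue_request() is a made-up name, and
both completion helpers expect q->queue_lock to be held.

#include <linux/blkdev.h>

/* Fail @req in its entirety; caller holds q->queue_lock. */
static void fail_dead_queue_request(struct request *req)
{
	/* Old idiom: complete one chunk at a time until
	 * __blk_end_request() reports the request is finished:
	 *
	 *	int ret;
	 *	do {
	 *		ret = __blk_end_request(req, -EIO,
	 *					blk_rq_cur_bytes(req));
	 *	} while (ret);
	 *
	 * New idiom: end every remaining byte in one call. */
	__blk_end_request_all(req, -EIO);
}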