author    | Tejun Heo <tj@kernel.org>          | 2009-05-07 22:54:15 -0400
committer | Jens Axboe <jens.axboe@oracle.com> | 2009-05-11 03:52:17 -0400
commit    | 296b2f6ae654581adc27f0d6f0af454c7f3d06ee (patch)
tree      | 8fab2b91741336d41e559a839b547d7ac3090524
parent    | fb3ac7f6b811eac8e0aafa3df1c16ed872e898a8 (diff)
block: convert to dequeueing model (easy ones)
plat-omap/mailbox, floppy, viocd, mspro_block, i2o_block and
mmc/card/queue are already pretty close to the dequeueing model and can
be converted with simple changes. Convert them.
While at it,
* xen-blkfront: !fs check moved downwards to share dequeue call with
normal path.
* mspro_block: __blk_end_request(..., blk_rq_cur_bytes()) converted to
__blk_end_request_cur()
* mmc/card/queue: loop of __blk_end_request() converted to
__blk_end_request_all()
[ Impact: dequeue in-flight request ]
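All of these conversions follow the same shape: the driver peeks a request
with elv_next_request(), immediately takes ownership of it with
blkdev_dequeue_request(), and from then on either completes the in-flight
request or gives it back with blk_requeue_request() if it cannot be issued.
Below is a minimal sketch of a request_fn in the dequeueing model, for
illustration only; my_issue() is a hypothetical driver-specific submission
helper and is not part of any driver touched here.

/*
 * Illustrative sketch, not taken from any of the drivers above.
 * ->request_fn is invoked with q->queue_lock already held.
 */
static void my_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = elv_next_request(q)) != NULL) {
		/* Take the request off the queue; it is now in flight. */
		blkdev_dequeue_request(req);

		if (!blk_fs_request(req)) {
			/* Fail and complete the whole request in one call. */
			__blk_end_request_all(req, -EIO);
			continue;
		}

		if (my_issue(req) < 0) {
			/* Submission failed; give the request back to the queue. */
			blk_requeue_request(q, req);
			break;
		}
	}
}

The helpers __blk_end_request_all() and __blk_end_request_cur() complete the
whole request, or just the current chunk, in a single call, which is why the
partial-completion loops in the hunks below collapse into single calls.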
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: Markus Lidel <Markus.Lidel@shadowconnect.com>
Cc: Pierre Ossman <drzeus@drzeus.cx>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r-- | arch/arm/plat-omap/mailbox.c        |  9
-rw-r--r-- | drivers/block/floppy.c              |  4
-rw-r--r-- | drivers/block/xen-blkfront.c        | 13
-rw-r--r-- | drivers/cdrom/viocd.c               |  2
-rw-r--r-- | drivers/memstick/core/mspro_block.c |  8
-rw-r--r-- | drivers/message/i2o/i2o_block.c     |  6
-rw-r--r-- | drivers/mmc/card/queue.c            | 12
7 files changed, 36 insertions, 18 deletions
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 538ba7541d3..7a1f5c25fd1 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -198,6 +198,8 @@ static void mbox_tx_work(struct work_struct *work)
 
 		spin_lock(q->queue_lock);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock(q->queue_lock);
 
 		if (!rq)
@@ -208,6 +210,9 @@ static void mbox_tx_work(struct work_struct *work)
 		ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
 		if (ret) {
 			enable_mbox_irq(mbox, IRQ_TX);
+			spin_lock(q->queue_lock);
+			blk_requeue_request(q, rq);
+			spin_unlock(q->queue_lock);
 			return;
 		}
 
@@ -238,6 +243,8 @@ static void mbox_rx_work(struct work_struct *work)
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 		if (!rq)
 			break;
@@ -345,6 +352,8 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 	while (1) {
 		spin_lock_irqsave(q->queue_lock, flags);
 		rq = elv_next_request(q);
+		if (rq)
+			blkdev_dequeue_request(rq);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 
 		if (!rq)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1e27ed9208b..e2c70d2085a 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -931,7 +931,7 @@ static inline void unlock_fdc(void)
 	del_timer(&fd_timeout);
 	cont = NULL;
 	clear_bit(0, &fdc_busy);
-	if (elv_next_request(floppy_queue))
+	if (current_req || elv_next_request(floppy_queue))
 		do_fd_request(floppy_queue);
 	spin_unlock_irqrestore(&floppy_lock, flags);
 	wake_up(&fdc_wait);
@@ -2913,6 +2913,8 @@ static void redo_fd_request(void)
 
 	spin_lock_irq(floppy_queue->queue_lock);
 	req = elv_next_request(floppy_queue);
+	if (req)
+		blkdev_dequeue_request(req);
 	spin_unlock_irq(floppy_queue->queue_lock);
 	if (!req) {
 		do_floppy = NULL;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 91fc56597e9..66f834571b8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -301,22 +301,23 @@ static void do_blkif_request(struct request_queue *rq)
 
 	while ((req = elv_next_request(rq)) != NULL) {
 		info = req->rq_disk->private_data;
-		if (!blk_fs_request(req)) {
-			__blk_end_request_cur(req, -EIO);
-			continue;
-		}
 
 		if (RING_FULL(&info->ring))
 			goto wait;
 
+		blkdev_dequeue_request(req);
+
+		if (!blk_fs_request(req)) {
+			__blk_end_request_all(req, -EIO);
+			continue;
+		}
+
 		pr_debug("do_blk_req %p: cmd %p, sec %lx, "
 			 "(%u/%u) buffer:%p [%s]\n",
 			 req, req->cmd, (unsigned long)blk_rq_pos(req),
 			 blk_rq_cur_sectors(req), blk_rq_sectors(req),
 			 req->buffer, rq_data_dir(req) ? "write" : "read");
 
-
-		blkdev_dequeue_request(req);
 		if (blkif_queue_request(req)) {
 			blk_requeue_request(rq, req);
 wait:
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 6e190a93d8d..bbe9f086734 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -298,6 +298,8 @@ static void do_viocd_request(struct request_queue *q)
 	struct request *req;
 
 	while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
+		blkdev_dequeue_request(req);
+
 		if (!blk_fs_request(req))
 			__blk_end_request_all(req, -EIO);
 		else if (send_request(req) < 0) {
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 93b2c618565..58f5be8cd69 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -672,8 +672,7 @@ try_again:
 					       msb->req_sg);
 
 		if (!msb->seg_count) {
-			chunk = __blk_end_request(msb->block_req, -ENOMEM,
-					blk_rq_cur_bytes(msb->block_req));
+			chunk = __blk_end_request_cur(msb->block_req, -ENOMEM);
 			continue;
 		}
 
@@ -711,6 +710,7 @@ try_again:
 		dev_dbg(&card->dev, "issue end\n");
 		return -EAGAIN;
 	}
+	blkdev_dequeue_request(msb->block_req);
 
 	dev_dbg(&card->dev, "trying again\n");
 	chunk = 1;
@@ -825,8 +825,10 @@ static void mspro_block_submit_req(struct request_queue *q)
 		return;
 
 	if (msb->eject) {
-		while ((req = elv_next_request(q)) != NULL)
+		while ((req = elv_next_request(q)) != NULL) {
+			blkdev_dequeue_request(req);
 			__blk_end_request_all(req, -ENODEV);
+		}
 
 		return;
 	}
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index e153f5d5237..8b5cbfc3ba9 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -916,8 +916,10 @@ static void i2o_block_request_fn(struct request_queue *q)
 				blk_stop_queue(q);
 				break;
 			}
-		} else
-			__blk_end_request_cur(req, -EIO);
+		} else {
+			blkdev_dequeue_request(req);
+			__blk_end_request_all(req, -EIO);
+		}
 	}
 };
 
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7a72e75d5c6..4b70f1e2834 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -54,8 +54,11 @@ static int mmc_queue_thread(void *d)
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!blk_queue_plugged(q))
+		if (!blk_queue_plugged(q)) {
 			req = elv_next_request(q);
+			if (req)
+				blkdev_dequeue_request(req);
+		}
 		mq->req = req;
 		spin_unlock_irq(q->queue_lock);
 
@@ -88,15 +91,12 @@ static void mmc_request(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
-	int ret;
 
 	if (!mq) {
 		printk(KERN_ERR "MMC: killing requests for dead queue\n");
 		while ((req = elv_next_request(q)) != NULL) {
-			do {
-				ret = __blk_end_request(req, -EIO,
-						blk_rq_cur_bytes(req));
-			} while (ret);
+			blkdev_dequeue_request(req);
+			__blk_end_request_all(req, -EIO);
 		}
 		return;
 	}