author     Tejun Heo <tj@kernel.org>             2009-04-22 22:05:19 -0400
committer  Jens Axboe <jens.axboe@oracle.com>    2009-04-28 01:37:35 -0400
commit     40cbbb781d3eba5d6ac0860db078af490e5c7c6b (patch)
tree       dec374543cf045fc630bccddbb7646c695094b0d
parent     b243ddcbe9be146172baa544dadecebf156eda0e (diff)
block: implement and use [__]blk_end_request_all()
There are many [__]blk_end_request() call sites which call it with the full
request length and expect full completion.  Many of them ensure that the
request actually completes by doing BUG_ON() on the return value, which is
awkward and error-prone.

This patch adds [__]blk_end_request_all() which takes @rq and @error and
fully completes the request.  A BUG_ON() is added to ensure that this
actually happens.

Most conversions are simple but there are a few noteworthy ones.

* cdrom/viocd: viocd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/block/dasd: dasd_end_request() replaced with direct calls to
  __blk_end_request_all().

* s390/char/tape_block: tapeblock_end_request() replaced with direct
  calls to blk_end_request_all().

[ Impact: cleanup ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Mike Miller <mike.miller@hp.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
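In practice the conversion at each call site reduces to the pattern below. This is only an illustrative sketch, not code from the patch itself: the wrapper names example_complete_old() and example_complete_new() are hypothetical stand-ins for the per-driver completion paths changed in the diff.

    /* Hypothetical wrappers illustrating the before/after pattern. */

    /* Before: complete the full request length by hand and assert that
     * nothing was left pending. */
    static void example_complete_old(struct request *rq, int error)
    {
            if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
                    BUG();
    }

    /* After: the new helper wraps exactly that sequence, including the
     * BUG_ON() on partial completion (see the blkdev.h hunks below). */
    static void example_complete_new(struct request *rq, int error)
    {
            __blk_end_request_all(rq, error);
    }

Moving the length calculation and the BUG_ON() into one helper keeps the "complete the whole request or die" invariant in a single place instead of repeating it at every call site.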
-rw-r--r--   arch/arm/plat-omap/mailbox.c          11
-rw-r--r--   block/blk-barrier.c                    9
-rw-r--r--   block/blk-core.c                       2
-rw-r--r--   block/elevator.c                       2
-rw-r--r--   drivers/block/cpqarray.c               3
-rw-r--r--   drivers/block/sx8.c                    3
-rw-r--r--   drivers/block/virtio_blk.c             2
-rw-r--r--   drivers/block/xen-blkfront.c           4
-rw-r--r--   drivers/cdrom/gdrom.c                  2
-rw-r--r--   drivers/cdrom/viocd.c                 25
-rw-r--r--   drivers/memstick/core/mspro_block.c    2
-rw-r--r--   drivers/s390/block/dasd.c             17
-rw-r--r--   drivers/s390/char/tape_block.c        15
-rw-r--r--   drivers/scsi/scsi_lib.c                2
-rw-r--r--   include/linux/blkdev.h                32
15 files changed, 58 insertions, 73 deletions
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 0abfbaa59871..cf81bad8aec2 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -192,8 +192,7 @@ static void mbox_tx_work(struct work_struct *work)
                 }
 
                 spin_lock(q->queue_lock);
-                if (__blk_end_request(rq, 0, 0))
-                        BUG();
+                __blk_end_request_all(rq, 0);
                 spin_unlock(q->queue_lock);
         }
 }
@@ -224,10 +223,7 @@ static void mbox_rx_work(struct work_struct *work)
                         break;
 
                 msg = (mbox_msg_t) rq->data;
-
-                if (blk_end_request(rq, 0, 0))
-                        BUG();
-
+                blk_end_request_all(rq, 0);
                 mbox->rxq->callback((void *)msg);
         }
 }
@@ -337,8 +333,7 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
 
                 *p = (mbox_msg_t) rq->data;
 
-                if (blk_end_request(rq, 0, 0))
-                        BUG();
+                blk_end_request_all(rq, 0);
 
                 if (unlikely(mbox_seq_test(mbox, *p))) {
                         pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 20b4111fa050..c8d087655eff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -106,10 +106,7 @@ bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
          */
         q->ordseq = 0;
         rq = q->orig_bar_rq;
-
-        if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
-                BUG();
-
+        __blk_end_request_all(rq, q->orderr);
         return true;
 }
 
@@ -252,9 +249,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
                          * with prejudice.
                          */
                         elv_dequeue_request(q, rq);
-                        if (__blk_end_request(rq, -EOPNOTSUPP,
-                                              blk_rq_bytes(rq)))
-                                BUG();
+                        __blk_end_request_all(rq, -EOPNOTSUPP);
                         *rqp = NULL;
                         return false;
                 }
diff --git a/block/blk-core.c b/block/blk-core.c
index b84250d3019b..0520cc704585 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1780,7 +1780,7 @@ struct request *elv_next_request(struct request_queue *q)
                         break;
                 } else if (ret == BLKPREP_KILL) {
                         rq->cmd_flags |= REQ_QUIET;
-                        __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+                        __blk_end_request_all(rq, -EIO);
                 } else {
                         printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                         break;
diff --git a/block/elevator.c b/block/elevator.c
index b03b8752e18b..1af5d9f04aff 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -810,7 +810,7 @@ void elv_abort_queue(struct request_queue *q)
                 rq = list_entry_rq(q->queue_head.next);
                 rq->cmd_flags |= REQ_QUIET;
                 trace_block_rq_abort(q, rq);
-                __blk_end_request(rq, -EIO, blk_rq_bytes(rq));
+                __blk_end_request_all(rq, -EIO);
         }
 }
 EXPORT_SYMBOL(elv_abort_queue);
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index ca268ca11159..488a8f4a60aa 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -1024,8 +1024,7 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
                         cmd->req.sg[i].size, ddir);
 
         DBGPX(printk("Done with %p\n", rq););
-        if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
-                BUG();
+        __blk_end_request_all(rq, error);
 }
 
 /*
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index ff0448e4bf03..60e85bb6f790 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -749,8 +749,7 @@ static inline void carm_end_request_queued(struct carm_host *host,
         struct request *req = crq->rq;
         int rc;
 
-        rc = __blk_end_request(req, error, blk_rq_bytes(req));
-        assert(rc == 0);
+        __blk_end_request_all(req, error);
 
         rc = carm_put_request(host, crq);
         assert(rc == 0);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5d34764c8a87..50745e64414e 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -62,7 +62,7 @@ static void blk_done(struct virtqueue *vq)
                         break;
                 }
 
-                __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req));
+                __blk_end_request_all(vbr->req, error);
                 list_del(&vbr->list);
                 mempool_free(vbr, vblk->pool);
         }
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8f905089b72b..cd6cfe3b51e1 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -551,7 +551,6 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
         for (i = info->ring.rsp_cons; i != rp; i++) {
                 unsigned long id;
-                int ret;
 
                 bret = RING_GET_RESPONSE(&info->ring, i);
                 id = bret->id;
@@ -578,8 +577,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
                                 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
                                         "request: %x\n", bret->status);
 
-                        ret = __blk_end_request(req, error, blk_rq_bytes(req));
-                        BUG_ON(ret);
+                        __blk_end_request_all(req, error);
                         break;
                 default:
                         BUG();
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 2eecb779437b..fee9a9e83fc9 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -632,7 +632,7 @@ static void gdrom_readdisk_dma(struct work_struct *work)
                 * before handling ending the request */
                 spin_lock(&gdrom_lock);
                 list_del_init(&req->queuelist);
-                __blk_end_request(req, err, blk_rq_bytes(req));
+                __blk_end_request_all(req, err);
         }
         spin_unlock(&gdrom_lock);
         kfree(read_command);
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c
index 13929356135c..cc3efa096e1a 100644
--- a/drivers/cdrom/viocd.c
+++ b/drivers/cdrom/viocd.c
@@ -291,23 +291,6 @@ static int send_request(struct request *req)
         return 0;
 }
 
-static void viocd_end_request(struct request *req, int error)
-{
-        int nsectors = req->hard_nr_sectors;
-
-        /*
-         * Make sure it's fully ended, and ensure that we process
-         * at least one sector.
-         */
-        if (blk_pc_request(req))
-                nsectors = (req->data_len + 511) >> 9;
-        if (!nsectors)
-                nsectors = 1;
-
-        if (__blk_end_request(req, error, nsectors << 9))
-                BUG();
-}
-
 static int rwreq;
 
 static void do_viocd_request(struct request_queue *q)
@@ -316,11 +299,11 @@ static void do_viocd_request(struct request_queue *q)
 
         while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) {
                 if (!blk_fs_request(req))
-                        viocd_end_request(req, -EIO);
+                        __blk_end_request_all(req, -EIO);
                 else if (send_request(req) < 0) {
                         printk(VIOCD_KERN_WARNING
                                 "unable to send message to OS/400!");
-                        viocd_end_request(req, -EIO);
+                        __blk_end_request_all(req, -EIO);
                 } else
                         rwreq++;
         }
@@ -531,9 +514,9 @@ return_complete:
                                 "with rc %d:0x%04X: %s\n",
                                 req, event->xRc,
                                 bevent->sub_result, err->msg);
-                        viocd_end_request(req, -EIO);
+                        __blk_end_request_all(req, -EIO);
                 } else
-                        viocd_end_request(req, 0);
+                        __blk_end_request_all(req, 0);
 
                 /* restart handling of incoming requests */
                 spin_unlock_irqrestore(&viocd_reqlock, flags);
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index de143deb06f0..a41634699f84 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -826,7 +826,7 @@ static void mspro_block_submit_req(struct request_queue *q)
 
         if (msb->eject) {
                 while ((req = elv_next_request(q)) != NULL)
-                        __blk_end_request(req, -ENODEV, blk_rq_bytes(req));
+                        __blk_end_request_all(req, -ENODEV);
 
                 return;
         }
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d1815272c435..fabec95686b0 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1614,15 +1614,6 @@ void dasd_block_clear_timer(struct dasd_block *block)
 }
 
 /*
- * posts the buffer_cache about a finalized request
- */
-static inline void dasd_end_request(struct request *req, int error)
-{
-        if (__blk_end_request(req, error, blk_rq_bytes(req)))
-                BUG();
-}
-
-/*
  * Process finished error recovery ccw.
  */
 static inline void __dasd_block_process_erp(struct dasd_block *block,
@@ -1676,7 +1667,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                 "Rejecting write request %p",
                                 req);
                         blkdev_dequeue_request(req);
-                        dasd_end_request(req, -EIO);
+                        __blk_end_request_all(req, -EIO);
                         continue;
                 }
                 cqr = basedev->discipline->build_cp(basedev, block, req);
@@ -1705,7 +1696,7 @@ static void __dasd_process_request_queue(struct dasd_block *block)
                                 "on request %p",
                                 PTR_ERR(cqr), req);
                         blkdev_dequeue_request(req);
-                        dasd_end_request(req, -EIO);
+                        __blk_end_request_all(req, -EIO);
                         continue;
                 }
                 /*
@@ -1731,7 +1722,7 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
         status = cqr->block->base->discipline->free_cp(cqr, req);
         if (status <= 0)
                 error = status ? status : -EIO;
-        dasd_end_request(req, error);
+        __blk_end_request_all(req, error);
 }
 
 /*
@@ -2040,7 +2031,7 @@ static void dasd_flush_request_queue(struct dasd_block *block)
         spin_lock_irq(&block->request_queue_lock);
         while ((req = elv_next_request(block->request_queue))) {
                 blkdev_dequeue_request(req);
-                dasd_end_request(req, -EIO);
+                __blk_end_request_all(req, -EIO);
         }
         spin_unlock_irq(&block->request_queue_lock);
 }
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index f32e89e7c4f2..86596d3813b5 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -74,13 +74,6 @@ tapeblock_trigger_requeue(struct tape_device *device)
  * Post finished request.
  */
 static void
-tapeblock_end_request(struct request *req, int error)
-{
-        if (blk_end_request(req, error, blk_rq_bytes(req)))
-                BUG();
-}
-
-static void
 __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 {
         struct tape_device *device;
@@ -90,7 +83,7 @@ __tapeblock_end_request(struct tape_request *ccw_req, void *data)
 
         device = ccw_req->device;
         req = (struct request *) data;
-        tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO);
+        blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO);
         if (ccw_req->rc == 0)
                 /* Update position. */
                 device->blk_data.block_position =
@@ -118,7 +111,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
         ccw_req = device->discipline->bread(device, req);
         if (IS_ERR(ccw_req)) {
                 DBF_EVENT(1, "TBLOCK: bread failed\n");
-                tapeblock_end_request(req, -EIO);
+                blk_end_request_all(req, -EIO);
                 return PTR_ERR(ccw_req);
         }
         ccw_req->callback = __tapeblock_end_request;
@@ -131,7 +124,7 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
                  * Start/enqueueing failed. No retries in
                  * this case.
                  */
-                tapeblock_end_request(req, -EIO);
+                blk_end_request_all(req, -EIO);
                 device->discipline->free_bread(ccw_req);
         }
 
@@ -177,7 +170,7 @@ tapeblock_requeue(struct work_struct *work) {
                         DBF_EVENT(1, "TBLOCK: Rejecting write request\n");
                         blkdev_dequeue_request(req);
                         spin_unlock_irq(&device->blk_data.request_queue_lock);
-                        tapeblock_end_request(req, -EIO);
+                        blk_end_request_all(req, -EIO);
                         spin_lock_irq(&device->blk_data.request_queue_lock);
                         continue;
                 }
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d1cb64ad1a3f..756ac7c93de0 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -922,7 +922,7 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                         if (driver_byte(result) & DRIVER_SENSE)
                                 scsi_print_sense("", cmd);
                 }
-                blk_end_request(req, -EIO, blk_rq_bytes(req));
+                blk_end_request_all(req, -EIO);
                 scsi_next_command(cmd);
                 break;
         case ACTION_REPREP:
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 501f6845cc73..e33c8356b3da 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -883,6 +883,22 @@ static inline bool blk_end_request(struct request *rq, int error,
 }
 
 /**
+ * blk_end_request_all - Helper function for drives to finish the request.
+ * @rq: the request to finish
+ * @err: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.
+ */
+static inline void blk_end_request_all(struct request *rq, int error)
+{
+        bool pending;
+
+        pending = blk_end_request(rq, error, blk_rq_bytes(rq));
+        BUG_ON(pending);
+}
+
+/**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
  * @error: %0 for success, < %0 for error
@@ -902,6 +918,22 @@ static inline bool __blk_end_request(struct request *rq, int error,
 }
 
 /**
+ * __blk_end_request_all - Helper function for drives to finish the request.
+ * @rq: the request to finish
+ * @err: %0 for success, < %0 for error
+ *
+ * Description:
+ *     Completely finish @rq.  Must be called with queue lock held.
+ */
+static inline void __blk_end_request_all(struct request *rq, int error)
+{
+        bool pending;
+
+        pending = __blk_end_request(rq, error, blk_rq_bytes(rq));
+        BUG_ON(pending);
+}
+
+/**
  * end_request - end I/O on the current segment of the request
  * @rq: the request being processed
  * @uptodate: error value or %0/%1 uptodate flag