author     Kiyoshi Ueda <k-ueda@ct.jp.nec.com>     2007-12-11 17:50:21 -0500
committer  Jens Axboe <jens.axboe@oracle.com>      2008-01-28 04:37:00 -0500
commit     ea6f06f416347448645e60294d92c0c19aba8589 (patch)
tree       01220f12cf20b74d962e3a7a6fd12acee0a4dccb
parent     3daeea29f9348263e0dda89a565074390475bdf8 (diff)
blk_end_request: changing cpqarray (take 4)
This patch converts cpqarray to use the blk_end_request interfaces.
The related 'ok' arguments are converted to 'error'.
cpqarray is a little different from "normal" drivers: it calls
bio_endio() and disk_stat_add() directly when completing a request.
Those calls can be replaced with __end_that_request_first().
After that replacement, the driver's request completion procedure
becomes the following:
o end_that_request_first()
o add_disk_randomness()
o end_that_request_last()
This sequence can be converted to __blk_end_request() by following
rule (b) mentioned in the patch subject
"[PATCH 01/30] blk_end_request: add new request completion interface".
Cc: Mike Miller <mike.miller@hp.com>
Signed-off-by: Kiyoshi Ueda <k-ueda@ct.jp.nec.com>
Signed-off-by: Jun'ichi Nomura <j-nomura@ce.jp.nec.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
-rw-r--r--   drivers/block/cpqarray.c   36
1 file changed, 7 insertions(+), 29 deletions(-)
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index c8132d958795..69199185ff4b 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -167,7 +167,6 @@ static void start_io(ctlr_info_t *h);
 
 static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
-static inline void complete_buffers(struct bio *bio, int ok);
 static inline void complete_command(cmdlist_t *cmd, int timeout);
 
 static irqreturn_t do_ida_intr(int irq, void *dev_id);
@@ -980,26 +979,13 @@ static void start_io(ctlr_info_t *h)
 	}
 }
 
-static inline void complete_buffers(struct bio *bio, int ok)
-{
-	struct bio *xbh;
-
-	while (bio) {
-		xbh = bio->bi_next;
-		bio->bi_next = NULL;
-
-		bio_endio(bio, ok ? 0 : -EIO);
-
-		bio = xbh;
-	}
-}
 /*
  * Mark all buffers that cmd was responsible for
  */
 static inline void complete_command(cmdlist_t *cmd, int timeout)
 {
 	struct request *rq = cmd->rq;
-	int ok=1;
+	int error = 0;
 	int i, ddir;
 
 	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
@@ -1011,16 +997,17 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 	if (cmd->req.hdr.rcode & RCODE_FATAL) {
 		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
 				cmd->ctlr, cmd->hdr.unit);
-		ok = 0;
+		error = -EIO;
 	}
 	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
 		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
 				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
 				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
 				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
-		ok = 0;
+		error = -EIO;
 	}
-	if (timeout) ok = 0;
+	if (timeout)
+		error = -EIO;
 	/* unmap the DMA mapping for all the scatter gather elements */
 	if (cmd->req.hdr.cmd == IDA_READ)
 		ddir = PCI_DMA_FROMDEVICE;
@@ -1030,18 +1017,9 @@ static inline void complete_command(cmdlist_t *cmd, int timeout)
 		pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
 				cmd->req.sg[i].size, ddir);
 
-	complete_buffers(rq->bio, ok);
-
-	if (blk_fs_request(rq)) {
-		const int rw = rq_data_dir(rq);
-
-		disk_stat_add(rq->rq_disk, sectors[rw], rq->nr_sectors);
-	}
-
-	add_disk_randomness(rq->rq_disk);
-
 	DBGPX(printk("Done with %p\n", rq););
-	end_that_request_last(rq, ok ? 1 : -EIO);
+	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
+		BUG();
 }
 
 /*