author     Tejun Heo <tj@kernel.org>              2009-04-28 00:06:12 -0400
committer  Jens Axboe <jens.axboe@oracle.com>     2009-04-28 02:14:51 -0400
commit     467ca759fc83fc35cb7d15aec0d74c62cffc4481
tree       c3b1e83b73df349ce246ada97775d2f7ed6ddd58  /drivers/block/swim3.c
parent     e091eb67af957bac4e4f7410c5d1aa263ee483a4
swim3: clean up request completion paths
swim3 curiously tries to update request parameters before calling
__blk_end_request() when __blk_end_request() will do it anyway, and it
updates the request manually for partial completion instead of using
blk_update_request(). It also does some spurious checks on rq, such
as testing whether rq->sector is negative or current_nr_sectors is
zero right after fetching the request.
Drop unnecessary stuff and use standard block layer mechanisms.
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
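For context, the two standard helpers the patch switches to behave as
follows: __blk_end_request(rq, error, nr_bytes) completes nr_bytes of the
request, advances rq->sector, rq->current_nr_sectors and rq->buffer itself,
and returns true while the request still has bytes pending;
blk_update_request(rq, error, nr_bytes) does the same bookkeeping without
finishing the request as a whole. A minimal sketch of the idiom, assuming
the 2.6.30-era single-queue block API and 512-byte sectors (complete_chunk
and its parameters are illustrative, not code from the driver or the commit):

#include <linux/blkdev.h>

/*
 * Sketch only: after a chunk of 'nsect' 512-byte sectors has been
 * transferred, hand the request accounting to the block layer instead
 * of updating rq->sector/current_nr_sectors/buffer by hand.
 */
static int complete_chunk(struct request *rq, int error, unsigned int nsect)
{
	if (__blk_end_request(rq, error, nsect << 9)) {
		/* The request still has sectors pending; the block layer
		 * has already advanced its position, so the driver only
		 * needs to start the next chunk of I/O here. */
		return 1;
	}
	/* The request is fully completed (or failed); fetch a new one. */
	return 0;
}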
Diffstat (limited to 'drivers/block/swim3.c')
 -rw-r--r--   drivers/block/swim3.c | 31
 1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 5904f7b73c6e..424855945b9b 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -319,14 +319,10 @@ static void start_request(struct floppy_state *fs)
 		       req->errors, req->current_nr_sectors);
 #endif
 
-		if (req->sector < 0 || req->sector >= fs->total_secs) {
+		if (req->sector >= fs->total_secs) {
 			__blk_end_request_cur(req, -EIO);
 			continue;
 		}
-		if (req->current_nr_sectors == 0) {
-			__blk_end_request_cur(req, 0);
-			continue;
-		}
 		if (fs->ejected) {
 			__blk_end_request_cur(req, -EIO);
 			continue;
@@ -593,8 +589,6 @@ static void xfer_timeout(unsigned long data)
 	struct floppy_state *fs = (struct floppy_state *) data;
 	struct swim3 __iomem *sw = fs->swim3;
 	struct dbdma_regs __iomem *dr = fs->dma;
-	struct dbdma_cmd *cp = fs->dma_cmd;
-	unsigned long s;
 	int n;
 
 	fs->timeout_pending = 0;
@@ -605,14 +599,6 @@ static void xfer_timeout(unsigned long data)
 	out_8(&sw->intr_enable, 0);
 	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
 	out_8(&sw->select, RELAX);
-	if (rq_data_dir(fd_req) == WRITE)
-		++cp;
-	if (ld_le16(&cp->xfer_status) != 0)
-		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
-	else
-		s = 0;
-	fd_req->sector += s;
-	fd_req->current_nr_sectors -= s;
 	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
 	       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
 	__blk_end_request_cur(fd_req, -EIO);
@@ -719,9 +705,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 		if (intr & ERROR_INTR) {
 			n = fs->scount - 1 - resid / 512;
 			if (n > 0) {
-				fd_req->sector += n;
-				fd_req->current_nr_sectors -= n;
-				fd_req->buffer += n * 512;
+				blk_update_request(fd_req, 0, n << 9);
 				fs->req_sector += n;
 			}
 			if (fs->retries < 5) {
@@ -745,13 +729,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				start_request(fs);
 				break;
 			}
-			fd_req->sector += fs->scount;
-			fd_req->current_nr_sectors -= fs->scount;
-			fd_req->buffer += fs->scount * 512;
-			if (fd_req->current_nr_sectors <= 0) {
-				__blk_end_request_cur(fd_req, 0);
-				fs->state = idle;
-			} else {
+			if (__blk_end_request(fd_req, 0, fs->scount << 9)) {
 				fs->req_sector += fs->scount;
 				if (fs->req_sector > fs->secpertrack) {
 					fs->req_sector -= fs->secpertrack;
@@ -761,7 +739,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
 				}
 			}
 			act(fs);
-		}
+		} else
+			fs->state = idle;
 		}
 		if (fs->state == idle)
 			start_request(fs);
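A closing note on the error path in swim3_interrupt(): the sectors that did
reach the media before a failure are now credited with
blk_update_request(fd_req, 0, n << 9), so the retry resumes from the right
position without the driver rewriting request fields. A hedged sketch of that
pattern under the same API assumptions (account_partial_transfer and
done_sectors are hypothetical names, not from the driver):

#include <linux/blkdev.h>

/*
 * Sketch only: credit the sectors that completed before an error so a
 * subsequent retry continues from the correct offset.
 */
static void account_partial_transfer(struct request *rq, unsigned int done_sectors)
{
	if (done_sectors)
		/* Ends the bios covered by the completed bytes and moves
		 * the request's position forward, but does not complete
		 * the request itself. */
		blk_update_request(rq, 0, done_sectors << 9);
}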