author		Tejun Heo <tj@kernel.org>	2009-04-28 00:06:11 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 02:14:51 -0400
commit		e091eb67af957bac4e4f7410c5d1aa263ee483a4 (patch)
tree		2be45bf08417bb1e2159f0a7519e9c79ac7d5a8f /drivers/block/hd.c
parent		f81f2f7c9fee307e371f37424577d46f9eaf8692 (diff)
hd: clean up request completion paths
hd's read_intr() and write_intr() manually manipulate the request to
complete it incrementally, which the block layer already supports.
Simply use the block layer completion routines instead of manual
partial completion.
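
For reference, a minimal sketch of the two completion styles, assuming
the 2.6.30-era block API in which __blk_end_request(req, error,
nr_bytes) completes nr_bytes of the request, advances its bookkeeping
internally, and returns nonzero while bytes are still pending:

	/* Before: manual partial completion, every field advanced by hand */
	req->sector++;			/* next sector to transfer */
	req->buffer += 512;		/* advance the data pointer */
	req->errors = 0;
	--req->nr_sectors;		/* sectors left in the request */
	--req->current_nr_sectors;	/* sectors left in the current segment */
	if (req->current_nr_sectors <= 0)
		__blk_end_request_cur(req, 0);

	/* After: one call does the bookkeeping and reports pending work */
	if (__blk_end_request(req, 0, 512)) {
		SET_HANDLER(&read_intr);	/* more sectors: wait for the next IRQ */
		return;
	}
	hd_request();				/* request fully done: start the next one */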
While at it, remove the unnecessary elv_next_request() check at the
tail of read_intr(). This also makes read_intr() and write_intr() more
consistent.
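
Sketch of why that check was redundant, assuming (as in this era's
hd.c) that hd_request() itself returns early when elv_next_request()
finds the queue empty:

	/* Old tail of read_intr(): restart only if another request is queued */
	if (elv_next_request(QUEUE))
		hd_request();
	return;

	/* New tail: hd_request() repeats the empty-queue check on entry, so
	 * the handler can call it unconditionally, like write_intr() does */
	hd_request();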
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'drivers/block/hd.c')

-rw-r--r--	drivers/block/hd.c	36
1 file changed, 12 insertions(+), 24 deletions(-)
diff --git a/drivers/block/hd.c b/drivers/block/hd.c
index 5cb300b81c6a..75b9ca95c4eb 100644
--- a/drivers/block/hd.c
+++ b/drivers/block/hd.c
@@ -452,32 +452,25 @@ static void read_intr(void)
 	bad_rw_intr();
 	hd_request();
 	return;
+
 ok_to_read:
 	req = CURRENT;
 	insw(HD_DATA, req->buffer, 256);
-	req->sector++;
-	req->buffer += 512;
-	req->errors = 0;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
 #ifdef DEBUG
 	printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-		req->rq_disk->disk_name, req->sector, req->nr_sectors,
+		req->rq_disk->disk_name, req->sector + 1, req->nr_sectors - 1,
 		req->buffer+512);
 #endif
-	if (req->current_nr_sectors <= 0)
-		__blk_end_request_cur(req, 0);
-	if (i > 0) {
+	if (__blk_end_request(req, 0, 512)) {
 		SET_HANDLER(&read_intr);
 		return;
 	}
+
 	(void) inb_p(HD_STATUS);
 #if (HD_DELAY > 0)
 	last_req = read_timer();
 #endif
-	if (elv_next_request(QUEUE))
-		hd_request();
-	return;
+	hd_request();
 }
 
 static void write_intr(void)
@@ -499,23 +492,18 @@ static void write_intr(void)
 	bad_rw_intr();
 	hd_request();
 	return;
+
 ok_to_write:
-	req->sector++;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
-	req->buffer += 512;
-	if (!i || (req->bio && req->current_nr_sectors <= 0))
-		__blk_end_request_cur(req, 0);
-	if (i > 0) {
+	if (__blk_end_request(req, 0, 512)) {
 		SET_HANDLER(&write_intr);
 		outsw(HD_DATA, req->buffer, 256);
-	} else {
+		return;
+	}
+
 #if (HD_DELAY > 0)
-		last_req = read_timer();
+	last_req = read_timer();
 #endif
-		hd_request();
-	}
-	return;
+	hd_request();
 }
 
 static void recal_intr(void)