author		Tejun Heo <tj@kernel.org>	2009-04-28 00:06:15 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-04-28 02:14:51 -0400
commit		a03bb5a32fff4aa23f081a3cff7e98d4084104cd (patch)
tree		9aca610d0e94a6266f17458de4464402c7f53c6b /drivers/block/mg_disk.c
parent		eec9462088a26c046d4db3100796a340a50890b8 (diff)
mg_disk: clean up request completion paths
mg_disk implements its own partial completion. Convert to standard
block layer partial completion.
[ Impact: cleanup ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: unsik Kim <donari75@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
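
For reference, a minimal sketch (not part of the patch; it uses only the 2.6.30-era request fields and the __blk_end_request() call visible in the diff below) of the completion pattern both PIO paths are converted to. __blk_end_request() retires MG_SECTOR_SIZE bytes, advances req->buffer, req->sector and req->nr_sectors itself, and returns nonzero while the request still has sectors pending, which is what replaces the driver's hand-rolled remains/current_nr_sectors bookkeeping:

	/* Sketch only: one-sector-at-a-time PIO loop using block layer
	 * partial completion.  The block layer, not the driver, advances
	 * the request after each completed sector.
	 */
	do {
		u16 *buff = (u16 *)req->buffer;	/* current sector's buffer */

		/* ... transfer MG_SECTOR_SIZE bytes to/from buff here ... */

	} while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
	/* loop exits once the whole request has been completed */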
Diffstat (limited to 'drivers/block/mg_disk.c')
-rw-r--r--	drivers/block/mg_disk.c	117
1 file changed, 33 insertions, 84 deletions
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index 2c6127ee8343..1a4cc968cfee 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -503,95 +503,68 @@ static unsigned int mg_out(struct mg_host *host,
 
 static void mg_read(struct request *req)
 {
-	u32 remains, j;
+	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
-	remains = req->nr_sectors;
-
 	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
 	    MG_ERR_NONE)
 		mg_bad_rw_intr(host);
 
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-	       remains, req->sector, req->buffer);
+	       req->nr_sectors, req->sector, req->buffer);
+
+	do {
+		u16 *buff = (u16 *)req->buffer;
 
-	while (remains) {
 		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
 			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
-			*(u16 *)req->buffer =
-				inw((unsigned long)host->dev_base +
-				    MG_BUFF_OFFSET + (j << 1));
-			req->buffer += 2;
-		}
-
-		req->sector++;
-		req->errors = 0;
-		remains = --req->nr_sectors;
-		--req->current_nr_sectors;
-
-		if (req->current_nr_sectors <= 0) {
-			MG_DBG("remain : %d sects\n", remains);
-			__blk_end_request_cur(req, 0);
-			if (remains > 0)
-				req = elv_next_request(host->breq);
-		}
+		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+			*buff++ = inw((unsigned long)host->dev_base +
+				      MG_BUFF_OFFSET + (j << 1));
 
 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);
-	}
+	} while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_write(struct request *req)
 {
-	u32 remains, j;
+	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
-	remains = req->nr_sectors;
-
 	if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
 	    MG_ERR_NONE) {
 		mg_bad_rw_intr(host);
 		return;
 	}
 
-
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-	       remains, req->sector, req->buffer);
-	while (remains) {
+	       req->nr_sectors, req->sector, req->buffer);
+
+	do {
+		u16 *buff = (u16 *)req->buffer;
+
 		if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
 			    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
-			outw(*(u16 *)req->buffer,
-			     (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (j << 1));
-			req->buffer += 2;
-		}
-		req->sector++;
-		remains = --req->nr_sectors;
-		--req->current_nr_sectors;
-
-		if (req->current_nr_sectors <= 0) {
-			MG_DBG("remain : %d sects\n", remains);
-			__blk_end_request_cur(req, 0);
-			if (remains > 0)
-				req = elv_next_request(host->breq);
-		}
+		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
+			outw(*buff++, (unsigned long)host->dev_base +
+			     MG_BUFF_OFFSET + (j << 1));
 
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);
-	}
+	} while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_read_intr(struct mg_host *host)
 {
 	u32 i;
+	u16 *buff;
 	struct request *req;
 
 	/* check status */
@@ -612,39 +585,24 @@ static void mg_read_intr(struct mg_host *host)
 ok_to_read:
 	/* get current segment of request */
 	req = elv_next_request(host->breq);
+	buff = (u16 *)req->buffer;
 
 	/* read 1 sector */
-	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-		*(u16 *)req->buffer =
-			inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-			    (i << 1));
-		req->buffer += 2;
-	}
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+			      (i << 1));
 
-	/* manipulate request */
 	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 	       req->sector, req->nr_sectors - 1, req->buffer);
 
-	req->sector++;
-	req->errors = 0;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
-
-	/* let know if current segment done */
-	if (req->current_nr_sectors <= 0)
-		__blk_end_request_cur(req, 0);
-
-	/* set handler if read remains */
-	if (i > 0) {
-		host->mg_do_intr = mg_read_intr;
-		mod_timer(&host->timer, jiffies + 3 * HZ);
-	}
-
 	/* send read confirm */
 	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-	/* goto next request */
-	if (!i)
+	if (__blk_end_request(req, 0, MG_SECTOR_SIZE)) {
+		/* set handler if read remains */
+		host->mg_do_intr = mg_read_intr;
+		mod_timer(&host->timer, jiffies + 3 * HZ);
+	} else /* goto next request */
 		mg_request(host->breq);
 }
 
@@ -653,6 +611,7 @@ static void mg_write_intr(struct mg_host *host)
 	u32 i, j;
 	u16 *buff;
 	struct request *req;
+	bool rem;
 
 	/* get current segment of request */
 	req = elv_next_request(host->breq);
@@ -673,18 +632,8 @@ static void mg_write_intr(struct mg_host *host)
 		return;
 
 ok_to_write:
-	/* manipulate request */
-	req->sector++;
-	i = --req->nr_sectors;
-	--req->current_nr_sectors;
-	req->buffer += MG_SECTOR_SIZE;
-
-	/* let know if current segment or all done */
-	if (!i || (req->bio && req->current_nr_sectors <= 0))
-		__blk_end_request_cur(req, 0);
-
-	/* write 1 sector and set handler if remains */
-	if (i > 0) {
+	if ((rem = __blk_end_request(req, 0, MG_SECTOR_SIZE))) {
+		/* write 1 sector and set handler if remains */
 		buff = (u16 *)req->buffer;
 		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
 			outw(*buff, (unsigned long)host->dev_base +
@@ -700,7 +649,7 @@ ok_to_write:
 	/* send write confirm */
 	outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-	if (!i)
+	if (!rem)
 		mg_request(host->breq);
 }
 