author		Tejun Heo <tj@kernel.org>	2009-05-07 22:54:01 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-05-11 03:52:15 -0400
commit		5b36ad6000ddea390aca3c3b67ead7616ace2ffc (patch)
tree		dedca03fffd6729487fbd9bc3959428038e0cdfb /drivers
parent		9a8d23d8855e554fc5887f14cb008b55c4300ccc (diff)
mg_disk: dequeue and track in-flight request
mg_disk has at most one request in flight per device. Until now,
whenever it needed to access the in-flight request it called
elv_next_request(). This patch makes mg_disk track the in-flight
request directly in mg_host->req and dequeue it when processing
starts.
q->queuedata is set to mg_host so that mg_host can be determined
without fetching a request from the queue.
[ Impact: dequeue in-flight request, one elv_next_request() per request ]
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: unsik Kim <donari75@gmail.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
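
In short, every path that used to peek at the queue head with elv_next_request() now works against the cached host->req, fetching and dequeuing a new request only when nothing is in flight, and clearing host->req once mg_end_request() retires the last bytes of the current one. A condensed, annotated sketch of that pattern follows; the helper name mg_fetch_next() is illustrative only and not part of the patch, whose real code sits inline in mg_request_poll()/mg_request() in the diff below:

	/* Illustrative only: the fetch-and-dequeue step this patch introduces. */
	static void mg_fetch_next(struct request_queue *q)
	{
		struct mg_host *host = q->queuedata;	/* set in mg_probe() */

		if (host->req)
			return;				/* a request is already in flight */

		host->req = elv_next_request(q);	/* peek at the queue head */
		if (host->req)
			blkdev_dequeue_request(host->req);	/* take it off the queue */
	}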
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/block/mg_disk.c	109
1 file changed, 59 insertions(+), 50 deletions(-)
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index be323880f24a..1ca5d1423fa3 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -135,6 +135,7 @@ struct mg_host {
 	struct device *dev;
 
 	struct request_queue *breq;
+	struct request *req;
 	spinlock_t lock;
 	struct gendisk *gd;
 
@@ -171,17 +172,27 @@ struct mg_host {
 
 static void mg_request(struct request_queue *);
 
+static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes)
+{
+	if (__blk_end_request(host->req, err, nr_bytes))
+		return true;
+
+	host->req = NULL;
+	return false;
+}
+
+static bool mg_end_request_cur(struct mg_host *host, int err)
+{
+	return mg_end_request(host, err, blk_rq_cur_bytes(host->req));
+}
+
 static void mg_dump_status(const char *msg, unsigned int stat,
 		struct mg_host *host)
 {
 	char *name = MG_DISK_NAME;
-	struct request *req;
 
-	if (host->breq) {
-		req = elv_next_request(host->breq);
-		if (req)
-			name = req->rq_disk->disk_name;
-	}
+	if (host->req)
+		name = host->req->rq_disk->disk_name;
 
 	printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
 	if (stat & ATA_BUSY)
@@ -217,13 +228,9 @@ static void mg_dump_status(const char *msg, unsigned int stat,
 		printk("AddrMarkNotFound ");
 	printk("}");
 	if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) {
-		if (host->breq) {
-			req = elv_next_request(host->breq);
-			if (req)
-				printk(", sector=%u",
-				       (unsigned int)blk_rq_pos(req));
-		}
-
+		if (host->req)
+			printk(", sector=%u",
+			       (unsigned int)blk_rq_pos(host->req));
 	}
 	printk("\n");
 }
@@ -453,11 +460,10 @@ static int mg_disk_init(struct mg_host *host)
 
 static void mg_bad_rw_intr(struct mg_host *host)
 {
-	struct request *req = elv_next_request(host->breq);
-	if (req != NULL)
-		if (++req->errors >= MG_MAX_ERRORS ||
-				host->error == MG_ERR_TIMEOUT)
-			__blk_end_request_cur(req, -EIO);
+	if (host->req)
+		if (++host->req->errors >= MG_MAX_ERRORS ||
+		    host->error == MG_ERR_TIMEOUT)
+			mg_end_request_cur(host, -EIO);
 }
 
 static unsigned int mg_out(struct mg_host *host,
@@ -515,7 +521,7 @@ static void mg_read(struct request *req)
 
 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
 				MG_REG_COMMAND);
-	} while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
+	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_write(struct request *req)
@@ -545,14 +551,14 @@ static void mg_write(struct request *req)
 
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 				MG_REG_COMMAND);
-	} while (__blk_end_request(req, 0, MG_SECTOR_SIZE));
+	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
 static void mg_read_intr(struct mg_host *host)
 {
+	struct request *req = host->req;
 	u32 i;
 	u16 *buff;
-	struct request *req;
 
 	/* check status */
 	do {
@@ -571,7 +577,6 @@ static void mg_read_intr(struct mg_host *host)
 
 ok_to_read:
 	/* get current segment of request */
-	req = elv_next_request(host->breq);
 	buff = (u16 *)req->buffer;
 
 	/* read 1 sector */
@@ -585,7 +590,7 @@ ok_to_read:
 	/* send read confirm */
 	outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
 
-	if (__blk_end_request(req, 0, MG_SECTOR_SIZE)) {
+	if (mg_end_request(host, 0, MG_SECTOR_SIZE)) {
 		/* set handler if read remains */
 		host->mg_do_intr = mg_read_intr;
 		mod_timer(&host->timer, jiffies + 3 * HZ);
@@ -595,14 +600,11 @@ ok_to_read:
 
 static void mg_write_intr(struct mg_host *host)
 {
+	struct request *req = host->req;
 	u32 i, j;
 	u16 *buff;
-	struct request *req;
 	bool rem;
 
-	/* get current segment of request */
-	req = elv_next_request(host->breq);
-
 	/* check status */
 	do {
 		i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
@@ -619,7 +621,7 @@ static void mg_write_intr(struct mg_host *host)
 		return;
 
 ok_to_write:
-	if ((rem = __blk_end_request(req, 0, MG_SECTOR_SIZE))) {
+	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
 		/* write 1 sector and set handler if remains */
 		buff = (u16 *)req->buffer;
 		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
@@ -644,44 +646,47 @@ void mg_times_out(unsigned long data)
 {
 	struct mg_host *host = (struct mg_host *)data;
 	char *name;
-	struct request *req;
 
 	spin_lock_irq(&host->lock);
 
-	req = elv_next_request(host->breq);
-	if (!req)
+	if (!host->req)
 		goto out_unlock;
 
 	host->mg_do_intr = NULL;
 
-	name = req->rq_disk->disk_name;
+	name = host->req->rq_disk->disk_name;
 	printk(KERN_DEBUG "%s: timeout\n", name);
 
 	host->error = MG_ERR_TIMEOUT;
 	mg_bad_rw_intr(host);
 
-	mg_request(host->breq);
 out_unlock:
+	mg_request(host->breq);
 	spin_unlock_irq(&host->lock);
 }
 
 static void mg_request_poll(struct request_queue *q)
 {
-	struct request *req;
-	struct mg_host *host;
+	struct mg_host *host = q->queuedata;
 
-	while ((req = elv_next_request(q)) != NULL) {
-		host = req->rq_disk->private_data;
+	while (1) {
+		if (!host->req) {
+			host->req = elv_next_request(q);
+			if (host->req)
+				blkdev_dequeue_request(host->req);
+			else
+				break;
+		}
 
-		if (unlikely(!blk_fs_request(req))) {
-			__blk_end_request_cur(req, -EIO);
+		if (unlikely(!blk_fs_request(host->req))) {
+			mg_end_request_cur(host, -EIO);
 			continue;
 		}
 
-		if (rq_data_dir(req) == READ)
-			mg_read(req);
+		if (rq_data_dir(host->req) == READ)
+			mg_read(host->req);
 		else
-			mg_write(req);
+			mg_write(host->req);
 	}
 }
 
@@ -733,16 +738,19 @@ static unsigned int mg_issue_req(struct request *req,
 /* This function also called from IRQ context */
 static void mg_request(struct request_queue *q)
 {
+	struct mg_host *host = q->queuedata;
 	struct request *req;
-	struct mg_host *host;
 	u32 sect_num, sect_cnt;
 
 	while (1) {
-		req = elv_next_request(q);
-		if (!req)
-			return;
-
-		host = req->rq_disk->private_data;
+		if (!host->req) {
+			host->req = elv_next_request(q);
+			if (host->req)
+				blkdev_dequeue_request(host->req);
+			else
+				break;
+		}
+		req = host->req;
 
 		/* check unwanted request call */
 		if (host->mg_do_intr)
@@ -762,12 +770,12 @@ static void mg_request(struct request_queue *q)
 				"%s: bad access: sector=%d, count=%d\n",
 				req->rq_disk->disk_name,
 				sect_num, sect_cnt);
-			__blk_end_request_cur(req, -EIO);
+			mg_end_request_cur(host, -EIO);
 			continue;
 		}
 
 		if (unlikely(!blk_fs_request(req))) {
-			__blk_end_request_cur(req, -EIO);
+			mg_end_request_cur(host, -EIO);
 			continue;
 		}
 
@@ -981,6 +989,7 @@ static int mg_probe(struct platform_device *plat_dev)
 				__func__, __LINE__);
 		goto probe_err_5;
 	}
+	host->breq->queuedata = host;
 
 	/* mflash is random device, thanx for the noop */
 	elevator_exit(host->breq->elevator);