author     Jens Axboe <jens.axboe@oracle.com>             2009-04-06 08:48:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2009-04-06 11:04:53 -0400
commit     1faa16d22877f4839bd433547d770c676d1d964c (patch)
tree       9a0d50be1ef0358c1f53d7107413100904e7d526 /block/blk-core.c
parent     0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff)
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests; the
request allocation and congestion accounting are now split on sync vs
async instead of on writes vs reads.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
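
For context: the request lists are now indexed by sync/async instead of READ/WRITE. The BLK_RW_SYNC/BLK_RW_ASYNC indices and the rw_is_sync()/rq_is_sync() helpers used in the diff below are defined in headers outside this file; the following is only a sketch of those definitions, inferred from how this patch uses them (is_sync is used directly as an array index, so sync maps to 1 and async to 0, and a request counts as sync when it is a read or a write marked REQ_RW_SYNC).

/* Approximate sketch, not the authoritative header definitions. */
enum {
        BLK_RW_ASYNC    = 0,    /* plain (async) writes */
        BLK_RW_SYNC     = 1,    /* reads and REQ_RW_SYNC writes */
};

/* Reads, and writes explicitly marked sync, count as sync. */
static inline bool rw_is_sync(unsigned int rw_flags)
{
        return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
        return rw_is_sync(rq->cmd_flags);
}
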
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--   block/blk-core.c   70
1 file changed, 35 insertions, 35 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 996ed906d8ca..a32b571aaaa2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
         struct request_list *rl = &q->rq;
 
-        rl->count[READ] = rl->count[WRITE] = 0;
-        rl->starved[READ] = rl->starved[WRITE] = 0;
+        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
         rl->elvpriv = 0;
-        init_waitqueue_head(&rl->wait[READ]);
-        init_waitqueue_head(&rl->wait[WRITE]);
+        init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+        init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
         rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
                                           mempool_free_slab, request_cachep, q->node);
@@ -699,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
         ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
 {
         struct request_list *rl = &q->rq;
 
-        if (rl->count[rw] < queue_congestion_off_threshold(q))
-                blk_clear_queue_congested(q, rw);
+        if (rl->count[sync] < queue_congestion_off_threshold(q))
+                blk_clear_queue_congested(q, sync);
 
-        if (rl->count[rw] + 1 <= q->nr_requests) {
-                if (waitqueue_active(&rl->wait[rw]))
-                        wake_up(&rl->wait[rw]);
+        if (rl->count[sync] + 1 <= q->nr_requests) {
+                if (waitqueue_active(&rl->wait[sync]))
+                        wake_up(&rl->wait[sync]);
 
-                blk_clear_queue_full(q, rw);
+                blk_clear_queue_full(q, sync);
         }
 }
 
@@ -718,18 +718,18 @@ static void __freed_request(struct request_queue *q, int rw)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
 {
         struct request_list *rl = &q->rq;
 
-        rl->count[rw]--;
+        rl->count[sync]--;
         if (priv)
                 rl->elvpriv--;
 
-        __freed_request(q, rw);
+        __freed_request(q, sync);
 
-        if (unlikely(rl->starved[rw ^ 1]))
-                __freed_request(q, rw ^ 1);
+        if (unlikely(rl->starved[sync ^ 1]))
+                __freed_request(q, sync ^ 1);
 }
 
 /*
@@ -743,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
         struct request *rq = NULL;
         struct request_list *rl = &q->rq;
         struct io_context *ioc = NULL;
-        const int rw = rw_flags & 0x01;
+        const bool is_sync = rw_is_sync(rw_flags) != 0;
         int may_queue, priv;
 
         may_queue = elv_may_queue(q, rw_flags);
         if (may_queue == ELV_MQUEUE_NO)
                 goto rq_starved;
 
-        if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-                if (rl->count[rw]+1 >= q->nr_requests) {
+        if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+                if (rl->count[is_sync]+1 >= q->nr_requests) {
                         ioc = current_io_context(GFP_ATOMIC, q->node);
                         /*
                          * The queue will fill after this allocation, so set
@@ -759,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                          * This process will be allowed to complete a batch of
                          * requests, others will be blocked.
                          */
-                        if (!blk_queue_full(q, rw)) {
+                        if (!blk_queue_full(q, is_sync)) {
                                 ioc_set_batching(q, ioc);
-                                blk_set_queue_full(q, rw);
+                                blk_set_queue_full(q, is_sync);
                         } else {
                                 if (may_queue != ELV_MQUEUE_MUST
                                                 && !ioc_batching(q, ioc)) {
@@ -774,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                                 }
                         }
                 }
-                blk_set_queue_congested(q, rw);
+                blk_set_queue_congested(q, is_sync);
         }
 
         /*
@@ -782,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
          * limit of requests, otherwise we could have thousands of requests
          * allocated with any setting of ->nr_requests
          */
-        if (rl->count[rw] >= (3 * q->nr_requests / 2))
+        if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
                 goto out;
 
-        rl->count[rw]++;
-        rl->starved[rw] = 0;
+        rl->count[is_sync]++;
+        rl->starved[is_sync] = 0;
 
         priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
         if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                  * wait queue, but this is pretty rare.
                  */
                 spin_lock_irq(q->queue_lock);
-                freed_request(q, rw, priv);
+                freed_request(q, is_sync, priv);
 
                 /*
                  * in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
                  * rq mempool into READ and WRITE
                  */
 rq_starved:
-                if (unlikely(rl->count[rw] == 0))
-                        rl->starved[rw] = 1;
+                if (unlikely(rl->count[is_sync] == 0))
+                        rl->starved[is_sync] = 1;
 
                 goto out;
         }
@@ -829,7 +829,7 @@ rq_starved:
         if (ioc_batching(q, ioc))
                 ioc->nr_batch_requests--;
 
-        trace_block_getrq(q, bio, rw);
+        trace_block_getrq(q, bio, rw_flags & 1);
 out:
         return rq;
 }
@@ -843,7 +843,7 @@ out:
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                                         struct bio *bio)
 {
-        const int rw = rw_flags & 0x01;
+        const bool is_sync = rw_is_sync(rw_flags) != 0;
         struct request *rq;
 
         rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                 struct io_context *ioc;
                 struct request_list *rl = &q->rq;
 
-                prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+                prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
                                 TASK_UNINTERRUPTIBLE);
 
-                trace_block_sleeprq(q, bio, rw);
+                trace_block_sleeprq(q, bio, rw_flags & 1);
 
                 __generic_unplug_device(q);
                 spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
                 ioc_set_batching(q, ioc);
 
                 spin_lock_irq(q->queue_lock);
-                finish_wait(&rl->wait[rw], &wait);
+                finish_wait(&rl->wait[is_sync], &wait);
 
                 rq = get_request(q, rw_flags, bio, GFP_NOIO);
         };
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
          * it didn't come out of our reserved rq pools
          */
         if (req->cmd_flags & REQ_ALLOCED) {
-                int rw = rq_data_dir(req);
+                int is_sync = rq_is_sync(req) != 0;
                 int priv = req->cmd_flags & REQ_ELVPRIV;
 
                 BUG_ON(!list_empty(&req->queuelist));
                 BUG_ON(!hlist_unhashed(&req->hash));
 
                 blk_free_request(q, req);
-                freed_request(q, rw, priv);
+                freed_request(q, is_sync, priv);
         }
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
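
To illustrate why freed_request() also re-checks the opposite list (the sync ^ 1 case above): the request mempool is shared by both directions, so freeing a request on one side may be exactly what lets a starved waiter on the other side allocate. The following is a standalone userspace toy model of that accounting; the struct, the numbers, and main() are invented for illustration only and are not kernel code.

/* Toy model of the sync/async request-list accounting above.
 * NOT kernel code: everything here is a simplified stand-in. */
#include <stdbool.h>
#include <stdio.h>

enum { ASYNC = 0, SYNC = 1 };

struct toy_request_list {
        int count[2];           /* in-flight requests per direction */
        bool starved[2];        /* an allocation failed while count was 0 */
        int pool_free;          /* shared mempool, like rl->rq_pool */
};

static void toy_freed_one_side(struct toy_request_list *rl, int sync)
{
        /* stand-in for __freed_request(): wake waiters on this side */
        printf("wake %s waiters\n", sync ? "sync" : "async");
        rl->starved[sync] = false;
}

static void toy_freed_request(struct toy_request_list *rl, int sync)
{
        rl->count[sync]--;
        rl->pool_free++;
        toy_freed_one_side(rl, sync);

        /* the shared pool just gained a free slot, so a starved waiter
         * on the *other* list may be able to allocate now: sync ^ 1 */
        if (rl->starved[sync ^ 1])
                toy_freed_one_side(rl, sync ^ 1);
}

int main(void)
{
        struct toy_request_list rl = { .pool_free = 0 };

        rl.count[ASYNC] = 4;            /* async writes hold the whole pool */
        rl.starved[SYNC] = true;        /* a sync allocation already failed */

        toy_freed_request(&rl, ASYNC);  /* frees an async request ...       */
        /* ... and also wakes the starved sync side                         */
        return 0;
}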