author     Jens Axboe <jens.axboe@oracle.com>              2009-04-06 08:48:01 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-04-06 11:04:53 -0400
commit     1faa16d22877f4839bd433547d770c676d1d964c
tree       9a0d50be1ef0358c1f53d7107413100904e7d526
parent     0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests, instead
of doing the split on writes vs reads.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
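
To make the reclassification concrete, here is a small stand-alone sketch (mine, not part of the patch; the MODEL_* names and bit positions are stand-ins for the real kernel flags) comparing the old request-pool index, which simply followed the data direction, with the new one, which follows rw_is_sync(): reads and sync writes now share one pool, and only plain async writeback lands in the other.

/* Illustration only -- not kernel code.  Flag values are made up. */
#include <stdbool.h>
#include <stdio.h>

#define MODEL_REQ_RW		(1u << 0)	/* write; mirrors REQ_RW being bit 0 */
#define MODEL_REQ_RW_SYNC	(1u << 1)	/* sync hint; real bit position differs */

enum { MODEL_BLK_RW_ASYNC = 0, MODEL_BLK_RW_SYNC = 1 };

/* Old scheme: the pool index was the data direction. */
static int old_pool(unsigned int flags)
{
	return flags & 1;		/* 0 = READ, 1 = WRITE */
}

/* New scheme: reads and sync writes map to the sync pool. */
static int new_pool(unsigned int flags)
{
	bool is_sync = !(flags & MODEL_REQ_RW) || (flags & MODEL_REQ_RW_SYNC);

	return is_sync ? MODEL_BLK_RW_SYNC : MODEL_BLK_RW_ASYNC;
}

int main(void)
{
	unsigned int cases[] = {
		0,					/* read */
		MODEL_REQ_RW,				/* async writeback write */
		MODEL_REQ_RW | MODEL_REQ_RW_SYNC,	/* sync write (O_DIRECT, fsync) */
	};

	for (int i = 0; i < 3; i++)
		printf("flags=%u  old pool=%d  new pool=%d\n",
		       cases[i], old_pool(cases[i]), new_pool(cases[i]));
	return 0;
}

The last case is the one the commit message is about: under the READ/WRITE split a sync write queued in the same pool as async writeback, while with the sync/async split it is counted, congested and woken together with reads.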
-rw-r--r--   block/blk-core.c             | 70
-rw-r--r--   block/blk-sysfs.c            | 40
-rw-r--r--   block/elevator.c             |  2
-rw-r--r--   include/linux/backing-dev.h  | 12
-rw-r--r--   include/linux/blkdev.h       | 52
-rw-r--r--   mm/backing-dev.c             | 10

6 files changed, 102 insertions(+), 84 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 996ed906d8ca..a32b571aaaa2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -484,11 +484,11 @@ static int blk_init_free_list(struct request_queue *q)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[READ] = rl->count[WRITE] = 0;
-	rl->starved[READ] = rl->starved[WRITE] = 0;
+	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
+	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
 	rl->elvpriv = 0;
-	init_waitqueue_head(&rl->wait[READ]);
-	init_waitqueue_head(&rl->wait[WRITE]);
+	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
+	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);
 
 	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
 				mempool_free_slab, request_cachep, q->node);
@@ -699,18 +699,18 @@ static void ioc_set_batching(struct request_queue *q, struct io_context *ioc)
 	ioc->last_waited = jiffies;
 }
 
-static void __freed_request(struct request_queue *q, int rw)
+static void __freed_request(struct request_queue *q, int sync)
 {
 	struct request_list *rl = &q->rq;
 
-	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, rw);
+	if (rl->count[sync] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, sync);
 
-	if (rl->count[rw] + 1 <= q->nr_requests) {
-		if (waitqueue_active(&rl->wait[rw]))
-			wake_up(&rl->wait[rw]);
+	if (rl->count[sync] + 1 <= q->nr_requests) {
+		if (waitqueue_active(&rl->wait[sync]))
+			wake_up(&rl->wait[sync]);
 
-		blk_clear_queue_full(q, rw);
+		blk_clear_queue_full(q, sync);
 	}
 }
 
@@ -718,18 +718,18 @@ static void __freed_request(struct request_queue *q, int rw)
  * A request has just been released. Account for it, update the full and
  * congestion status, wake up any waiters. Called under q->queue_lock.
  */
-static void freed_request(struct request_queue *q, int rw, int priv)
+static void freed_request(struct request_queue *q, int sync, int priv)
 {
 	struct request_list *rl = &q->rq;
 
-	rl->count[rw]--;
+	rl->count[sync]--;
 	if (priv)
 		rl->elvpriv--;
 
-	__freed_request(q, rw);
+	__freed_request(q, sync);
 
-	if (unlikely(rl->starved[rw ^ 1]))
-		__freed_request(q, rw ^ 1);
+	if (unlikely(rl->starved[sync ^ 1]))
+		__freed_request(q, sync ^ 1);
 }
 
 /*
@@ -743,15 +743,15 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
-	const int rw = rw_flags & 0x01;
+	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	int may_queue, priv;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
-	if (rl->count[rw]+1 >= queue_congestion_on_threshold(q)) {
-		if (rl->count[rw]+1 >= q->nr_requests) {
+	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
+		if (rl->count[is_sync]+1 >= q->nr_requests) {
 			ioc = current_io_context(GFP_ATOMIC, q->node);
 			/*
 			 * The queue will fill after this allocation, so set
@@ -759,9 +759,9 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 			 * This process will be allowed to complete a batch of
 			 * requests, others will be blocked.
 			 */
-			if (!blk_queue_full(q, rw)) {
+			if (!blk_queue_full(q, is_sync)) {
 				ioc_set_batching(q, ioc);
-				blk_set_queue_full(q, rw);
+				blk_set_queue_full(q, is_sync);
 			} else {
 				if (may_queue != ELV_MQUEUE_MUST
 						&& !ioc_batching(q, ioc)) {
@@ -774,7 +774,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 				}
 			}
 		}
-		blk_set_queue_congested(q, rw);
+		blk_set_queue_congested(q, is_sync);
 	}
 
 	/*
@@ -782,11 +782,11 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	 * limit of requests, otherwise we could have thousands of requests
 	 * allocated with any setting of ->nr_requests
 	 */
-	if (rl->count[rw] >= (3 * q->nr_requests / 2))
+	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
 		goto out;
 
-	rl->count[rw]++;
-	rl->starved[rw] = 0;
+	rl->count[is_sync]++;
+	rl->starved[is_sync] = 0;
 
 	priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
 	if (priv)
@@ -804,7 +804,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, rw, priv);
+		freed_request(q, is_sync, priv);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -814,8 +814,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * rq mempool into READ and WRITE
 		 */
 rq_starved:
-		if (unlikely(rl->count[rw] == 0))
-			rl->starved[rw] = 1;
+		if (unlikely(rl->count[is_sync] == 0))
+			rl->starved[is_sync] = 1;
 
 		goto out;
 	}
@@ -829,7 +829,7 @@ rq_starved:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw);
+	trace_block_getrq(q, bio, rw_flags & 1);
 out:
 	return rq;
 }
@@ -843,7 +843,7 @@ out:
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 					struct bio *bio)
 {
-	const int rw = rw_flags & 0x01;
+	const bool is_sync = rw_is_sync(rw_flags) != 0;
 	struct request *rq;
 
 	rq = get_request(q, rw_flags, bio, GFP_NOIO);
@@ -852,10 +852,10 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
-		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		trace_block_sleeprq(q, bio, rw);
+		trace_block_sleeprq(q, bio, rw_flags & 1);
 
 		__generic_unplug_device(q);
 		spin_unlock_irq(q->queue_lock);
@@ -871,7 +871,7 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		ioc_set_batching(q, ioc);
 
 		spin_lock_irq(q->queue_lock);
-		finish_wait(&rl->wait[rw], &wait);
+		finish_wait(&rl->wait[is_sync], &wait);
 
 		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	};
@@ -1070,14 +1070,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int rw = rq_data_dir(req);
+		int is_sync = rq_is_sync(req) != 0;
 		int priv = req->cmd_flags & REQ_ELVPRIV;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, rw, priv);
+		freed_request(q, is_sync, priv);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
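
As a rough mental model of the reworked free path above, the sketch below (not kernel code; the counts and the congestion threshold are placeholder numbers, the kernel derives its own from q->nr_requests) shows what freed_request()/__freed_request() now evaluate: every congestion and "full" check runs against the counters of the class being freed, and the opposite class is only poked when it had previously starved.

/* Toy user-space model of the per-class bookkeeping; values are arbitrary. */
#include <stdbool.h>
#include <stdio.h>

enum { ASYNC = 0, SYNC = 1 };

struct model_rl {
	int count[2];		/* allocated requests per class */
	bool starved[2];	/* class failed an allocation while at zero */
};

static const int nr_requests = 128;
static const int congestion_off_thresh = 101;	/* placeholder stand-in */

static void model_freed_request(struct model_rl *rl, int sync)
{
	rl->count[sync]--;

	/* The freed class is measured only against its own limits. */
	if (rl->count[sync] < congestion_off_thresh)
		printf("class %d: clear congestion\n", sync);
	if (rl->count[sync] + 1 <= nr_requests)
		printf("class %d: wake one waiter, clear the 'full' flag\n", sync);

	/* The other class is woken only if it starved earlier, so sync
	 * completions can still unstick a stranded async waiter. */
	if (rl->starved[sync ^ 1]) {
		printf("class %d: was starved, wake it too\n", sync ^ 1);
		rl->starved[sync ^ 1] = false;
	}
}

int main(void)
{
	struct model_rl rl = {
		.count   = { [ASYNC] = 5, [SYNC] = 128 },
		.starved = { [ASYNC] = true },
	};

	model_freed_request(&rl, SYNC);		/* a sync request completes */
	return 0;
}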
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e29ddfc73cf4..3ff9bba3379a 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -48,28 +48,28 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	q->nr_requests = nr;
 	blk_queue_congestion_threshold(q);
 
-	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, READ);
-	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, READ);
+	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_SYNC);
+	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_SYNC);
 
-	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		blk_set_queue_congested(q, WRITE);
-	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		blk_clear_queue_congested(q, WRITE);
+	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
+		blk_set_queue_congested(q, BLK_RW_ASYNC);
+	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
+		blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-	if (rl->count[READ] >= q->nr_requests) {
-		blk_set_queue_full(q, READ);
-	} else if (rl->count[READ]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, READ);
-		wake_up(&rl->wait[READ]);
+	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_SYNC);
+	} else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_SYNC);
+		wake_up(&rl->wait[BLK_RW_SYNC]);
 	}
 
-	if (rl->count[WRITE] >= q->nr_requests) {
-		blk_set_queue_full(q, WRITE);
-	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
-		blk_clear_queue_full(q, WRITE);
-		wake_up(&rl->wait[WRITE]);
+	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+		blk_set_queue_full(q, BLK_RW_ASYNC);
+	} else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, BLK_RW_ASYNC);
+		wake_up(&rl->wait[BLK_RW_ASYNC]);
 	}
 	spin_unlock_irq(q->queue_lock);
 	return ret;
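
As an aside, the retuned queue_requests_store() above now re-evaluates congestion and the "full" flag once per class. A worked example (stand-alone sketch with made-up numbers; the kernel recomputes its real thresholds via blk_queue_congestion_threshold()) shows that lowering nr_requests while writeback is heavy throttles only the async class:

/* Illustration only; thresholds and counts are made up. */
#include <stdio.h>

enum { ASYNC = 0, SYNC = 1 };

int main(void)
{
	int count[2] = { [ASYNC] = 90, [SYNC] = 10 };	/* requests in flight */
	int nr_requests = 64;				/* newly lowered limit */
	int congestion_on = 57, congestion_off = 53;	/* placeholder values */

	for (int cls = 0; cls < 2; cls++) {
		const char *name = cls == SYNC ? "sync" : "async";

		if (count[cls] >= congestion_on)
			printf("%s: mark congested\n", name);
		else if (count[cls] < congestion_off)
			printf("%s: clear congested\n", name);

		if (count[cls] >= nr_requests)
			printf("%s: mark full\n", name);
		else if (count[cls] + 1 <= nr_requests)
			printf("%s: clear full, wake waiters\n", name);
	}
	return 0;
}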
diff --git a/block/elevator.c b/block/elevator.c
index 98259eda0ef6..ca6788a0195a 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -677,7 +677,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 	}
 
 	if (unplug_it && blk_queue_plugged(q)) {
-		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
 				- q->in_flight;
 
 		if (nrq >= q->unplug_thresh)
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
 	BDI_pdflush,		/* A pdflush thread is working this device */
-	BDI_write_congested,	/* The write queue is getting full */
-	BDI_read_congested,	/* The read queue is getting full */
+	BDI_async_congested,	/* The async (write) queue is getting full */
+	BDI_sync_congested,	/* The sync queue is getting full */
 	BDI_unused,		/* Available bits start here */
 };
 
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 
 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_read_congested);
+	return bdi_congested(bdi, 1 << BDI_sync_congested);
 }
 
 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_write_congested);
+	return bdi_congested(bdi, 1 << BDI_async_congested);
 }
 
 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, (1 << BDI_read_congested)|
-				  (1 << BDI_write_congested));
+	return bdi_congested(bdi, (1 << BDI_sync_congested) |
+				  (1 << BDI_async_congested));
 }
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
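
The bdi_read_congested()/bdi_write_congested() helpers keep their names, so existing callers are unchanged; they now report the sync and async class respectively. A minimal stand-alone illustration (local stand-in types, not the kernel API):

/* Illustration only; the struct and enum here are local stand-ins. */
#include <stdbool.h>
#include <stdio.h>

enum model_bdi_state {
	MODEL_BDI_async_congested,	/* was BDI_write_congested */
	MODEL_BDI_sync_congested,	/* was BDI_read_congested */
};

struct model_bdi {
	unsigned long state;		/* bitmask of model_bdi_state bits */
};

static bool model_bdi_congested(const struct model_bdi *bdi, int bdi_bits)
{
	return bdi->state & bdi_bits;
}

/* Same shape as the renamed helpers: "read" congestion tests the sync
 * bit, "write" congestion tests the async bit. */
static bool model_read_congested(const struct model_bdi *bdi)
{
	return model_bdi_congested(bdi, 1 << MODEL_BDI_sync_congested);
}

static bool model_write_congested(const struct model_bdi *bdi)
{
	return model_bdi_congested(bdi, 1 << MODEL_BDI_async_congested);
}

int main(void)
{
	/* Heavy async writeback has congested the async class only. */
	struct model_bdi bdi = { .state = 1UL << MODEL_BDI_async_congested };

	printf("read  congested: %d\n", model_read_congested(&bdi));	/* 0 */
	printf("write congested: %d\n", model_write_congested(&bdi));	/* 1 */
	return 0;
}

So code that polls bdi_write_congested() before issuing more writeback reacts to the async class alone, while sync writes now count toward the class that readers also see.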
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..67dae3bd881c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,7 +112,7 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
@@ -438,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -611,32 +620,41 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index be68c956a660..493b468a5035 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -284,12 +284,12 @@ static wait_queue_head_t congestion_wqh[2] = {
 	};
 
 
-void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
+void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
+	wait_queue_head_t *wqh = &congestion_wqh[sync];
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	clear_bit(bit, &bdi->state);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(wqh))
@@ -297,11 +297,11 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw)
 }
 EXPORT_SYMBOL(clear_bdi_congested);
 
-void set_bdi_congested(struct backing_dev_info *bdi, int rw)
+void set_bdi_congested(struct backing_dev_info *bdi, int sync)
 {
 	enum bdi_state bit;
 
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	bit = sync ? BDI_sync_congested : BDI_async_congested;
 	set_bit(bit, &bdi->state);
 }
 EXPORT_SYMBOL(set_bdi_congested);