author		Jens Axboe <jens.axboe@oracle.com>	2009-04-06 08:48:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 11:04:53 -0400
commit		1faa16d22877f4839bd433547d770c676d1d964c (patch)
tree		9a0d50be1ef0358c1f53d7107413100904e7d526 /include/linux
parent		0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff)
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests, instead
of doing the split on writes vs reads.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/backing-dev.h	12
-rw-r--r--	include/linux/blkdev.h		52
2 files changed, 41 insertions, 23 deletions
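The gist of the change: requests are now bucketed as sync (reads plus sync writes) or async (plain writes), and per-queue accounting is indexed by that bucket instead of by data direction. Below is a minimal userspace sketch of that classification; the `REQ_*` values, `demo_request_list`, and the `main()` driver are illustrative stand-ins for the demo, not the kernel's definitions.

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's cmd_flags bits. */
#define REQ_RW		(1U << 0)	/* request is a write */
#define REQ_RW_SYNC	(1U << 1)	/* write is synchronous */

enum { BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1 };

/* Hypothetical miniature of the patched struct request_list. */
struct demo_request_list {
	int count[2];	/* indexed by BLK_RW_SYNC/BLK_RW_ASYNC */
};

/* Same test the patch introduces as rw_is_sync():
 * anything that is not a write, or any sync write, counts as sync. */
static bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

int main(void)
{
	struct demo_request_list rl = { { 0, 0 } };
	unsigned int flags[] = {
		0,			/* read */
		REQ_RW,			/* plain (async) write */
		REQ_RW | REQ_RW_SYNC,	/* sync write */
	};

	for (int i = 0; i < 3; i++)
		rl.count[rw_is_sync(flags[i]) ? BLK_RW_SYNC : BLK_RW_ASYNC]++;

	/* Read and sync write land in the sync bucket: prints "sync=2 async=1". */
	printf("sync=%d async=%d\n", rl.count[BLK_RW_SYNC], rl.count[BLK_RW_ASYNC]);
	return 0;
}
```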
```diff
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index bee52abb8a4d..0ec2c594868e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -24,8 +24,8 @@ struct dentry;
  */
 enum bdi_state {
 	BDI_pdflush,		/* A pdflush thread is working this device */
-	BDI_write_congested,	/* The write queue is getting full */
-	BDI_read_congested,	/* The read queue is getting full */
+	BDI_async_congested,	/* The async (write) queue is getting full */
+	BDI_sync_congested,	/* The sync queue is getting full */
 	BDI_unused,		/* Available bits start here */
 };
 
```
```diff
@@ -215,18 +215,18 @@ static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 
 static inline int bdi_read_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_read_congested);
+	return bdi_congested(bdi, 1 << BDI_sync_congested);
 }
 
 static inline int bdi_write_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, 1 << BDI_write_congested);
+	return bdi_congested(bdi, 1 << BDI_async_congested);
 }
 
 static inline int bdi_rw_congested(struct backing_dev_info *bdi)
 {
-	return bdi_congested(bdi, (1 << BDI_read_congested)|
-				  (1 << BDI_write_congested));
+	return bdi_congested(bdi, (1 << BDI_sync_congested) |
+				  (1 << BDI_async_congested));
 }
 
 void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
```
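Note that `bdi_read_congested()` and `bdi_write_congested()` keep their names; only the bits they test are renamed. A compilable sketch of that mapping, with `backing_dev_info` stubbed down to just its state word (an assumption for the demo; the real struct carries far more):

```c
#include <stdbool.h>
#include <stdio.h>

enum bdi_state { BDI_pdflush, BDI_async_congested, BDI_sync_congested, BDI_unused };

/* Stub: only the state word matters for this demo. */
struct backing_dev_info { unsigned long state; };

static bool bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	return bdi->state & bdi_bits;
}

int main(void)
{
	/* Mark only the async (write) side congested. */
	struct backing_dev_info bdi = { .state = 1UL << BDI_async_congested };

	/* bdi_write_congested() now resolves to the async bit,
	 * bdi_read_congested() to the sync bit. */
	printf("write congested: %d\n", bdi_congested(&bdi, 1 << BDI_async_congested));
	printf("read congested:  %d\n", bdi_congested(&bdi, 1 << BDI_sync_congested));
	return 0;
}
```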
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..67dae3bd881c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
```
```diff
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
```
```diff
@@ -103,7 +112,7 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
```
```diff
@@ -438,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
```
```diff
@@ -611,32 +620,41 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
```
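Taken together, the FULL flags now track the sync and async sides independently, so a saturated async (write) side no longer stalls sync requests waiting for an allocation. A self-contained sketch of the patched helpers' behavior; the bit-helper stand-ins and the `main()` driver are mine, not kernel code:

```c
#include <stdbool.h>
#include <stdio.h>

enum { QUEUE_FLAG_SYNCFULL = 3, QUEUE_FLAG_ASYNCFULL = 4 };
enum { BLK_RW_ASYNC = 0, BLK_RW_SYNC = 1 };

struct request_queue { unsigned long queue_flags; };

/* Userspace stand-ins for the kernel's atomic bit helpers. */
static void queue_flag_set(int flag, struct request_queue *q)
{
	q->queue_flags |= 1UL << flag;
}

static bool test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

/* Same shape as the patched helper: callers pass a boolean sync
 * (e.g. the result of rw_is_sync()) instead of comparing rw == READ. */
static int blk_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
}

static void blk_set_queue_full(struct request_queue *q, int sync)
{
	if (sync)
		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
	else
		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
}

int main(void)
{
	struct request_queue q = { 0 };

	/* Fill only the async side; the sync side stays usable.
	 * Prints "async full: 1, sync full: 0". */
	blk_set_queue_full(&q, BLK_RW_ASYNC);
	printf("async full: %d, sync full: %d\n",
	       blk_queue_full(&q, BLK_RW_ASYNC), blk_queue_full(&q, BLK_RW_SYNC));
	return 0;
}
```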