author	Jens Axboe <jens.axboe@oracle.com>	2009-04-06 08:48:01 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-04-06 11:04:53 -0400
commit	1faa16d22877f4839bd433547d770c676d1d964c (patch)
tree	9a0d50be1ef0358c1f53d7107413100904e7d526 /include/linux/blkdev.h
parent	0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c (diff)
block: change the request allocation/congestion logic to be sync/async based
This makes sure that we never wait on async IO for sync requests, instead
of doing the split on writes vs reads.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	52
1 file changed, 35 insertions(+), 17 deletions(-)
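
As context for the diff below, here is a small standalone C program (not part of this patch and not kernel code) sketching the sync/async classification the patch introduces. The REQ_RW and REQ_RW_SYNC bit values are local stand-ins picked for the demo; in the kernel they come from enum rq_flag_bits in blkdev.h.

#include <stdbool.h>
#include <stdio.h>

/* Demo stand-ins; the real bits live in enum rq_flag_bits in blkdev.h. */
#define REQ_RW		(1U << 0)	/* request is a write */
#define REQ_RW_SYNC	(1U << 1)	/* write has been marked sync */

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

/* Mirrors the new rw_is_sync(): reads are always sync, writes only if flagged. */
static bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
}

int main(void)
{
	/* Like request_list.count[], indexed by BLK_RW_SYNC/BLK_RW_ASYNC. */
	int count[2] = { 0, 0 };
	unsigned int samples[] = {
		0,			/* read                  -> sync  */
		REQ_RW,			/* plain buffered write  -> async */
		REQ_RW | REQ_RW_SYNC,	/* sync (O_DIRECT) write -> sync  */
	};
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		count[rw_is_sync(samples[i]) ? BLK_RW_SYNC : BLK_RW_ASYNC]++;

	printf("sync=%d async=%d\n", count[BLK_RW_SYNC], count[BLK_RW_ASYNC]);
	return 0;	/* prints: sync=2 async=1 */
}
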
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..67dae3bd881c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,7 +112,7 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
@@ -438,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -611,32 +620,41 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
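
A companion sketch, again standalone and hypothetical rather than kernel code, shows the point of the QUEUE_FLAG_SYNCFULL/QUEUE_FLAG_ASYNCFULL rename: the "queue full" state is now tracked per sync/async pool rather than per read/write direction, so marking the sync pool full leaves the async pool uncongested. struct demo_queue and the plain bit operations stand in for struct request_queue and test_bit()/queue_flag_set().

#include <stdbool.h>
#include <stdio.h>

/* Demo stand-ins for the renamed queue flags. */
#define QUEUE_FLAG_SYNCFULL	3	/* sync request pool exhausted  */
#define QUEUE_FLAG_ASYNCFULL	4	/* async request pool exhausted */

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

/* Stand-in for struct request_queue; only the flags word matters here. */
struct demo_queue {
	unsigned long queue_flags;
};

/* Same shape as the reworked helpers: keyed by sync/async, not READ/WRITE. */
static bool demo_queue_full(struct demo_queue *q, int sync)
{
	unsigned int bit = sync ? QUEUE_FLAG_SYNCFULL : QUEUE_FLAG_ASYNCFULL;

	return q->queue_flags & (1UL << bit);
}

static void demo_set_queue_full(struct demo_queue *q, int sync)
{
	unsigned int bit = sync ? QUEUE_FLAG_SYNCFULL : QUEUE_FLAG_ASYNCFULL;

	q->queue_flags |= 1UL << bit;
}

int main(void)
{
	struct demo_queue q = { .queue_flags = 0 };

	demo_set_queue_full(&q, BLK_RW_SYNC);	/* only the sync pool is congested */

	printf("sync full:  %d\n", demo_queue_full(&q, BLK_RW_SYNC));	/* 1 */
	printf("async full: %d\n", demo_queue_full(&q, BLK_RW_ASYNC));	/* 0 */
	return 0;
}
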