Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 55
 1 file changed, 38 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..e03660964e02 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
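
With this patch the request_list fields are indexed by the sync/async classification rather than by read/write. A minimal sketch of how an allocator might use the new BLK_RW_SYNC/BLK_RW_ASYNC constants, assuming a caller that has already decided whether the request is sync; the helper name and the congestion check are assumptions for illustration, not code from this patch:

	/*
	 * Illustrative sketch only: bump the per-class request count and
	 * mark the queue full for that class once the limit is reached.
	 */
	static void rl_account_request(struct request_queue *q, bool sync)
	{
		struct request_list *rl = &q->rq;
		int idx = sync ? BLK_RW_SYNC : BLK_RW_ASYNC;

		rl->count[idx]++;
		if (rl->count[idx] >= q->nr_requests)
			blk_set_queue_full(q, sync);
	}
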
@@ -103,12 +112,13 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
 	__REQ_UNPLUG,		/* unplug queue on submission */
+	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -136,6 +146,7 @@ enum rq_flag_bits {
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
 #define REQ_UNPLUG	(1 << __REQ_UNPLUG)
+#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 
 #define BLK_MAX_CDB	16
 
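
REQ_NOIDLE tells the I/O scheduler that the submitter does not expect to issue more I/O right after this request, so idling for follow-up requests can be skipped. A hedged sketch of a scheduler-side check using only the accessors added by this patch; the surrounding function and its policy are hypothetical:

	/*
	 * Hypothetical scheduler-side use of REQ_NOIDLE (not from this
	 * patch): only idle-wait for more I/O on sync requests that were
	 * not explicitly marked as the last one for now.
	 */
	static bool iosched_should_idle(struct request *rq)
	{
		if (!rq_is_sync(rq))
			return false;	/* async I/O never gets idling */
		if (rq_noidle(rq))
			return false;	/* submitter promised no follow-up I/O */
		return true;
	}
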
@@ -438,8 +449,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -611,32 +622,42 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
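
Taken together, rw_is_sync() and the sync-based blk_queue_full() helpers let the allocation path key its congestion handling off the sync/async distinction instead of read/write. A minimal caller sketch under that assumption; the function name and flow are illustrative, not code from this patch:

	/*
	 * Illustrative caller sketch: decide whether another request of
	 * this class may be allocated, using the new sync classification.
	 */
	static bool may_allocate_request(struct request_queue *q, unsigned int rw_flags)
	{
		const bool is_sync = rw_is_sync(rw_flags);
		int idx = is_sync ? BLK_RW_SYNC : BLK_RW_ASYNC;

		/* If the queue is already full for this class, back off and wait. */
		if (blk_queue_full(q, is_sync))
			return false;

		return q->rq.count[idx] < q->nr_requests;
	}
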