Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 61 ++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 42 insertions(+), 19 deletions(-)

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..b4f71f1a4af7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -103,12 +112,13 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_UNPLUG,		/* unplug queue on submission */
+	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
+	__REQ_IO_STAT,		/* account I/O stat */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -135,7 +145,8 @@ enum rq_flag_bits {
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
-#define REQ_UNPLUG	(1 << __REQ_UNPLUG)
+#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
+#define REQ_IO_STAT	(1 << __REQ_IO_STAT)
 
 #define BLK_MAX_CDB	16
 
@@ -438,8 +449,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define	QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -589,6 +600,8 @@ enum {
 			 blk_failfast_transport(rq) ||	\
 			 blk_failfast_driver(rq))
 #define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
+#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)
+#define blk_rq_quiet(rq)	((rq)->cmd_flags & REQ_QUIET)
 
 #define blk_account_rq(rq)	(blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq)))
 
@@ -611,32 +624,42 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
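Not part of the patch: the following is a minimal, self-contained userspace sketch of the allocation logic these header changes support. Everything prefixed TOY_, plus toy_request_list and toy_get_request, is invented for illustration; in the kernel the same roles are played by rw_is_sync(), the request_list count[] array, and blk_queue_full()/blk_set_queue_full() taking a sync argument.

/*
 * Sketch only: requests are classified with the same test as rw_is_sync()
 * above, and the resulting BLK_RW_SYNC/BLK_RW_ASYNC value indexes the
 * per-direction counters and "queue full" state that used to be keyed
 * on READ/WRITE. All TOY_* names are made up for this example.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_REQ_RW	(1 << 0)	/* stand-in for REQ_RW: set on writes */
#define TOY_REQ_SYNC	(1 << 1)	/* stand-in for REQ_RW_SYNC */

enum { TOY_BLK_RW_ASYNC = 0, TOY_BLK_RW_SYNC = 1 };

struct toy_request_list {
	int count[2];			/* indexed by TOY_BLK_RW_* */
	bool full[2];			/* stand-in for QUEUE_FLAG_SYNCFULL/ASYNCFULL */
};

/* Same rule as rw_is_sync(): reads are always sync, writes only if flagged. */
static bool toy_rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & TOY_REQ_RW) || (rw_flags & TOY_REQ_SYNC);
}

static void toy_get_request(struct toy_request_list *rl, unsigned int rw_flags,
			    int queue_depth)
{
	int idx = toy_rw_is_sync(rw_flags) ? TOY_BLK_RW_SYNC : TOY_BLK_RW_ASYNC;

	rl->count[idx]++;
	if (rl->count[idx] >= queue_depth)	/* mirrors blk_set_queue_full(q, sync) */
		rl->full[idx] = true;
}

int main(void)
{
	struct toy_request_list rl = { { 0, 0 }, { false, false } };

	toy_get_request(&rl, 0, 2);				/* read       -> sync  */
	toy_get_request(&rl, TOY_REQ_RW, 2);			/* write      -> async */
	toy_get_request(&rl, TOY_REQ_RW | TOY_REQ_SYNC, 2);	/* sync write -> sync  */

	printf("sync=%d async=%d syncfull=%d\n",
	       rl.count[TOY_BLK_RW_SYNC], rl.count[TOY_BLK_RW_ASYNC],
	       rl.full[TOY_BLK_RW_SYNC]);
	return 0;
}

The design point of the rename is visible in the toy output: allocation limits and congestion state now track sync vs. async rather than READ vs. WRITE, so a sync write is accounted and throttled together with reads instead of with background writeback.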
