diff options
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 66 |
1 files changed, 49 insertions, 17 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 044467ef7b11..2755d5c6da22 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
| @@ -38,6 +38,10 @@ struct request; | |||
| 38 | typedef void (rq_end_io_fn)(struct request *, int); | 38 | typedef void (rq_end_io_fn)(struct request *, int); |
| 39 | 39 | ||
| 40 | struct request_list { | 40 | struct request_list { |
| 41 | /* | ||
| 42 | * count[], starved[], and wait[] are indexed by | ||
| 43 | * BLK_RW_SYNC/BLK_RW_ASYNC | ||
| 44 | */ | ||
| 41 | int count[2]; | 45 | int count[2]; |
| 42 | int starved[2]; | 46 | int starved[2]; |
| 43 | int elvpriv; | 47 | int elvpriv; |
| @@ -66,6 +70,11 @@ enum rq_cmd_type_bits { | |||
| 66 | REQ_TYPE_ATA_PC, | 70 | REQ_TYPE_ATA_PC, |
| 67 | }; | 71 | }; |
| 68 | 72 | ||
| 73 | enum { | ||
| 74 | BLK_RW_ASYNC = 0, | ||
| 75 | BLK_RW_SYNC = 1, | ||
| 76 | }; | ||
| 77 | |||
| 69 | /* | 78 | /* |
| 70 | * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being | 79 | * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being |
| 71 | * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a | 80 | * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a |
| @@ -103,11 +112,13 @@ enum rq_flag_bits { | |||
| 103 | __REQ_QUIET, /* don't worry about errors */ | 112 | __REQ_QUIET, /* don't worry about errors */ |
| 104 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | 113 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ |
| 105 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | 114 | __REQ_ORDERED_COLOR, /* is before or after barrier */ |
| 106 | __REQ_RW_SYNC, /* request is sync (O_DIRECT) */ | 115 | __REQ_RW_SYNC, /* request is sync (sync write or read) */ |
| 107 | __REQ_ALLOCED, /* request came from our alloc pool */ | 116 | __REQ_ALLOCED, /* request came from our alloc pool */ |
| 108 | __REQ_RW_META, /* metadata io request */ | 117 | __REQ_RW_META, /* metadata io request */ |
| 109 | __REQ_COPY_USER, /* contains copies of user pages */ | 118 | __REQ_COPY_USER, /* contains copies of user pages */ |
| 110 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | 119 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ |
| 120 | __REQ_NOIDLE, /* Don't anticipate more IO after this one */ | ||
| 121 | __REQ_IO_STAT, /* account I/O stat */ | ||
| 111 | __REQ_NR_BITS, /* stops here */ | 122 | __REQ_NR_BITS, /* stops here */ |
| 112 | }; | 123 | }; |
| 113 | 124 | ||
| @@ -134,6 +145,8 @@ enum rq_flag_bits { | |||
| 134 | #define REQ_RW_META (1 << __REQ_RW_META) | 145 | #define REQ_RW_META (1 << __REQ_RW_META) |
| 135 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | 146 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) |
| 136 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | 147 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) |
| 148 | #define REQ_NOIDLE (1 << __REQ_NOIDLE) | ||
| 149 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | ||
| 137 | 150 | ||
| 138 | #define BLK_MAX_CDB 16 | 151 | #define BLK_MAX_CDB 16 |
| 139 | 152 | ||
| @@ -436,8 +449,8 @@ struct request_queue | |||
| 436 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | 449 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ |
| 437 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 450 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
| 438 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ | 451 | #define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ |
| 439 | #define QUEUE_FLAG_READFULL 3 /* read queue has been filled */ | 452 | #define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ |
| 440 | #define QUEUE_FLAG_WRITEFULL 4 /* write queue has been filled */ | 453 | #define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ |
| 441 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ | 454 | #define QUEUE_FLAG_DEAD 5 /* queue being torn down */ |
| 442 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ | 455 | #define QUEUE_FLAG_REENTER 6 /* Re-entrancy avoidance */ |
| 443 | #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ | 456 | #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ |
| @@ -449,6 +462,11 @@ struct request_queue | |||
| 449 | #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ | 462 | #define QUEUE_FLAG_STACKABLE 13 /* supports request stacking */ |
| 450 | #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ | 463 | #define QUEUE_FLAG_NONROT 14 /* non-rotational device (SSD) */ |
| 451 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 464 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 465 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | ||
| 466 | |||
| 467 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | ||
| 468 | (1 << QUEUE_FLAG_CLUSTER) | \ | ||
| 469 | (1 << QUEUE_FLAG_STACKABLE)) | ||
| 452 | 470 | ||
| 453 | static inline int queue_is_locked(struct request_queue *q) | 471 | static inline int queue_is_locked(struct request_queue *q) |
| 454 | { | 472 | { |
| @@ -565,6 +583,7 @@ enum { | |||
| 565 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 583 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
| 566 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 584 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
| 567 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 585 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
| 586 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | ||
| 568 | #define blk_queue_flushing(q) ((q)->ordseq) | 587 | #define blk_queue_flushing(q) ((q)->ordseq) |
| 569 | #define blk_queue_stackable(q) \ | 588 | #define blk_queue_stackable(q) \ |
| 570 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 589 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
| @@ -581,6 +600,7 @@ enum { | |||
| 581 | blk_failfast_transport(rq) || \ | 600 | blk_failfast_transport(rq) || \ |
| 582 | blk_failfast_driver(rq)) | 601 | blk_failfast_driver(rq)) |
| 583 | #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) | 602 | #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) |
| 603 | #define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) | ||
| 584 | 604 | ||
| 585 | #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) | 605 | #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) |
| 586 | 606 | ||
| @@ -603,32 +623,42 @@ enum { | |||
| 603 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) | 623 | #define rq_data_dir(rq) ((rq)->cmd_flags & 1) |
| 604 | 624 | ||
| 605 | /* | 625 | /* |
| 606 | * We regard a request as sync, if it's a READ or a SYNC write. | 626 | * We regard a request as sync, if either a read or a sync write |
| 607 | */ | 627 | */ |
| 608 | #define rq_is_sync(rq) (rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC) | 628 | static inline bool rw_is_sync(unsigned int rw_flags) |
| 629 | { | ||
| 630 | return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); | ||
| 631 | } | ||
| 632 | |||
| 633 | static inline bool rq_is_sync(struct request *rq) | ||
| 634 | { | ||
| 635 | return rw_is_sync(rq->cmd_flags); | ||
| 636 | } | ||
| 637 | |||
| 609 | #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) | 638 | #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) |
| 639 | #define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) | ||
| 610 | 640 | ||
| 611 | static inline int blk_queue_full(struct request_queue *q, int rw) | 641 | static inline int blk_queue_full(struct request_queue *q, int sync) |
| 612 | { | 642 | { |
| 613 | if (rw == READ) | 643 | if (sync) |
| 614 | return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags); | 644 | return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags); |
| 615 | return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); | 645 | return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags); |
| 616 | } | 646 | } |
| 617 | 647 | ||
| 618 | static inline void blk_set_queue_full(struct request_queue *q, int rw) | 648 | static inline void blk_set_queue_full(struct request_queue *q, int sync) |
| 619 | { | 649 | { |
| 620 | if (rw == READ) | 650 | if (sync) |
| 621 | queue_flag_set(QUEUE_FLAG_READFULL, q); | 651 | queue_flag_set(QUEUE_FLAG_SYNCFULL, q); |
| 622 | else | 652 | else |
| 623 | queue_flag_set(QUEUE_FLAG_WRITEFULL, q); | 653 | queue_flag_set(QUEUE_FLAG_ASYNCFULL, q); |
| 624 | } | 654 | } |
| 625 | 655 | ||
| 626 | static inline void blk_clear_queue_full(struct request_queue *q, int rw) | 656 | static inline void blk_clear_queue_full(struct request_queue *q, int sync) |
| 627 | { | 657 | { |
| 628 | if (rw == READ) | 658 | if (sync) |
| 629 | queue_flag_clear(QUEUE_FLAG_READFULL, q); | 659 | queue_flag_clear(QUEUE_FLAG_SYNCFULL, q); |
| 630 | else | 660 | else |
| 631 | queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); | 661 | queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q); |
| 632 | } | 662 | } |
| 633 | 663 | ||
| 634 | 664 | ||
| @@ -700,6 +730,8 @@ struct req_iterator { | |||
| 700 | }; | 730 | }; |
| 701 | 731 | ||
| 702 | /* This should not be used directly - use rq_for_each_segment */ | 732 | /* This should not be used directly - use rq_for_each_segment */ |
| 733 | #define for_each_bio(_bio) \ | ||
| 734 | for (; _bio; _bio = _bio->bi_next) | ||
| 703 | #define __rq_for_each_bio(_bio, rq) \ | 735 | #define __rq_for_each_bio(_bio, rq) \ |
| 704 | if ((rq->bio)) \ | 736 | if ((rq->bio)) \ |
| 705 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) | 737 | for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next) |
