Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 63 ++++++++++++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 46 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 044467ef7b11..ba54c834a590 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
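Taken together with the comment added to struct request_list above, the new BLK_RW_SYNC/BLK_RW_ASYNC constants become the canonical index into the per-direction arrays. A hedged sketch of that usage follows (the helper is hypothetical, and it assumes the 2.6.29-era layout where the queue embeds its request_list as q->rq):

#include <linux/blkdev.h>

/* Hypothetical sketch: read how many requests of one kind are queued,
 * indexing count[] with BLK_RW_SYNC or BLK_RW_ASYNC as this patch intends. */
static int queued_requests(struct request_queue *q, bool sync)
{
	struct request_list *rl = &q->rq;

	return rl->count[sync ? BLK_RW_SYNC : BLK_RW_ASYNC];
}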
@@ -103,11 +112,12 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
+	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -134,6 +144,7 @@ enum rq_flag_bits {
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
+#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 
 #define BLK_MAX_CDB	16
 
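For context, a hedged sketch (not part of this patch) of how a caller might tag a request with the new REQ_NOIDLE flag so the I/O scheduler knows not to idle waiting for a follow-up; the helper name is hypothetical.

#include <linux/blkdev.h>

/* Hypothetical sketch: mark an already-allocated request as one the
 * scheduler should not anticipate further I/O after; readers can then
 * test it with the rq_noidle(rq) macro added later in this patch. */
static void mark_rq_noidle(struct request *rq)
{
	rq->cmd_flags |= REQ_NOIDLE;
}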
@@ -436,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -449,6 +460,11 @@ struct request_queue
 #define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT	QUEUE_FLAG_NONROT /* paravirt device */
+#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
+
+#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
+				 (1 << QUEUE_FLAG_CLUSTER) |		\
+				 (1 << QUEUE_FLAG_STACKABLE))
 
 static inline int queue_is_locked(struct request_queue *q)
 {
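A hedged, illustrative sketch (not from this patch) of the kind of init-time use QUEUE_FLAG_DEFAULT is meant for: seeding a freshly allocated queue's flags with IO stats, clustering, and request stacking enabled. The helper name is hypothetical, and the plain assignment assumes the queue is not yet visible to any other context.

#include <linux/blkdev.h>

/* Hypothetical sketch: give a brand-new queue the default flag set.
 * A plain store is only safe before the queue is published, since
 * queue_flags is otherwise manipulated with the queue_flag_* helpers. */
static void init_queue_flags(struct request_queue *q)
{
	q->queue_flags = QUEUE_FLAG_DEFAULT;
}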
@@ -565,6 +581,7 @@ enum {
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
+#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
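blk_queue_io_stat(q) gives block code a cheap predicate for deciding whether to do per-queue accounting. A hedged example of such a guard (helper name hypothetical, and the blk_fs_request() check is an assumption about the era's filesystem-request test):

#include <linux/blkdev.h>

/* Hypothetical sketch: only account requests when the queue has
 * QUEUE_FLAG_IO_STAT set and the request is a filesystem request. */
static bool should_account_io(struct request_queue *q, struct request *rq)
{
	return blk_queue_io_stat(q) && blk_fs_request(rq);
}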
@@ -603,32 +620,42 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync, if either a read or a sync write
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
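The point of the sync/async split shows up in the request allocation path: classify the incoming flags once with rw_is_sync() and use that index everywhere. Below is a hedged, heavily simplified sketch loosely modelled on that path, not code from this patch; the helper name is hypothetical and the nr_requests comparison is only illustrative.

#include <linux/blkdev.h>

/* Hypothetical sketch: decide whether another request of this kind may
 * be allocated, using the sync/async index for both the "full" flag
 * and the per-list counters (count[] is indexed by BLK_RW_SYNC/ASYNC). */
static bool may_allocate_request(struct request_queue *q, unsigned int rw_flags)
{
	const bool is_sync = rw_is_sync(rw_flags);
	struct request_list *rl = &q->rq;

	if (blk_queue_full(q, is_sync))
		return false;	/* this direction is already congested */

	return rl->count[is_sync] < q->nr_requests;
}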
@@ -700,6 +727,8 @@ struct req_iterator {
 };
 
 /* This should not be used directly - use rq_for_each_segment */
+#define for_each_bio(_bio)		\
+	for (; _bio; _bio = _bio->bi_next)
 #define __rq_for_each_bio(_bio, rq)	\
 	if ((rq->bio))			\
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
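for_each_bio() factors the chain walk out of __rq_for_each_bio() so it can also be used on a bare bio chain. A hedged example (helper name hypothetical; note the macro expects the cursor variable to be pre-set, since it has no initializer):

#include <linux/blkdev.h>

/* Hypothetical sketch: total the bytes in a chain of bios linked
 * through ->bi_next, walking it with the new for_each_bio() helper. */
static unsigned int bio_chain_bytes(struct bio *head)
{
	struct bio *bio = head;		/* for_each_bio() does not initialize */
	unsigned int bytes = 0;

	for_each_bio(bio)
		bytes += bio->bi_size;

	return bytes;
}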