Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 57 ++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 38 insertions(+), 19 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 465d6babc847..ba54c834a590 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -38,6 +38,10 @@ struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
 struct request_list {
+	/*
+	 * count[], starved[], and wait[] are indexed by
+	 * BLK_RW_SYNC/BLK_RW_ASYNC
+	 */
 	int count[2];
 	int starved[2];
 	int elvpriv;
@@ -66,6 +70,11 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
+enum {
+	BLK_RW_ASYNC	= 0,
+	BLK_RW_SYNC	= 1,
+};
+
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
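
The BLK_RW_ASYNC/BLK_RW_SYNC constants added here are the new indices for the count[], starved[], and wait[] arrays in struct request_list above. As a minimal sketch of the convention, assuming this header's definitions (the helper name is hypothetical, not part of this patch):

	/* Hypothetical helper: account an allocation against the sync or
	 * async class of a request_list; is_sync must be BLK_RW_SYNC or
	 * BLK_RW_ASYNC, not a READ/WRITE data direction.
	 */
	static void rl_account_alloc(struct request_list *rl, int is_sync)
	{
		rl->count[is_sync]++;
		rl->starved[is_sync] = 0;	/* this class just made progress */
	}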
@@ -103,12 +112,12 @@ enum rq_flag_bits {
 	__REQ_QUIET,		/* don't worry about errors */
 	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
 	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
 	__REQ_ALLOCED,		/* request came from our alloc pool */
 	__REQ_RW_META,		/* metadata io request */
 	__REQ_COPY_USER,	/* contains copies of user pages */
 	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_UNPLUG,		/* unplug queue on submission */
+	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -135,7 +144,7 @@ enum rq_flag_bits {
 #define REQ_RW_META	(1 << __REQ_RW_META)
 #define REQ_COPY_USER	(1 << __REQ_COPY_USER)
 #define REQ_INTEGRITY	(1 << __REQ_INTEGRITY)
-#define REQ_UNPLUG	(1 << __REQ_UNPLUG)
+#define REQ_NOIDLE	(1 << __REQ_NOIDLE)
 
 #define BLK_MAX_CDB	16
 
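
REQ_NOIDLE signals that the submitter does not expect dependent IO to follow, so an anticipatory scheduler need not idle the queue waiting for it. A hedged sketch of how a scheduler could consult the flag (the function is illustrative, not part of this patch):

	/* Hypothetical check: is idling for follow-up IO worthwhile
	 * once rq completes?
	 */
	static int may_idle_after(struct request *rq)
	{
		if (rq->cmd_flags & REQ_NOIDLE)
			return 0;	/* submitter promised no dependent IO */
		return 1;
	}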
@@ -438,8 +447,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
+#define QUEUE_FLAG_SYNCFULL	3	/* sync request queue has been filled */
+#define QUEUE_FLAG_ASYNCFULL	4	/* async request queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
@@ -611,32 +620,42 @@ enum {
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 /*
- * We regard a request as sync, if it's a READ or a SYNC write.
+ * We regard a request as sync if it is either a read or a sync write.
  */
-#define rq_is_sync(rq)		(rq_data_dir((rq)) == READ || (rq)->cmd_flags & REQ_RW_SYNC)
+static inline bool rw_is_sync(unsigned int rw_flags)
+{
+	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+}
+
+static inline bool rq_is_sync(struct request *rq)
+{
+	return rw_is_sync(rq->cmd_flags);
+}
+
 #define rq_is_meta(rq)		((rq)->cmd_flags & REQ_RW_META)
+#define rq_noidle(rq)		((rq)->cmd_flags & REQ_NOIDLE)
 
-static inline int blk_queue_full(struct request_queue *q, int rw)
+static inline int blk_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		return test_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
-	return test_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+	if (sync)
+		return test_bit(QUEUE_FLAG_SYNCFULL, &q->queue_flags);
+	return test_bit(QUEUE_FLAG_ASYNCFULL, &q->queue_flags);
 }
 
-static inline void blk_set_queue_full(struct request_queue *q, int rw)
+static inline void blk_set_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_set(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_set(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_set(QUEUE_FLAG_ASYNCFULL, q);
 }
 
-static inline void blk_clear_queue_full(struct request_queue *q, int rw)
+static inline void blk_clear_queue_full(struct request_queue *q, int sync)
 {
-	if (rw == READ)
-		queue_flag_clear(QUEUE_FLAG_READFULL, q);
+	if (sync)
+		queue_flag_clear(QUEUE_FLAG_SYNCFULL, q);
 	else
-		queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
+		queue_flag_clear(QUEUE_FLAG_ASYNCFULL, q);
 }
 
 
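With these helpers, queue-full state is keyed on sync vs. async rather than read vs. write, so callers pass rq_is_sync() where they previously passed a READ/WRITE direction. Note that rw_is_sync() treats every read as sync, since REQ_RW is set only for writes. A hypothetical caller fragment (not from this patch) showing the new convention:

	/* Hypothetical caller: mark the request's class (sync or async)
	 * as full once allocation limits are hit.
	 */
	static void mark_class_full(struct request_queue *q, struct request *rq)
	{
		const int is_sync = rq_is_sync(rq);	/* 1 for reads and sync writes */

		if (!blk_queue_full(q, is_sync))
			blk_set_queue_full(q, is_sync);
	}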