Diffstat (limited to 'include'):

 -rw-r--r--  include/linux/bio.h     125
 -rw-r--r--  include/linux/blkdev.h   66
 -rw-r--r--  include/linux/fs.h       38

 3 files changed, 99 insertions, 130 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7fc5606e6ea5..4d379c8250ae 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -138,55 +138,83 @@ struct bio {
 #define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
 
 /*
- * bio bi_rw flags
- *
- * bit 0 -- data direction
- *	If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- fail fast device errors
- * bit 2 -- fail fast transport errors
- * bit 3 -- fail fast driver errors
- * bit 4 -- rw-ahead when set
- * bit 5 -- barrier
- *	Insert a serialization point in the IO queue, forcing previously
- *	submitted IO to be completed before this one is issued.
- * bit 6 -- synchronous I/O hint.
- * bit 7 -- Unplug the device immediately after submitting this bio.
- * bit 8 -- metadata request
- *	Used for tracing to differentiate metadata and data IO. May also
- *	get some preferential treatment in the IO scheduler
- * bit 9 -- discard sectors
- *	Informs the lower level device that this range of sectors is no longer
- *	used by the file system and may thus be freed by the device. Used
- *	for flash based storage.
- *	Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-	one has been submitted, even if it is a SYNC request.
+ * Request flags.  For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio.  Note that some flags are only valid in either one.
  */
-enum bio_rw_flags {
-	BIO_RW,
-	BIO_RW_FAILFAST_DEV,
-	BIO_RW_FAILFAST_TRANSPORT,
-	BIO_RW_FAILFAST_DRIVER,
-	/* above flags must match REQ_* */
-	BIO_RW_AHEAD,
-	BIO_RW_BARRIER,
-	BIO_RW_SYNCIO,
-	BIO_RW_UNPLUG,
-	BIO_RW_META,
-	BIO_RW_DISCARD,
-	BIO_RW_NOIDLE,
+enum rq_flag_bits {
+	/* common flags */
+	__REQ_WRITE,		/* not set, read. set, write */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_SYNC,		/* request is sync (sync write or read) */
+	__REQ_META,		/* metadata io request */
+	__REQ_DISCARD,		/* request to discard sectors */
+	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+
+	/* bio only flags */
+	__REQ_UNPLUG,		/* unplug the immediately after submission */
+	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+
+	/* request only flags */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_ALLOCED,		/* request came from our alloc pool */
+	__REQ_COPY_USER,	/* contains copies of user pages */
+	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
+	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+	__REQ_NR_BITS,		/* stops here */
 };
 
-/*
- * First four bits must match between bio->bi_rw and rq->cmd_flags, make
- * that explicit here.
- */
-#define BIO_RW_RQ_MASK		0xf
-
-static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
-{
-	return (bio->bi_rw & (1 << flag)) != 0;
-}
+#define REQ_WRITE		(1 << __REQ_WRITE)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
+#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
+#define REQ_SYNC		(1 << __REQ_SYNC)
+#define REQ_META		(1 << __REQ_META)
+#define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
+
+#define REQ_FAILFAST_MASK \
+	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
+	 REQ_META| REQ_DISCARD | REQ_NOIDLE)
+
+#define REQ_UNPLUG		(1 << __REQ_UNPLUG)
+#define REQ_RAHEAD		(1 << __REQ_RAHEAD)
+
+#define REQ_SORTED		(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
+#define REQ_FUA			(1 << __REQ_FUA)
+#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
+#define REQ_STARTED		(1 << __REQ_STARTED)
+#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
+#define REQ_QUEUED		(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
+#define REQ_FAILED		(1 << __REQ_FAILED)
+#define REQ_QUIET		(1 << __REQ_QUIET)
+#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
+#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
+#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
@@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)	(bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) \
+	((bio->bi_rw & REQ_HARDBARRIER) && \
+	 !bio_has_data(bio) && \
+	 !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
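Note on usage (illustrative only): after this change bio.h no longer provides bio_rw_flagged() or the BIO_RW_* enum, so callers test bi_rw directly against the shared REQ_* masks. A minimal sketch of such a conversion, assuming a hypothetical driver helper my_driver_handle_bio() that is not part of this patch:

/* Hypothetical example, not from this patch: converting bi_rw checks. */
#include <linux/kernel.h>
#include <linux/bio.h>

static void my_driver_handle_bio(struct bio *bio)
{
	/* before: if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) ... */
	if (bio->bi_rw & REQ_SYNC)
		pr_debug("sync bio\n");

	/* before: if (bio_rw_flagged(bio, BIO_RW_DISCARD)) ... */
	if (bio->bi_rw & REQ_DISCARD)
		pr_debug("discard bio, carries no data pages\n");
}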
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3ecd28ef9ba4..3fc0f5908619 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -84,70 +84,6 @@ enum {
 	REQ_LB_OP_FLUSH	= 0x41,		/* flush request */
 };
 
-/*
- * request type modified bits. first four bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
-	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
-	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-	/* above flags must match BIO_RW_* */
-	__REQ_DISCARD,		/* request to discard sectors */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (sync write or read) */
-	__REQ_ALLOCED,		/* request came from our alloc pool */
-	__REQ_RW_META,		/* metadata io request */
-	__REQ_COPY_USER,	/* contains copies of user pages */
-	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
-	__REQ_NOIDLE,		/* Don't anticipate more IO after this one */
-	__REQ_IO_STAT,		/* account I/O stat */
-	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW			(1 << __REQ_RW)
-#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
-#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
-#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_DISCARD		(1 << __REQ_DISCARD)
-#define REQ_SORTED		(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
-#define REQ_FUA			(1 << __REQ_FUA)
-#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
-#define REQ_STARTED		(1 << __REQ_STARTED)
-#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
-#define REQ_QUEUED		(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
-#define REQ_FAILED		(1 << __REQ_FAILED)
-#define REQ_QUIET		(1 << __REQ_QUIET)
-#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC		(1 << __REQ_RW_SYNC)
-#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
-#define REQ_RW_META		(1 << __REQ_RW_META)
-#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
-#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
-#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
-#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
-#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
-
-#define REQ_FAILFAST_MASK	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \
-				 REQ_FAILFAST_DRIVER)
-
 #define BLK_MAX_CDB	16
 
 /*
@@ -631,7 +567,7 @@ enum {
  */
 static inline bool rw_is_sync(unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC);
+	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
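Note on semantics (illustrative only): the rw_is_sync() change is a rename at the flag level; reads are still always treated as synchronous, and writes only when REQ_SYNC is set. A standalone userspace sketch of that truth table, where the flag values are illustrative stand-ins rather than the kernel's actual bit positions:

/* Standalone sketch of rw_is_sync() with the unified flag names. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_WRITE (1u << 0)  /* stand-in value for illustration */
#define REQ_SYNC  (1u << 5)  /* stand-in value for illustration */

static bool rw_is_sync(unsigned int rw_flags)
{
	/* reads are always sync; writes are sync only if REQ_SYNC is set */
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

int main(void)
{
	printf("read        -> %d\n", rw_is_sync(0));                    /* 1 */
	printf("async write -> %d\n", rw_is_sync(REQ_WRITE));            /* 0 */
	printf("sync write  -> %d\n", rw_is_sync(REQ_WRITE | REQ_SYNC)); /* 1 */
	return 0;
}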
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 598878831497..c5c92943c767 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -144,29 +144,31 @@ struct inodes_stat_t {
  *			of this IO.
  *
  */
 #define RW_MASK		1
 #define RWA_MASK	2
-#define READ 0
-#define WRITE 1
-#define READA 2		/* read-ahead - don't block if no resources */
-#define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC	(READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
-#define READ_META	(READ | (1 << BIO_RW_META))
-#define WRITE_SYNC_PLUG	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define WRITE_SYNC	(WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_ODIRECT_PLUG	(WRITE | (1 << BIO_RW_SYNCIO))
-#define WRITE_META	(WRITE | (1 << BIO_RW_META))
-#define SWRITE_SYNC_PLUG	\
-			(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE))
-#define SWRITE_SYNC	(SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG))
-#define WRITE_BARRIER	(WRITE_SYNC | (1 << BIO_RW_BARRIER))
+
+#define READ			0
+#define WRITE			1
+#define READA			2 /* readahead - don't block if no resources */
+#define SWRITE			3 /* for ll_rw_block() - wait for buffer lock */
+
+#define READ_SYNC		(READ | REQ_SYNC | REQ_UNPLUG)
+#define READ_META		(READ | REQ_META)
+#define WRITE_SYNC_PLUG		(WRITE | REQ_SYNC | REQ_NOIDLE)
+#define WRITE_SYNC		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
+#define WRITE_ODIRECT_PLUG	(WRITE | REQ_SYNC)
+#define WRITE_META		(WRITE | REQ_META)
+#define WRITE_BARRIER		(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
+				 REQ_HARDBARRIER)
+#define SWRITE_SYNC_PLUG	(SWRITE | REQ_SYNC | REQ_NOIDLE)
+#define SWRITE_SYNC		(SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)
 
 /*
  * These aren't really reads or writes, they pass down information about
  * parts of device that are now unused by the file system.
  */
-#define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD))
-#define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER))
+#define DISCARD_NOBARRIER	(WRITE | REQ_DISCARD)
+#define DISCARD_BARRIER		(WRITE | REQ_DISCARD | REQ_HARDBARRIER)
 
 #define SEL_IN		1
 #define SEL_OUT		2
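Note on usage (illustrative only): after the fs.h change the composite READ_*/WRITE_* macros are plain ORs of the unified REQ_* bits, so a filesystem can hand them straight to submit_bio(), which took (rw, bio) in this kernel era. A minimal sketch under that assumption; the helper names below are hypothetical and bio allocation/setup is omitted:

/* Hypothetical helpers, not from this patch. */
#include <linux/fs.h>
#include <linux/bio.h>

static void my_fs_write_metadata(struct bio *bio)
{
	/* WRITE_META now expands to (WRITE | REQ_META) */
	submit_bio(WRITE_META, bio);
}

static void my_fs_write_sync(struct bio *bio)
{
	/* WRITE_SYNC now expands to (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) */
	submit_bio(WRITE_SYNC, bio);
}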