Diffstat (limited to 'include/linux/bio.h')
-rw-r--r--	include/linux/bio.h	125
1 file changed, 78 insertions(+), 47 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7fc5606e6ea..4d379c8250a 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -138,55 +138,83 @@ struct bio {
 #define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
 
 /*
- * bio bi_rw flags
- *
- * bit 0 -- data direction
- *	If not set, bio is a read from device. If set, it's a write to device.
- * bit 1 -- fail fast device errors
- * bit 2 -- fail fast transport errors
- * bit 3 -- fail fast driver errors
- * bit 4 -- rw-ahead when set
- * bit 5 -- barrier
- *	Insert a serialization point in the IO queue, forcing previously
- *	submitted IO to be completed before this one is issued.
- * bit 6 -- synchronous I/O hint.
- * bit 7 -- Unplug the device immediately after submitting this bio.
- * bit 8 -- metadata request
- *	Used for tracing to differentiate metadata and data IO. May also
- *	get some preferential treatment in the IO scheduler
- * bit 9 -- discard sectors
- *	Informs the lower level device that this range of sectors is no longer
- *	used by the file system and may thus be freed by the device. Used
- *	for flash based storage.
- *	Don't want driver retries for any fast fail whatever the reason.
- * bit 10 -- Tell the IO scheduler not to wait for more requests after this
-	one has been submitted, even if it is a SYNC request.
+ * Request flags.  For use in the cmd_flags field of struct request, and in
+ * bi_rw of struct bio.  Note that some flags are only valid in either one.
  */
-enum bio_rw_flags {
-	BIO_RW,
-	BIO_RW_FAILFAST_DEV,
-	BIO_RW_FAILFAST_TRANSPORT,
-	BIO_RW_FAILFAST_DRIVER,
-	/* above flags must match REQ_* */
-	BIO_RW_AHEAD,
-	BIO_RW_BARRIER,
-	BIO_RW_SYNCIO,
-	BIO_RW_UNPLUG,
-	BIO_RW_META,
-	BIO_RW_DISCARD,
-	BIO_RW_NOIDLE,
+enum rq_flag_bits {
+	/* common flags */
+	__REQ_WRITE,		/* not set, read. set, write */
+	__REQ_FAILFAST_DEV,	/* no driver retries of device errors */
+	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
+	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
+
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_SYNC,		/* request is sync (sync write or read) */
+	__REQ_META,		/* metadata io request */
+	__REQ_DISCARD,		/* request to discard sectors */
+	__REQ_NOIDLE,		/* don't anticipate more IO after this one */
+
+	/* bio only flags */
+	__REQ_UNPLUG,		/* unplug the immediately after submission */
+	__REQ_RAHEAD,		/* read ahead, can fail anytime */
+
+	/* request only flags */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_ALLOCED,		/* request came from our alloc pool */
+	__REQ_COPY_USER,	/* contains copies of user pages */
+	__REQ_INTEGRITY,	/* integrity metadata has been remapped */
+	__REQ_IO_STAT,		/* account I/O stat */
+	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
+	__REQ_NR_BITS,		/* stops here */
 };
 
-/*
- * First four bits must match between bio->bi_rw and rq->cmd_flags, make
- * that explicit here.
- */
-#define BIO_RW_RQ_MASK		0xf
-
-static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
-{
-	return (bio->bi_rw & (1 << flag)) != 0;
-}
+#define REQ_WRITE		(1 << __REQ_WRITE)
+#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
+#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
+#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
+#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
+#define REQ_SYNC		(1 << __REQ_SYNC)
+#define REQ_META		(1 << __REQ_META)
+#define REQ_DISCARD		(1 << __REQ_DISCARD)
+#define REQ_NOIDLE		(1 << __REQ_NOIDLE)
+
+#define REQ_FAILFAST_MASK \
+	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
+#define REQ_COMMON_MASK \
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
+	 REQ_META| REQ_DISCARD | REQ_NOIDLE)
+
+#define REQ_UNPLUG		(1 << __REQ_UNPLUG)
+#define REQ_RAHEAD		(1 << __REQ_RAHEAD)
+
+#define REQ_SORTED		(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER		(1 << __REQ_SOFTBARRIER)
+#define REQ_FUA			(1 << __REQ_FUA)
+#define REQ_NOMERGE		(1 << __REQ_NOMERGE)
+#define REQ_STARTED		(1 << __REQ_STARTED)
+#define REQ_DONTPREP		(1 << __REQ_DONTPREP)
+#define REQ_QUEUED		(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV		(1 << __REQ_ELVPRIV)
+#define REQ_FAILED		(1 << __REQ_FAILED)
+#define REQ_QUIET		(1 << __REQ_QUIET)
+#define REQ_PREEMPT		(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_ALLOCED		(1 << __REQ_ALLOCED)
+#define REQ_COPY_USER		(1 << __REQ_COPY_USER)
+#define REQ_INTEGRITY		(1 << __REQ_INTEGRITY)
+#define REQ_IO_STAT		(1 << __REQ_IO_STAT)
+#define REQ_MIXED_MERGE		(1 << __REQ_MIXED_MERGE)
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
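
Editorial note, not part of the patch: with the unified flag space above, callers no longer go through the removed bio_rw_flagged()/BIO_RW_* interface; they test the shared REQ_* bits directly on bio->bi_rw (or rq->cmd_flags), and REQ_COMMON_MASK collects the bits that are meaningful in both fields. A minimal sketch of such a check follows; the helper name example_bio_is_sync_write is hypothetical.

#include <linux/bio.h>

/*
 * Sketch only, not from this patch: test the shared REQ_* bits straight
 * off bio->bi_rw. The helper name is hypothetical.
 */
static inline bool example_bio_is_sync_write(const struct bio *bio)
{
	/* both bits come from the rq_flag_bits enum introduced above */
	return (bio->bi_rw & (REQ_WRITE | REQ_SYNC)) ==
	       (REQ_WRITE | REQ_SYNC);
}
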
@@ -211,7 +239,10 @@ static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag)
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio)	(bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD))
+#define bio_empty_barrier(bio) \
+	((bio->bi_rw & REQ_HARDBARRIER) && \
+	 !bio_has_data(bio) && \
+	 !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
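
Editorial note, not part of the patch: the rewritten bio_empty_barrier() above is the same test as before, spelled with the new flag names instead of bio_rw_flagged(). As a sketch, the equivalent logic as an inline function; the name example_bio_empty_barrier is hypothetical.

#include <linux/bio.h>

/* Sketch: equivalent of the new bio_empty_barrier() macro above. */
static inline bool example_bio_empty_barrier(struct bio *bio)
{
	return (bio->bi_rw & REQ_HARDBARRIER) &&  /* barrier was requested */
	       !bio_has_data(bio) &&              /* carries no payload */
	       !(bio->bi_rw & REQ_DISCARD);       /* and is not a discard */
}
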