Diffstat (limited to 'include/linux/bio.h')

 include/linux/bio.h | 108
 1 file changed, 55 insertions(+), 53 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 0933a14e6414..ff5b4cf9e2da 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -26,21 +26,8 @@
 
 #ifdef CONFIG_BLOCK
 
-/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
 #include <asm/io.h>
 
-#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
-#define BIOVEC_VIRT_START_SIZE(x)	(bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
-#define BIOVEC_VIRT_OVERSIZE(x)	((x) > BIO_VMERGE_MAX_SIZE)
-#else
-#define BIOVEC_VIRT_START_SIZE(x)	0
-#define BIOVEC_VIRT_OVERSIZE(x)	0
-#endif
-
-#ifndef BIO_VMERGE_BOUNDARY
-#define BIO_VMERGE_BOUNDARY	0
-#endif
-
 #define BIO_DEBUG
 
 #ifdef BIO_DEBUG
@@ -88,25 +75,14 @@ struct bio {
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
 	 */
-	unsigned short		bi_phys_segments;
-
-	/* Number of segments after physical and DMA remapping
-	 * hardware coalescing is performed.
-	 */
-	unsigned short		bi_hw_segments;
+	unsigned int		bi_phys_segments;
 
 	unsigned int		bi_size;	/* residual I/O count */
 
-	/*
-	 * To keep track of the max hw size, we account for the
-	 * sizes of the first and last virtually mergeable segments
-	 * in this bio
-	 */
-	unsigned int		bi_hw_front_size;
-	unsigned int		bi_hw_back_size;
-
 	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
 
+	unsigned int		bi_comp_cpu;	/* completion CPU */
+
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */
 
 	bio_end_io_t		*bi_end_io;
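
With virtual merging gone, the hardware-segment counter and the front/back
size tracking disappear; bi_phys_segments (widened to unsigned int) is the
only segment count left, and the new bi_comp_cpu records the queuer's
preferred completion CPU. A minimal sketch of reading the unified count via
bio_phys_segments(), which is declared later in this header; the wrapper
function itself is illustrative only:

    #include <linux/blkdev.h>

    /* bio_phys_segments() computes the count on first use and caches it,
     * setting BIO_SEG_VALID so later calls are cheap. */
    static unsigned int example_segment_count(struct request_queue *q,
                                              struct bio *bio)
    {
            return bio_phys_segments(q, bio);
    }
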
@@ -126,11 +102,14 @@ struct bio {
 #define BIO_UPTODATE	0	/* ok after I/O completion */
 #define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
 #define BIO_EOF		2	/* out-of-bounds error */
-#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
+#define BIO_SEG_VALID	3	/* bi_phys_segments valid */
 #define BIO_CLONED	4	/* doesn't own data */
 #define BIO_BOUNCED	5	/* bio is a bounce bio */
 #define BIO_USER_MAPPED 6	/* contains user pages */
 #define BIO_EOPNOTSUPP	7	/* not supported */
+#define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
+#define BIO_NULL_MAPPED 9	/* contains invalid user pages */
+#define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
 
 /*
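
Three new state bits join the list, and all of them are tested through the
same bio_flagged() macro. A minimal sketch, assuming nothing beyond the
definitions above; the helper name is hypothetical:

    #include <linux/bio.h>

    /* Hypothetical helper: did the queuer request CPU-affine completion? */
    static int example_wants_local_completion(struct bio *bio)
    {
            return bio_flagged(bio, BIO_CPU_AFFINE);
    }
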
@@ -144,18 +123,31 @@ struct bio {
 /*
  * bio bi_rw flags
  *
- * bit 0 -- read (not set) or write (set)
+ * bit 0 -- data direction
+ *	If not set, bio is a read from device. If set, it's a write to device.
  * bit 1 -- rw-ahead when set
  * bit 2 -- barrier
+ *	Insert a serialization point in the IO queue, forcing previously
+ *	submitted IO to be completed before this one is issued.
  * bit 3 -- fail fast, don't want low level driver retries
  * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
+ *	Note that this does NOT indicate that the IO itself is sync, just
+ *	that the block layer will not postpone issue of this IO by plugging.
+ * bit 5 -- metadata request
+ *	Used for tracing to differentiate metadata and data IO. May also
+ *	get some preferential treatment in the IO scheduler.
+ * bit 6 -- discard sectors
+ *	Informs the lower level device that this range of sectors is no longer
+ *	used by the file system and may thus be freed by the device. Used
+ *	for flash based storage.
  */
-#define BIO_RW		0
-#define BIO_RW_AHEAD	1
+#define BIO_RW		0	/* Must match RW in req flags (blkdev.h) */
+#define BIO_RW_AHEAD	1	/* Must match FAILFAST in req flags */
 #define BIO_RW_BARRIER	2
 #define BIO_RW_FAILFAST	3
 #define BIO_RW_SYNC	4
 #define BIO_RW_META	5
+#define BIO_RW_DISCARD	6
 
 /*
  * upper 16 bits of bi_rw define the io priority of this bio
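
BIO_RW_DISCARD is the first bi_rw bit describing a request that carries no
data pages: bi_sector and bi_size alone define the range to be discarded.
A hedged sketch of marking a bio as a discard; whether a caller also sets
bits such as BIO_RW_SYNC is policy outside this header:

    #include <linux/bio.h>

    /* Hypothetical helper: flag a bio as a discard over the range already
     * described by its bi_sector/bi_size. */
    static void example_mark_discard(struct bio *bio)
    {
            bio->bi_rw |= 1 << BIO_RW_DISCARD;
    }
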
@@ -185,14 +177,15 @@ struct bio {
 #define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
 #define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
 #define bio_rw_meta(bio)	((bio)->bi_rw & (1 << BIO_RW_META))
-#define bio_empty_barrier(bio)	(bio_barrier(bio) && !(bio)->bi_size)
+#define bio_discard(bio)	((bio)->bi_rw & (1 << BIO_RW_DISCARD))
+#define bio_empty_barrier(bio)	(bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
 
 static inline unsigned int bio_cur_sectors(struct bio *bio)
 {
 	if (bio->bi_vcnt)
 		return bio_iovec(bio)->bv_len >> 9;
-
-	return 0;
+	else /* dataless requests such as discard */
+		return bio->bi_size >> 9;
 }
 
 static inline void *bio_data(struct bio *bio)
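
Because a discard has a byte count but no bio_vec entries, bio_cur_sectors()
now falls back to the residual size instead of returning 0. Worked numbers
under assumed values: a discard bio with bi_vcnt == 0 and bi_size == 1 << 20
yields (1 << 20) >> 9 == 2048 sectors from the new else branch, where the old
code reported 0 sectors for the same bio.
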
@@ -236,8 +229,6 @@ static inline void *bio_data(struct bio *bio)
 	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
 #endif
 
-#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
-	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
 	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
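
__BIO_SEG_BOUNDARY(addr1, addr2, mask) asks whether the first and last byte
of the half-open range [addr1, addr2) fall inside the same aligned window of
mask + 1 bytes. Worked numbers with a 4 KiB boundary (mask = 0xfff): for
addr1 = 0x12000 and addr2 = 0x12800, (0x12000 | 0xfff) == 0x12fff and
((0x12800 - 1) | 0xfff) == 0x12fff, so the range stays inside one window;
stretch addr2 to 0x13001 and the right-hand side becomes (0x13000 | 0xfff)
== 0x13fff, so the range crosses the boundary and the test fails.
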
@@ -319,15 +310,14 @@ struct bio_pair {
 	atomic_t			cnt;
 	int				error;
 };
-extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
-				  int first_sectors);
-extern mempool_t *bio_split_pool;
+extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);
 
 extern struct bio_set *bioset_create(int, int);
 extern void bioset_free(struct bio_set *);
 
 extern struct bio *bio_alloc(gfp_t, int);
+extern struct bio *bio_kmalloc(gfp_t, int);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 extern void bio_free(struct bio *, struct bio_set *);
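
bio_split() now draws the bio_pair from a pool it manages internally, so the
mempool_t parameter and the exported bio_split_pool go away. A minimal
sketch of the new calling convention; the bp->bio1/bp->bio2 field names are
assumed from the elided top of struct bio_pair:

    #include <linux/bio.h>
    #include <linux/errno.h>

    static int example_split(struct bio *bio, int first_sectors)
    {
            struct bio_pair *bp = bio_split(bio, first_sectors);

            if (!bp)
                    return -ENOMEM;
            /* submit bp->bio1 and bp->bio2 (assumed field names) here,
             * then drop the pair's reference */
            bio_pair_release(bp);
            return 0;
    }
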
@@ -335,7 +325,6 @@ extern void bio_free(struct bio *, struct bio_set *);
 extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern void __bio_clone(struct bio *, struct bio *);
 extern struct bio *bio_clone(struct bio *, gfp_t);
@@ -346,12 +335,14 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
 			   unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
+extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
-				unsigned long, unsigned int, int);
+				unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
+struct rq_map_data;
 extern struct bio *bio_map_user_iov(struct request_queue *,
 				    struct block_device *,
-				    struct sg_iovec *, int, int);
+				    struct sg_iovec *, int, int, gfp_t);
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
 				gfp_t);
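
bio_map_user() and bio_map_user_iov() gain a gfp_t so callers decide how the
mapping's allocations behave, and struct rq_map_data is forward-declared for
the copy helpers below. A minimal sketch against the new signature; reading
the int argument as a write-to-VM flag is an assumption from context, and
uaddr/len are hypothetical:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static struct bio *example_map_user(struct request_queue *q,
                                        struct block_device *bdev,
                                        unsigned long uaddr, unsigned int len)
    {
            /* GFP_KERNEL preserves the old allocation behaviour */
            return bio_map_user(q, bdev, uaddr, len, 1, GFP_KERNEL);
    }
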
@@ -359,15 +350,25 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
 				gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
-extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
-extern struct bio *bio_copy_user_iov(struct request_queue *, struct sg_iovec *,
-				     int, int);
+extern struct bio *bio_copy_user(struct request_queue *, struct rq_map_data *,
+				 unsigned long, unsigned int, int, gfp_t);
+extern struct bio *bio_copy_user_iov(struct request_queue *,
+				     struct rq_map_data *, struct sg_iovec *,
+				     int, int, gfp_t);
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
 extern unsigned int bvec_nr_vecs(unsigned short idx);
 
 /*
+ * Allow queuer to specify a completion CPU for this bio
+ */
+static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
+{
+	bio->bi_comp_cpu = cpu;
+}
+
+/*
  * bio_set is used to allow other portions of the IO system to
  * allocate their own private memory pools for bio and iovec structures.
  * These memory pools in turn all allocate from the bio_slab
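
bio_set_completion_cpu() stores the queuer's preferred completion CPU in the
new bi_comp_cpu field. A hedged sketch of pairing it with BIO_CPU_AFFINE;
setting the flag directly through bi_flags is an assumption based on the
bio_flagged() definition above, and a real caller must hold off preemption
around smp_processor_id():

    #include <linux/bio.h>
    #include <linux/smp.h>

    static void example_pin_completion(struct bio *bio)
    {
            bio_set_completion_cpu(bio, smp_processor_id());
            bio->bi_flags |= 1 << BIO_CPU_AFFINE;
    }
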
@@ -445,6 +446,14 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
 #define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
 
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline int bio_has_data(struct bio *bio)
+{
+	return bio && bio->bi_io_vec != NULL;
+}
+
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
 #define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
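
bio_has_data() gives callers one test for the new dataless cases (empty
barriers, discards) before touching a payload, and it tolerates a NULL bio.
A minimal sketch using only helpers visible in this header:

    #include <linux/bio.h>
    #include <linux/string.h>

    static void example_zero_first_segment(struct bio *bio)
    {
            /* discards and empty barriers have no bio_vec to touch */
            if (bio_has_data(bio))
                    memset(bio_data(bio), 0, bio_iovec(bio)->bv_len);
    }
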
@@ -458,14 +467,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 #define bip_for_each_vec(bvl, bip, i)					\
 	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
 
-static inline int bio_integrity(struct bio *bio)
-{
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-	return bio->bi_integrity != NULL;
-#else
-	return 0;
-#endif
-}
+#define bio_integrity(bio)	(bio->bi_integrity != NULL)
 
 extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
 extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);