author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 14:19:05 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 14:19:05 -0500
commit    f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree      b9472d640fe5d87426d38c9d81d946cf197ad3fb /include/linux
parent    d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent    675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vec series from Kent, the
  rest is fairly minor. It was supposed to go in last round, but various
  issues pushed it to this release instead.

  The pull request contains:

   - Various smaller blk-mq fixes from different folks. Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet. This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient. Related fixes to immutable
     bio_vecs:

      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to
        stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
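The core of the immutable biovec work is visible in the bio.h diff below: drivers no longer index bi_io_vec through bi_idx, they walk a private struct bvec_iter and receive each segment as a struct bio_vec by value, so the bio itself is never modified while being iterated. A minimal sketch of a driver-side loop under the new interface (the function name and the printout are illustrative, not part of this merge):

    #include <linux/bio.h>
    #include <linux/printk.h>

    /* Hypothetical helper: walk every segment of a bio without touching it. */
    static void example_count_bytes(struct bio *bio)
    {
            struct bio_vec bvec;    /* segment handed back by value */
            struct bvec_iter iter;  /* local cursor, bio state stays immutable */
            unsigned int bytes = 0;

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;

            pr_info("bio at sector %llu carries %u bytes in %u segments\n",
                    (unsigned long long)bio->bi_iter.bi_sector,
                    bytes, bio_segments(bio));
    }

Because the iterator is a local copy, a stacking driver and the low-level driver can each walk the same bio without corrupting the other's position, which is what makes the new splitting and chaining code possible.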
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/bio.h              | 283
-rw-r--r--   include/linux/blk-mq.h           |  13
-rw-r--r--   include/linux/blk_types.h        |  24
-rw-r--r--   include/linux/blkdev.h           |   9
-rw-r--r--   include/linux/ceph/messenger.h   |   4
-rw-r--r--   include/linux/cmdline-parser.h   |   8
-rw-r--r--   include/linux/dm-io.h            |   4
7 files changed, 208 insertions, 137 deletions
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 060ff695085c..70654521dab6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,25 +61,87 @@
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
  */
-#define bio_iovec_idx(bio, idx)        (&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)         bio_iovec_idx((bio), (bio)->bi_idx)
-#define bio_page(bio)          bio_iovec((bio))->bv_page
-#define bio_offset(bio)                bio_iovec((bio))->bv_offset
-#define bio_segments(bio)      ((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)       ((bio)->bi_size >> 9)
-#define bio_end_sector(bio)    ((bio)->bi_sector + bio_sectors((bio)))
+#define __bvec_iter_bvec(bvec, iter)   (&(bvec)[(iter).bi_idx])
+
+#define bvec_iter_page(bvec, iter)                             \
+       (__bvec_iter_bvec((bvec), (iter))->bv_page)
+
+#define bvec_iter_len(bvec, iter)                              \
+       min((iter).bi_size,                                     \
+           __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
+
+#define bvec_iter_offset(bvec, iter)                           \
+       (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
+
+#define bvec_iter_bvec(bvec, iter)                             \
+((struct bio_vec) {                                            \
+       .bv_page        = bvec_iter_page((bvec), (iter)),       \
+       .bv_len         = bvec_iter_len((bvec), (iter)),        \
+       .bv_offset      = bvec_iter_offset((bvec), (iter)),     \
+})
+
+#define bio_iter_iovec(bio, iter)                              \
+       bvec_iter_bvec((bio)->bi_io_vec, (iter))
+
+#define bio_iter_page(bio, iter)                               \
+       bvec_iter_page((bio)->bi_io_vec, (iter))
+#define bio_iter_len(bio, iter)                                        \
+       bvec_iter_len((bio)->bi_io_vec, (iter))
+#define bio_iter_offset(bio, iter)                             \
+       bvec_iter_offset((bio)->bi_io_vec, (iter))
+
+#define bio_page(bio)          bio_iter_page((bio), (bio)->bi_iter)
+#define bio_offset(bio)                bio_iter_offset((bio), (bio)->bi_iter)
+#define bio_iovec(bio)         bio_iter_iovec((bio), (bio)->bi_iter)
+
+#define bio_multiple_segments(bio)                             \
+       ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
+#define bio_sectors(bio)       ((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)    ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+
+/*
+ * Check whether this bio carries any data or not. A NULL bio is allowed.
+ */
+static inline bool bio_has_data(struct bio *bio)
+{
+       if (bio &&
+           bio->bi_iter.bi_size &&
+           !(bio->bi_rw & REQ_DISCARD))
+               return true;
+
+       return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+       if (!bio_has_data(bio))
+               return false;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               return false;
+
+       return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+               return false;
+
+       return true;
+}
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
-       if (bio->bi_vcnt)
-               return bio_iovec(bio)->bv_len;
+       if (bio_has_data(bio))
+               return bio_iovec(bio).bv_len;
        else /* dataless requests such as discard */
-               return bio->bi_size;
+               return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
 {
-       if (bio->bi_vcnt)
+       if (bio_has_data(bio))
                return page_address(bio_page(bio)) + bio_offset(bio);
 
        return NULL;
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
  * permanent PIO fall back, user is probably better off disabling highmem
  * I/O completely on that queue (see ide-dma for example)
  */
-#define __bio_kmap_atomic(bio, idx)                            \
-       (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) +    \
-       bio_iovec_idx((bio), (idx))->bv_offset)
+#define __bio_kmap_atomic(bio, iter)                           \
+       (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) +   \
+       bio_iter_iovec((bio), (iter)).bv_offset)
 
 #define __bio_kunmap_atomic(addr)      kunmap_atomic(addr)
 
 /*
  * merge helpers etc
  */
 
-#define __BVEC_END(bio)                bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)      bio_iovec_idx((bio), (bio)->bi_idx)
-
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)    \
        ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
        (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
-#define BIO_SEG_BOUNDARY(q, b1, b2) \
-       BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
 
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
- * drivers should not use the __ version unless they _really_ know what
- * they're doing
- */
-#define __bio_for_each_segment(bvl, bio, i, start_idx)                 \
-       for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
-            i < (bio)->bi_vcnt;                                        \
-            bvl++, i++)
-
-/*
  * drivers should _never_ use the all version - the bio may have been split
  * before it got to the driver and the driver won't own all of it
  */
 #define bio_for_each_segment_all(bvl, bio, i)                          \
-       for (i = 0;                                                     \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+       for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
+
+static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
+                                    unsigned bytes)
+{
+       WARN_ONCE(bytes > iter->bi_size,
+                 "Attempted to advance past end of bvec iter\n");
+
+       while (bytes) {
+               unsigned len = min(bytes, bvec_iter_len(bv, *iter));
+
+               bytes -= len;
+               iter->bi_size -= len;
+               iter->bi_bvec_done += len;
+
+               if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
+                       iter->bi_bvec_done = 0;
+                       iter->bi_idx++;
+               }
+       }
+}
+
+#define for_each_bvec(bvl, bio_vec, iter, start)                       \
+       for ((iter) = start;                                            \
+            (bvl) = bvec_iter_bvec((bio_vec), (iter)),                 \
+            (iter).bi_size;                                            \
+            bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
+
+
+static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
+                                   unsigned bytes)
+{
+       iter->bi_sector += bytes >> 9;
+
+       if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
+               iter->bi_size -= bytes;
+       else
+               bvec_iter_advance(bio->bi_io_vec, iter, bytes);
+}
 
-#define bio_for_each_segment(bvl, bio, i)                              \
-       for (i = (bio)->bi_idx;                                         \
-            bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;       \
-            i++)
+#define __bio_for_each_segment(bvl, bio, iter, start)                  \
+       for (iter = (start);                                            \
+            (iter).bi_size &&                                          \
+               ((bvl = bio_iter_iovec((bio), (iter))), 1);             \
+            bio_advance_iter((bio), &(iter), (bvl).bv_len))
+
+#define bio_for_each_segment(bvl, bio, iter)                           \
+       __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
+
+#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
+
+static inline unsigned bio_segments(struct bio *bio)
+{
+       unsigned segs = 0;
+       struct bio_vec bv;
+       struct bvec_iter iter;
+
+       bio_for_each_segment(bv, bio, iter)
+               segs++;
+
+       return segs;
+}
 
 /*
  * get a reference to a bio, so it won't disappear. the intended use is
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
 struct bio_integrity_payload {
        struct bio              *bip_bio;       /* parent bio */
 
-       sector_t                bip_sector;     /* virtual start sector */
+       struct bvec_iter        bip_iter;
 
+       /* kill - should just use bip_vec */
        void                    *bip_buf;       /* generated integrity data */
-       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
-       unsigned int            bip_size;
+       bio_end_io_t            *bip_end_io;    /* saved I/O completion fn */
 
        unsigned short          bip_slab;       /* slab the bip came from */
        unsigned short          bip_vcnt;       /* # of integrity bio_vecs */
-       unsigned short          bip_idx;        /* current bip_vec index */
        unsigned                bip_owns_buf:1; /* should free bip_buf */
 
        struct work_struct      bip_work;       /* I/O completion */
@@ -196,29 +297,28 @@ struct bio_integrity_payload {
 };
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
-/*
- * A bio_pair is used when we need to split a bio.
- * This can only happen for a bio that refers to just one
- * page of data, and in the unusual situation when the
- * page crosses a chunk/device boundary
+extern void bio_trim(struct bio *bio, int offset, int size);
+extern struct bio *bio_split(struct bio *bio, int sectors,
+                            gfp_t gfp, struct bio_set *bs);
+
+/**
+ * bio_next_split - get next @sectors from a bio, splitting if necessary
+ * @bio:       bio to split
+ * @sectors:   number of sectors to split from the front of @bio
+ * @gfp:       gfp mask
+ * @bs:        bio set to allocate from
  *
- * The address of the master bio is stored in bio1.bi_private
- * The address of the pool the pair was allocated from is stored
- * in bio2.bi_private
+ * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * than @sectors, returns the original bio unchanged.
  */
-struct bio_pair {
-       struct bio                      bio1, bio2;
-       struct bio_vec                  bv1, bv2;
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-       struct bio_integrity_payload    bip1, bip2;
-       struct bio_vec                  iv1, iv2;
-#endif
-       atomic_t                        cnt;
-       int                             error;
-};
-extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
-extern void bio_pair_release(struct bio_pair *dbio);
-extern void bio_trim(struct bio *bio, int offset, int size);
+static inline struct bio *bio_next_split(struct bio *bio, int sectors,
+                                        gfp_t gfp, struct bio_set *bs)
+{
+       if (sectors >= bio_sectors(bio))
+               return bio;
+
+       return bio_split(bio, sectors, gfp, bs);
+}
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
@@ -227,7 +327,8 @@ extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
-extern void __bio_clone(struct bio *, struct bio *);
+extern void __bio_clone_fast(struct bio *, struct bio *);
+extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
 extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
 
 extern struct bio_set *fs_bio_set;
@@ -254,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
 }
 
 extern void bio_endio(struct bio *, int);
+extern void bio_endio_nodec(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
 
@@ -262,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
 
 extern void bio_init(struct bio *);
 extern void bio_reset(struct bio *);
+void bio_chain(struct bio *, struct bio *);
 
 extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
 extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
                           unsigned int, unsigned int);
 extern int bio_get_nr_vecs(struct block_device *);
-extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
 extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
                                unsigned long, unsigned int, int, gfp_t);
 struct sg_iovec;
@@ -357,48 +459,18 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
 }
 #endif
 
-static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
+static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
                                   unsigned long *flags)
 {
-       return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
+       return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
 }
 #define __bio_kunmap_irq(buf, flags)   bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-       __bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+       __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
 #define bio_kunmap_irq(buf,flags)      __bio_kunmap_irq(buf, flags)
 
 /*
- * Check whether this bio carries any data or not. A NULL bio is allowed.
- */
-static inline bool bio_has_data(struct bio *bio)
-{
-       if (bio && bio->bi_vcnt)
-               return true;
-
-       return false;
-}
-
-static inline bool bio_is_rw(struct bio *bio)
-{
-       if (!bio_has_data(bio))
-               return false;
-
-       if (bio->bi_rw & REQ_WRITE_SAME)
-               return false;
-
-       return true;
-}
-
-static inline bool bio_mergeable(struct bio *bio)
-{
-       if (bio->bi_rw & REQ_NOMERGE_FLAGS)
-               return false;
-
-       return true;
-}
-
-/*
  * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
  *
  * A bio_list anchors a singly-linked list of bios chained through the bi_next
@@ -559,16 +631,12 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)           bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)                     \
-       for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);    \
-            i < (bip)->bip_vcnt;                                       \
-            bvl++, i++)
 
-#define bip_for_each_vec(bvl, bip, i)                                  \
-       __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter)                               \
+       for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)                  \
        for_each_bio(_bio)                                              \
@@ -586,7 +654,6 @@ extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +697,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
        return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-                                      int sectors)
-{
-       return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
                                         unsigned int bytes_done)
 {
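With bio_pair and the old two-argument bio_split() gone, splitting is now an allocation from a bio_set plus chaining through bi_remaining. A hedged sketch of the pattern the new helpers enable (the function name, the bio_set argument, and the submission path are illustrative, not taken from this merge):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Sketch: split a bio at a device boundary and chain the pieces, assuming
     * the caller owns a bio_set created earlier with bioset_create(). */
    static void example_split_and_submit(struct bio *bio, int boundary_sectors,
                                         struct bio_set *bs)
    {
            struct bio *split;

            /* bio_next_split() returns @bio unchanged if it already fits. */
            split = bio_next_split(bio, boundary_sectors, GFP_NOIO, bs);
            if (split != bio)
                    bio_chain(split, bio);  /* parent completes only after both end */

            generic_make_request(split);
            if (split != bio)
                    generic_make_request(bio);
    }

The design point is that the remainder is still the original bio, merely advanced; no second bio_vec array is cloned and no special bio_pair completion glue is needed.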
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ab0e9b2025b3..161b23105b1e 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -113,7 +113,6 @@ enum {
 };
 
 struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
-void blk_mq_free_queue(struct request_queue *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
 void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
@@ -159,16 +158,16 @@ static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
 }
 
 #define queue_for_each_hw_ctx(q, hctx, i)                              \
-       for ((i) = 0, hctx = (q)->queue_hw_ctx[0];                      \
-            (i) < (q)->nr_hw_queues; (i)++, hctx = (q)->queue_hw_ctx[i])
+       for ((i) = 0; (i) < (q)->nr_hw_queues &&                        \
+            ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
 
 #define queue_for_each_ctx(q, ctx, i)                                  \
-       for ((i) = 0, ctx = per_cpu_ptr((q)->queue_ctx, 0);             \
-            (i) < (q)->nr_queues; (i)++, ctx = per_cpu_ptr(q->queue_ctx, (i)))
+       for ((i) = 0; (i) < (q)->nr_queues &&                           \
+            ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)
 
 #define hctx_for_each_ctx(hctx, ctx, i)                                        \
-       for ((i) = 0, ctx = (hctx)->ctxs[0];                            \
-            (i) < (hctx)->nr_ctx; (i)++, ctx = (hctx)->ctxs[(i)])
+       for ((i) = 0; (i) < (hctx)->nr_ctx &&                           \
+            ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
 
 #define blk_ctx_sum(q, sum)                                            \
 ({                                                                     \
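The macro rewrite moves the element load into the loop condition, so queue_hw_ctx[0] is no longer read when there are zero hardware queues and the final increment no longer indexes one slot past the end of the array. Call sites look the same; a hedged usage sketch (the counting function is illustrative):

    #include <linux/blk-mq.h>

    /* Sketch: count hardware queues that actually have software contexts mapped. */
    static unsigned int example_count_mapped_hw_queues(struct request_queue *q)
    {
            struct blk_mq_hw_ctx *hctx;
            unsigned int i, n = 0;

            queue_for_each_hw_ctx(q, hctx, i)
                    if (hctx->nr_ctx)
                            n++;

            return n;
    }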
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 238ef0ed62f8..bbc3a6c88fce 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -28,13 +28,22 @@ struct bio_vec {
        unsigned int    bv_offset;
 };
 
+struct bvec_iter {
+       sector_t                bi_sector;      /* device address in 512 byte
+                                                  sectors */
+       unsigned int            bi_size;        /* residual I/O count */
+
+       unsigned int            bi_idx;         /* current index into bvl_vec */
+
+       unsigned int            bi_bvec_done;   /* number of bytes completed in
+                                                  current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-       sector_t                bi_sector;      /* device address in 512 byte
-                                                  sectors */
        struct bio              *bi_next;       /* request queue link */
        struct block_device     *bi_bdev;
        unsigned long           bi_flags;       /* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
         * top bits priority
         */
 
-       unsigned short          bi_vcnt;        /* how many bio_vec's */
-       unsigned short          bi_idx;         /* current index into bvl_vec */
+       struct bvec_iter        bi_iter;
 
        /* Number of segments in this BIO after
         * physical address coalescing is performed.
         */
        unsigned int            bi_phys_segments;
 
-       unsigned int            bi_size;        /* residual I/O count */
-
        /*
         * To keep track of the max segment size, we account for the
         * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
        unsigned int            bi_seg_front_size;
        unsigned int            bi_seg_back_size;
 
+       atomic_t                bi_remaining;
+
        bio_end_io_t            *bi_end_io;
 
        void                    *bi_private;
@@ -74,11 +82,13 @@ struct bio {
        struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+       unsigned short          bi_vcnt;        /* how many bio_vec's */
+
        /*
         * Everything starting with bi_max_vecs will be preserved by bio_reset()
         */
 
-       unsigned int            bi_max_vecs;    /* max bvl_vecs we can hold */
+       unsigned short          bi_max_vecs;    /* max bvl_vecs we can hold */
 
        atomic_t                bi_cnt;         /* pin count */
 
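For out-of-tree drivers, the mechanical part of the conversion is that bi_sector, bi_size and bi_idx have moved into the embedded bi_iter. A hedged before-and-after sketch (the accounting function is illustrative, not from the patch):

    #include <linux/blk_types.h>
    #include <linux/printk.h>

    /* Sketch: the same start-sector / residual-size reads, old vs. new layout. */
    static void example_account(struct bio *bio)
    {
            /* pre-3.14:  sector_t s = bio->bi_sector;
             *            unsigned int left = bio->bi_size;      */
            sector_t s = bio->bi_iter.bi_sector;        /* now inside bvec_iter */
            unsigned int left = bio->bi_iter.bi_size;

            pr_debug("bio starts at sector %llu, %u bytes remaining\n",
                     (unsigned long long)s, left);
    }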
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b135d49b279..02cb6f0ea71d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -735,7 +735,7 @@ struct rq_map_data {
 };
 
 struct req_iterator {
-       int i;
+       struct bvec_iter iter;
        struct bio *bio;
 };
 
@@ -748,10 +748,11 @@ struct req_iterator {
 
 #define rq_for_each_segment(bvl, _rq, _iter)                   \
        __rq_for_each_bio(_iter.bio, _rq)                       \
-               bio_for_each_segment(bvl, _iter.bio, _iter.i)
+               bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)                                        \
-               (_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)                              \
+               (_iter.bio->bi_next == NULL &&                  \
+                bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
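struct req_iterator now carries a bvec_iter instead of an integer index, so request-level iteration also hands out bio_vecs by value and rq_iter_last() takes the current bvec rather than the request. A hedged sketch of a driver loop under the new definitions (the helper name and the debug print are illustrative):

    #include <linux/blkdev.h>
    #include <linux/printk.h>

    /* Sketch: walk every segment of every bio in a request. */
    static unsigned int example_request_bytes(struct request *rq)
    {
            struct req_iterator iter;
            struct bio_vec bvec;
            unsigned int bytes = 0;

            rq_for_each_segment(bvec, rq, iter) {
                    bytes += bvec.bv_len;
                    if (rq_iter_last(bvec, iter))
                            pr_debug("last segment of the request reached\n");
            }

            return bytes;
    }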
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 20ee8b63a968..d21f2dba0731 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
 #ifdef CONFIG_BLOCK
        struct {                                /* bio */
                struct bio      *bio;           /* bio from list */
-               unsigned int    vector_index;   /* vector from bio */
-               unsigned int    vector_offset;  /* bytes from vector */
+               struct bvec_iter        bvec_iter;
        };
 #endif /* CONFIG_BLOCK */
        struct {                                /* pages */
diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h
index a0f9280421ec..2e6dce6e5c2a 100644
--- a/include/linux/cmdline-parser.h
+++ b/include/linux/cmdline-parser.h
@@ -37,9 +37,9 @@ int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline);
 struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts,
                                          const char *bdev);
 
-void cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
-                      int slot,
-                      int (*add_part)(int, struct cmdline_subpart *, void *),
-                      void *param);
+int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size,
+                     int slot,
+                     int (*add_part)(int, struct cmdline_subpart *, void *),
+                     void *param);
 
 #endif /* CMDLINEPARSEH */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index f4b0aa3126f5..a68cbe59e6ad 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
        DM_IO_PAGE_LIST,/* Page list */
-       DM_IO_BVEC,     /* Bio vector */
+       DM_IO_BIO,      /* Bio vector */
        DM_IO_VMA,      /* Virtual memory area */
        DM_IO_KMEM,     /* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
        union {
                struct page_list *pl;
-               struct bio_vec *bvec;
+               struct bio *bio;
                void *vma;
                void *addr;
        } ptr;
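dm-io callers now describe their data by pointing at a whole bio, whose position and length come from its bi_iter, instead of passing a bare bio_vec array. A hedged sketch of filling the new union member (the surrounding region, notify and client setup is elided and illustrative):

    #include <linux/bio.h>
    #include <linux/dm-io.h>

    /* Sketch: describe the payload of a dm_io request with a bio. */
    static void example_fill_mem(struct dm_io_request *io_req, struct bio *bio)
    {
            io_req->mem.type = DM_IO_BIO;   /* was DM_IO_BVEC with ptr.bvec */
            io_req->mem.ptr.bio = bio;      /* offset and length follow bio->bi_iter */
    }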