Diffstat (limited to 'fs/bio.c')

 -rw-r--r--   fs/bio.c |  88
 1 file changed, 52 insertions(+), 36 deletions(-)

diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -28,25 +28,10 @@
 #include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-#define BIO_POOL_SIZE 2
-
 static struct kmem_cache *bio_slab __read_mostly;
 
-#define BIOVEC_NR_POOLS 6
-
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
 mempool_t *bio_split_pool __read_mostly;
 
-struct biovec_slab {
-	int nr_vecs;
-	char *name;
-	struct kmem_cache *slab;
-};
-
 /*
  * if you change this list, also change bvec_alloc or things will
  * break badly! cannot be bigger than what you can fit into an
@@ -60,23 +45,17 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 #undef BV
 
 /*
- * bio_set is used to allow other portions of the IO system to
- * allocate their own private memory pools for bio and iovec structures.
- * These memory pools in turn all allocate from the bio_slab
- * and the bvec_slabs[].
- */
-struct bio_set {
-	mempool_t *bio_pool;
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
-};
-
-/*
  * fs_bio_set is the bio_set containing bio and iovec memory pools used by
  * IO code that does not need private memory pools.
  */
-static struct bio_set *fs_bio_set;
+struct bio_set *fs_bio_set;
+
+unsigned int bvec_nr_vecs(unsigned short idx)
+{
+	return bvec_slabs[idx].nr_vecs;
+}
 
-static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
 	struct bio_vec *bvl;
 
@@ -117,6 +96,9 @@ void bio_free(struct bio *bio, struct bio_set *bio_set)
 		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
 	}
 
+	if (bio_integrity(bio))
+		bio_integrity_free(bio, bio_set);
+
 	mempool_free(bio, bio_set->bio_pool);
 }
 
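bio_free() now also returns any attached integrity payload to the bio_set the bio was allocated from. For context, a driver with a private bio_set reaches this path through its bi_destructor; a rough sketch of that pairing, with illustrative names that are not part of this patch (assumes <linux/bio.h>):

static struct bio_set *example_bs;	/* hypothetical private pool, created below via bioset_create() */

static void example_bio_destructor(struct bio *bio)
{
	/* hands the bio, its bvecs and (now) any integrity payload back to example_bs */
	bio_free(bio, example_bs);
}

static struct bio *example_bio_alloc(gfp_t gfp_mask, int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_vecs, example_bs);

	if (bio)
		bio->bi_destructor = example_bio_destructor;
	return bio;
}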
@@ -275,9 +257,19 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 {
 	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
 
-	if (b) {
-		b->bi_destructor = bio_fs_destructor;
-		__bio_clone(b, bio);
+	if (!b)
+		return NULL;
+
+	b->bi_destructor = bio_fs_destructor;
+	__bio_clone(b, bio);
+
+	if (bio_integrity(bio)) {
+		int ret;
+
+		ret = bio_integrity_clone(b, bio, fs_bio_set);
+
+		if (ret < 0)
+			return NULL;
 	}
 
 	return b;
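Note the reworked control flow: bio_clone() now returns NULL both when the bio itself cannot be allocated and when cloning the integrity metadata fails, so callers keep the single NULL check they already needed. A minimal, hypothetical caller:

static int example_resubmit(struct bio *bio)
{
	struct bio *clone = bio_clone(bio, GFP_NOIO);	/* gfp flag is illustrative */

	if (!clone)
		return -ENOMEM;	/* covers allocation failure and a failed bio_integrity_clone() */

	generic_make_request(clone);
	return 0;
}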
@@ -333,10 +325,19 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
 			prev->bv_len += len;
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, bio, prev) < len) {
-				prev->bv_len -= len;
-				return 0;
+
+			if (q->merge_bvec_fn) {
+				struct bvec_merge_data bvm = {
+					.bi_bdev = bio->bi_bdev,
+					.bi_sector = bio->bi_sector,
+					.bi_size = bio->bi_size,
+					.bi_rw = bio->bi_rw,
+				};
+
+				if (q->merge_bvec_fn(q, &bvm, prev) < len) {
+					prev->bv_len -= len;
+					return 0;
+				}
 			}
 
 			goto done;
@@ -377,11 +378,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * queue to get further control
 	 */
 	if (q->merge_bvec_fn) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev = bio->bi_bdev,
+			.bi_sector = bio->bi_sector,
+			.bi_size = bio->bi_size,
+			.bi_rw = bio->bi_rw,
+		};
+
 		/*
 		 * merge_bvec_fn() returns number of bytes it can accept
 		 * at this offset
 		 */
-		if (q->merge_bvec_fn(q, bio, bvec) < len) {
+		if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
 			bvec->bv_page = NULL;
 			bvec->bv_len = 0;
 			bvec->bv_offset = 0;
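Both __bio_add_page() call sites now build a struct bvec_merge_data instead of exposing the bio itself, so a device's merge_bvec_fn only sees the target bdev, the starting sector, the bytes already queued and the rw direction. Under the new calling convention, a stacking driver's hook would look roughly like the sketch below; the chunk size and all names are illustrative, not taken from this patch (assumes <linux/blkdev.h> and <linux/genhd.h>):

/* hypothetical: accept only the bytes that fit before an assumed 64KB chunk boundary */
static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec)
{
	unsigned int chunk_sectors = 128;	/* illustrative 64KB stripe chunk */
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	unsigned int offset = sector & (chunk_sectors - 1);
	int max;

	/*
	 * bytes from the bio's start sector to the next chunk boundary,
	 * minus what the bio already contains
	 */
	max = ((chunk_sectors - offset) << 9) - bvm->bi_size;
	if (max < 0)
		max = 0;	/* must not return a negative byte count */

	return max < biovec->bv_len ? max : biovec->bv_len;
}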
@@ -1249,6 +1257,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	bp->bio1.bi_private = bi;
 	bp->bio2.bi_private = pool;
 
+	if (bio_integrity(bi))
+		bio_integrity_split(bi, bp, first_sectors);
+
 	return bp;
 }
 
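bio_split() now carries the integrity payload over to both halves as well. For reference, a caller that splits an I/O straddling a boundary typically looks like the sketch below, using the bio_split_pool declared at the top of this file; the surrounding driver code and names are hypothetical:

/*
 * illustrative: split 'bio' so the first fragment is 'split_sectors' long,
 * then submit both halves and drop the pair's reference
 */
static void example_split_and_submit(struct bio *bio, int split_sectors)
{
	struct bio_pair *bp = bio_split(bio, bio_split_pool, split_sectors);

	generic_make_request(&bp->bio1);
	generic_make_request(&bp->bio2);
	bio_pair_release(bp);
}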
@@ -1290,6 +1301,7 @@ void bioset_free(struct bio_set *bs)
 	if (bs->bio_pool)
 		mempool_destroy(bs->bio_pool);
 
+	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
 
 	kfree(bs);
@@ -1306,6 +1318,9 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
 	if (!bs->bio_pool)
 		goto bad;
 
+	if (bioset_integrity_create(bs, bio_pool_size))
+		goto bad;
+
 	if (!biovec_create_pools(bs, bvec_pool_size))
 		return bs;
 
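bioset_create() now also reserves per-set integrity mempools, sized from the bio pool size, and bioset_free() above releases them again. A driver-private set is created and torn down roughly as follows; the pool sizes are illustrative and example_bs is the same hypothetical set used in the destructor sketch earlier:

static int __init example_init(void)
{
	example_bs = bioset_create(4, 4);	/* reserved bios / reserved bvec-pool entries */
	if (!example_bs)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	bioset_free(example_bs);	/* also frees the set's integrity pools */
}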
@@ -1332,6 +1347,7 @@ static int __init init_bio(void)
 {
 	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
 
+	bio_integrity_init_slab();
 	biovec_init_slabs();
 
 	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);