author		Kent Overstreet <koverstreet@google.com>	2012-09-06 18:35:01 -0400
committer	Jens Axboe <axboe@kernel.dk>			2012-09-09 04:35:39 -0400
commit		3f86a82aeb03e6100f7ab39f4702e033a5e38166 (patch)
tree		6d598b5b55c78828f13aee9baffe7350ae18c274 /fs/bio.c
parent		4254bba17d92d53822a56ebc2a0c1eb7e2a71155 (diff)
block: Consolidate bio_alloc_bioset(), bio_kmalloc()
Previously, bio_kmalloc() and bio_alloc_bioset() behaved slightly
differently because of some almost-duplicated code; this patch fixes
some of that.
The important change is that previously bio_kmalloc() always set
bi_io_vec = bi_inline_vecs, even when nr_iovecs == 0, unlike
bio_alloc_bioset(). This would cause bio_has_data() to return true; I
don't know whether this resulted in any actual bugs, but it was certainly
wrong.
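For context, a minimal sketch of why that mattered: bio_has_data()
decides whether a bio carries data essentially by checking bi_io_vec,
so pointing bi_io_vec at bi_inline_vecs even for a zero-vec bio flips
its answer. The helper body below is paraphrased from include/linux/bio.h
of this era, not part of this diff:

	/* Paraphrased from include/linux/bio.h circa this patch. */
	static inline bool bio_has_data(struct bio *bio)
	{
		return bio && bio->bi_io_vec != NULL;
	}

	/*
	 * Old bio_kmalloc(gfp, 0) left bi_io_vec = bi_inline_vecs, so
	 * bio_has_data() reported true for a bio with no data attached.
	 */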
bio_kmalloc() and bio_alloc_bioset() also impose different arbitrary
limits on nr_iovecs: 1024 (UIO_MAXIOV) for bio_kmalloc(), 256
(BIO_MAX_PAGES) for bio_alloc_bioset(). This patch doesn't fix that, but
at least the two limits are now enforced closer together, and hopefully
they will be unified in a later patch.
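Concretely, after the patch the two caps sit in one function rather than
in two; a condensed sketch of the new control flow (the BIO_MAX_PAGES
enforcement inside bvec_alloc_bs() is an inference from the commit
message, since bvec_alloc_bs() is not shown in this diff):

	/* Condensed from the patched bio_alloc_bioset(); comments added. */
	if (!bs) {
		/* kmalloc path keeps bio_kmalloc()'s cap: UIO_MAXIOV == 1024 */
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;
		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec), gfp_mask);
	} else {
		/*
		 * mempool path: the later bvec_alloc_bs() call fails for
		 * nr_iovecs > BIO_MAX_PAGES == 256
		 */
		p = mempool_alloc(bs->bio_pool, gfp_mask);
	}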
This will also help with some future cleanups: there are a fair number of
functions that allocate bios (e.g. bio_clone()), and now they don't have
to be duplicated for bio_alloc(), bio_alloc_bioset(), and bio_kmalloc().
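This page shows only fs/bio.c; presumably bio_alloc() and bio_kmalloc()
survive outside this file as thin wrappers that merely pick the bio_set,
roughly as sketched below (their actual definitions are not in this diff):

	/* Hedged sketch of the wrappers implied by the commit message. */
	static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
	}

	static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
	{
		return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
	}

That would presumably also be why the first hunk adds
EXPORT_SYMBOL(fs_bio_set): a wrapper defined in a header leaves modules
referencing the symbol directly.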
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
v7: Re-add dropped comments, improve patch description
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	110
1 file changed, 37 insertions, 73 deletions
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  * IO code that does not need private memory pools.
  */
 struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
 
 /*
  * Our slab pool management
@@ -301,39 +302,58 @@ EXPORT_SYMBOL(bio_reset);
  * @bs:		the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
- *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free.
- **/
+ *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ *   backed by the @bs's mempool.
+ *
+ *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ *   able to allocate a bio. This is due to the mempool guarantees. To make this
+ *   work, callers must never allocate more than 1 bio at a time from this pool.
+ *   Callers that need to allocate more than 1 bio must always submit the
+ *   previously allocated bio for IO before attempting to allocate a new one.
+ *   Failure to do so can cause deadlocks under memory pressure.
+ *
+ * RETURNS:
+ *   Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned front_pad;
+	unsigned inline_vecs;
 	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
 	struct bio *bio;
 	void *p;
 
-	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (!bs) {
+		if (nr_iovecs > UIO_MAXIOV)
+			return NULL;
+
+		p = kmalloc(sizeof(struct bio) +
+			    nr_iovecs * sizeof(struct bio_vec),
+			    gfp_mask);
+		front_pad = 0;
+		inline_vecs = nr_iovecs;
+	} else {
+		p = mempool_alloc(bs->bio_pool, gfp_mask);
+		front_pad = bs->front_pad;
+		inline_vecs = BIO_INLINE_VECS;
+	}
+
 	if (unlikely(!p))
 		return NULL;
-	bio = p + bs->front_pad;
 
+	bio = p + front_pad;
 	bio_init(bio);
-	bio->bi_pool = bs;
-
-	if (unlikely(!nr_iovecs))
-		goto out_set;
 
-	if (nr_iovecs <= BIO_INLINE_VECS) {
-		bvl = bio->bi_inline_vecs;
-		nr_iovecs = BIO_INLINE_VECS;
-	} else {
+	if (nr_iovecs > inline_vecs) {
 		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 		if (unlikely(!bvl))
 			goto err_free;
-
-		nr_iovecs = bvec_nr_vecs(idx);
-	}
-out_set:
+	} else if (nr_iovecs) {
+		bvl = bio->bi_inline_vecs;
+	}
+
+	bio->bi_pool = bs;
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
 	bio->bi_io_vec = bvl;
@@ -345,62 +365,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask:	allocation mask to use
- * @nr_iovecs:	number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-}
-EXPORT_SYMBOL(bio_alloc);
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask:	the GFP_ mask given to the slab allocator
- * @nr_iovecs:	number of iovecs to pre-allocate
- *
- * Description:
- *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
- *   %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	struct bio *bio;
-
-	if (nr_iovecs > UIO_MAXIOV)
-		return NULL;
-
-	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
-		      gfp_mask);
-	if (unlikely(!bio))
-		return NULL;
-
-	bio_init(bio);
-	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
-	bio->bi_max_vecs = nr_iovecs;
-	bio->bi_io_vec = bio->bi_inline_vecs;
-
-	return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
 void zero_fill_bio(struct bio *bio)
 {
 	unsigned long flags;
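
A hedged usage sketch of the consolidated allocator; example_alloc and
its parameters are hypothetical, for illustration only:

	#include <linux/bio.h>

	/*
	 * Hypothetical caller: after this patch the @bs argument alone
	 * selects the backing allocator. fs_bio_set is usable here because
	 * the first hunk above exports it.
	 */
	static struct bio *example_alloc(unsigned int nvecs, bool need_progress)
	{
		if (need_progress)
			/*
			 * mempool-backed: with __GFP_WAIT this cannot fail,
			 * provided the caller holds only one such bio at a time
			 */
			return bio_alloc_bioset(GFP_NOIO, nvecs, fs_bio_set);

		/* kmalloc-backed: no mempool guarantee, may return NULL */
		return bio_alloc_bioset(GFP_KERNEL, nvecs, NULL);
	}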