-rw-r--r--  fs/bio.c             | 110
-rw-r--r--  include/linux/bio.h  |  16
2 files changed, 49 insertions, 77 deletions
diff --git a/fs/bio.c b/fs/bio.c
index 736ef12f5191..191b9b86c272 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  * IO code that does not need private memory pools.
  */
 struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
 
 /*
  * Our slab pool management
@@ -301,39 +302,58 @@ EXPORT_SYMBOL(bio_reset);
  * @bs: the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
- *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free.
- **/
+ *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ *   backed by the @bs's mempool.
+ *
+ *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ *   able to allocate a bio. This is due to the mempool guarantees. To make this
+ *   work, callers must never allocate more than 1 bio at a time from this pool.
+ *   Callers that need to allocate more than 1 bio must always submit the
+ *   previously allocated bio for IO before attempting to allocate a new one.
+ *   Failure to do so can cause deadlocks under memory pressure.
+ *
+ *   RETURNS:
+ *   Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned front_pad;
+	unsigned inline_vecs;
 	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
 	struct bio *bio;
 	void *p;
 
-	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (!bs) {
+		if (nr_iovecs > UIO_MAXIOV)
+			return NULL;
+
+		p = kmalloc(sizeof(struct bio) +
+			    nr_iovecs * sizeof(struct bio_vec),
+			    gfp_mask);
+		front_pad = 0;
+		inline_vecs = nr_iovecs;
+	} else {
+		p = mempool_alloc(bs->bio_pool, gfp_mask);
+		front_pad = bs->front_pad;
+		inline_vecs = BIO_INLINE_VECS;
+	}
+
 	if (unlikely(!p))
 		return NULL;
-	bio = p + bs->front_pad;
 
+	bio = p + front_pad;
 	bio_init(bio);
-	bio->bi_pool = bs;
-
-	if (unlikely(!nr_iovecs))
-		goto out_set;
 
-	if (nr_iovecs <= BIO_INLINE_VECS) {
-		bvl = bio->bi_inline_vecs;
-		nr_iovecs = BIO_INLINE_VECS;
-	} else {
+	if (nr_iovecs > inline_vecs) {
 		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 		if (unlikely(!bvl))
 			goto err_free;
-
-		nr_iovecs = bvec_nr_vecs(idx);
+	} else if (nr_iovecs) {
+		bvl = bio->bi_inline_vecs;
 	}
-out_set:
+
+	bio->bi_pool = bs;
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
 	bio->bi_io_vec = bvl;
@@ -345,62 +365,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask: allocation mask to use
- * @nr_iovecs: number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-}
-EXPORT_SYMBOL(bio_alloc);
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask: the GFP_ mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
- *
- * Description:
- *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
- *   %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	struct bio *bio;
-
-	if (nr_iovecs > UIO_MAXIOV)
-		return NULL;
-
-	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
-		      gfp_mask);
-	if (unlikely(!bio))
-		return NULL;
-
-	bio_init(bio);
-	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
-	bio->bi_max_vecs = nr_iovecs;
-	bio->bi_io_vec = bio->bi_inline_vecs;
-
-	return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
 void zero_fill_bio(struct bio *bio)
 {
 	unsigned long flags;
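
With this change bio_alloc_bioset() covers both allocation strategies itself: a NULL @bs takes the kmalloc() path that bio_kmalloc() used to implement, while a non-NULL @bs draws from that bio_set's mempool. As the updated kernel-doc stresses, the mempool guarantee only holds if a caller never sits on more than one unsubmitted bio from the same pool. A minimal caller-side sketch of that rule, using the now-exported fs_bio_set (illustrative only, not part of this patch; the function names and the per-page chunking are made up):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative completion handler: just drop our reference. */
static void one_page_write_end_io(struct bio *bio, int error)
{
	bio_put(bio);
}

/*
 * Sketch: allocate one bio at a time from the mempool-backed fs_bio_set and
 * submit it before allocating the next, so the mempool can always make
 * forward progress even under memory pressure.
 */
static void submit_one_page_writes(struct block_device *bdev, sector_t sector,
				   struct page **pages, unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++) {
		/* GFP_NOIO includes __GFP_WAIT, so this cannot return NULL. */
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, fs_bio_set);

		bio->bi_bdev = bdev;
		bio->bi_sector = sector + i * (PAGE_SIZE >> 9);
		bio->bi_end_io = one_page_write_end_io;
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/* Submit before looping around to allocate another bio. */
		submit_bio(WRITE, bio);
	}
}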
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 04944c91fae7..fbe35b175555 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -212,11 +212,21 @@ extern void bio_pair_release(struct bio_pair *dbio);
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
 
-extern struct bio *bio_alloc(gfp_t, unsigned int);
-extern struct bio *bio_kmalloc(gfp_t, unsigned int);
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 
+extern struct bio_set *fs_bio_set;
+
+static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+}
+
+static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+}
+
 extern void bio_endio(struct bio *, int);
 struct request_queue;
 extern int bio_phys_segments(struct request_queue *, struct bio *);
@@ -304,8 +314,6 @@ struct biovec_slab {
 	struct kmem_cache *slab;
 };
 
-extern struct bio_set *fs_bio_set;
-
 /*
  * a small number of entries is fine, not going to be performance critical.
  * basically we just need to survive
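
With bio_alloc() and bio_kmalloc() reduced to static inline wrappers, driver code that wants the same mempool guarantee against a private pool follows the same pattern, only with a bio_set of its own created through bioset_create(). A hedged sketch (the example_* names are illustrative, not from this patch):

#include <linux/bio.h>
#include <linux/errno.h>

/* Hypothetical driver-private pool; mirrors what bio_alloc() does with fs_bio_set. */
static struct bio_set *example_bio_set;

static int example_pool_init(void)
{
	/* 4 reserved bios, no extra front padding ahead of each struct bio. */
	example_bio_set = bioset_create(4, 0);
	return example_bio_set ? 0 : -ENOMEM;
}

static struct bio *example_alloc_bio(unsigned int nr_vecs)
{
	/* Mempool-backed: with __GFP_WAIT set this will not return NULL. */
	return bio_alloc_bioset(GFP_NOIO, nr_vecs, example_bio_set);
}

static void example_pool_exit(void)
{
	bioset_free(example_bio_set);
}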