Diffstat (limited to 'fs/bio.c')

 -rw-r--r--  fs/bio.c  231
 1 file changed, 105 insertions, 126 deletions
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  * IO code that does not need private memory pools.
  */
 struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
 
 /*
  * Our slab pool management
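With fs_bio_set exported, modular code no longer needs a wrapper to reach the shared pool; it can pass the set straight to bio_alloc_bioset(). A minimal sketch of a module-side caller (the function name is hypothetical, assuming the 3.7-era API):

static struct bio *example_alloc_from_fs_pool(unsigned int nr_vecs)
{
	/* Backed by fs_bio_set's mempool; with __GFP_WAIT (part of GFP_NOIO)
	 * the mempool guarantee means this cannot fail. */
	return bio_alloc_bioset(GFP_NOIO, nr_vecs, fs_bio_set);
}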
@@ -233,26 +234,37 @@ fallback:
 	return bvl;
 }
 
-void bio_free(struct bio *bio, struct bio_set *bs)
+static void __bio_free(struct bio *bio)
 {
+	bio_disassociate_task(bio);
+
+	if (bio_integrity(bio))
+		bio_integrity_free(bio);
+}
+
+static void bio_free(struct bio *bio)
+{
+	struct bio_set *bs = bio->bi_pool;
 	void *p;
 
-	if (bio_has_allocated_vec(bio))
-		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+	__bio_free(bio);
 
-	if (bio_integrity(bio))
-		bio_integrity_free(bio, bs);
+	if (bs) {
+		if (bio_has_allocated_vec(bio))
+			bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
 
 		/*
 		 * If we have front padding, adjust the bio pointer before freeing
 		 */
 		p = bio;
-		if (bs->front_pad)
 		p -= bs->front_pad;
 
 		mempool_free(p, bs->bio_pool);
+	} else {
+		/* Bio was allocated by bio_kmalloc() */
+		kfree(bio);
+	}
 }
-EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
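The p -= bs->front_pad step undoes the offset applied at allocation time: a bio_set created with a non-zero front_pad hands out mempool elements laid out as [driver padding][struct bio], so the free path must step back to the true start of the allocation before calling mempool_free(). A sketch of how a driver uses that padding (the struct and function names are hypothetical; bioset_create()'s second argument is the real mechanism):

struct my_pad {
	void *driver_cookie;	/* hypothetical per-bio driver state */
};

static struct bio_set *my_bs;

static int my_pool_init(void)
{
	/* Every bio from my_bs is preceded by sizeof(struct my_pad) bytes. */
	my_bs = bioset_create(BIO_POOL_SIZE, sizeof(struct my_pad));
	return my_bs ? 0 : -ENOMEM;
}

static struct my_pad *my_pad_of(struct bio *bio)
{
	/* Inverse of bio_free(): step back over the front padding. */
	return (struct my_pad *)((void *)bio - sizeof(struct my_pad));
}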
@@ -263,48 +275,85 @@ void bio_init(struct bio *bio)
 EXPORT_SYMBOL(bio_init);
 
 /**
+ * bio_reset - reinitialize a bio
+ * @bio:	bio to reset
+ *
+ * Description:
+ *   After calling bio_reset(), @bio will be in the same state as a freshly
+ *   allocated bio returned by bio_alloc_bioset() - the only fields that are
+ *   preserved are the ones that are initialized by bio_alloc_bioset(). See
+ *   comment in struct bio.
+ */
+void bio_reset(struct bio *bio)
+{
+	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+
+	__bio_free(bio);
+
+	memset(bio, 0, BIO_RESET_BYTES);
+	bio->bi_flags = flags|(1 << BIO_UPTODATE);
+}
+EXPORT_SYMBOL(bio_reset);
+
+/**
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:	the GFP_ mask given to the slab allocator
  * @nr_iovecs:	number of iovecs to pre-allocate
  * @bs:		the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
- *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free.
+ *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ *   backed by the @bs's mempool.
 *
- *   Note that the caller must set ->bi_destructor on successful return
- *   of a bio, to do the appropriate freeing of the bio once the reference
- *   count drops to zero.
- **/
+ *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ *   able to allocate a bio. This is due to the mempool guarantees. To make this
+ *   work, callers must never allocate more than 1 bio at a time from this pool.
+ *   Callers that need to allocate more than 1 bio must always submit the
+ *   previously allocated bio for IO before attempting to allocate a new one.
+ *   Failure to do so can cause deadlocks under memory pressure.
+ *
+ * RETURNS:
+ *   Pointer to new bio on success, NULL on failure.
+ */
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
+	unsigned front_pad;
+	unsigned inline_vecs;
 	unsigned long idx = BIO_POOL_NONE;
 	struct bio_vec *bvl = NULL;
 	struct bio *bio;
 	void *p;
 
-	p = mempool_alloc(bs->bio_pool, gfp_mask);
+	if (!bs) {
+		if (nr_iovecs > UIO_MAXIOV)
+			return NULL;
+
+		p = kmalloc(sizeof(struct bio) +
+			    nr_iovecs * sizeof(struct bio_vec),
+			    gfp_mask);
+		front_pad = 0;
+		inline_vecs = nr_iovecs;
+	} else {
+		p = mempool_alloc(bs->bio_pool, gfp_mask);
+		front_pad = bs->front_pad;
+		inline_vecs = BIO_INLINE_VECS;
+	}
+
 	if (unlikely(!p))
 		return NULL;
-	bio = p + bs->front_pad;
 
+	bio = p + front_pad;
 	bio_init(bio);
 
-	if (unlikely(!nr_iovecs))
-		goto out_set;
-
-	if (nr_iovecs <= BIO_INLINE_VECS) {
-		bvl = bio->bi_inline_vecs;
-		nr_iovecs = BIO_INLINE_VECS;
-	} else {
+	if (nr_iovecs > inline_vecs) {
 		bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
 		if (unlikely(!bvl))
 			goto err_free;
-
-		nr_iovecs = bvec_nr_vecs(idx);
+	} else if (nr_iovecs) {
+		bvl = bio->bi_inline_vecs;
 	}
-out_set:
+
+	bio->bi_pool = bs;
 	bio->bi_flags |= idx << BIO_POOL_OFFSET;
 	bio->bi_max_vecs = nr_iovecs;
 	bio->bi_io_vec = bvl;
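The rewritten allocator folds the kmalloc and mempool paths into one function, with @bs selecting the backing store. A hedged sketch of the two call styles and of bio_reset() reuse (hypothetical caller; GFP flags are illustrative):

static void example_unified_alloc(void)
{
	/* Pool-backed: with __GFP_WAIT set (as in GFP_NOIO), this cannot fail. */
	struct bio *a = bio_alloc_bioset(GFP_NOIO, 4, fs_bio_set);

	/* @bs == NULL: plain kmalloc(), bvecs allocated inline; may return NULL. */
	struct bio *b = bio_alloc_bioset(GFP_KERNEL, 4, NULL);

	/* Once a's IO has completed, reuse it without a free/alloc round trip. */
	bio_reset(a);

	bio_put(a);
	if (b)
		bio_put(b);
}

Note the deadlock rule the new comment documents: the mempool guarantee covers only one outstanding allocation, so a caller looping over many bios must submit each one before allocating the next from the same set.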
@@ -316,80 +365,6 @@ err_free:
 }
 EXPORT_SYMBOL(bio_alloc_bioset);
 
-static void bio_fs_destructor(struct bio *bio)
-{
-	bio_free(bio, fs_bio_set);
-}
-
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask:	allocation mask to use
- * @nr_iovecs:	number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
-	if (bio)
-		bio->bi_destructor = bio_fs_destructor;
-
-	return bio;
-}
-EXPORT_SYMBOL(bio_alloc);
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
-	if (bio_integrity(bio))
-		bio_integrity_free(bio, fs_bio_set);
-	kfree(bio);
-}
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask:	the GFP_ mask given to the slab allocator
- * @nr_iovecs:	number of iovecs to pre-allocate
- *
- * Description:
- *   Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
- *   %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	struct bio *bio;
-
-	if (nr_iovecs > UIO_MAXIOV)
-		return NULL;
-
-	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
-		      gfp_mask);
-	if (unlikely(!bio))
-		return NULL;
-
-	bio_init(bio);
-	bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
-	bio->bi_max_vecs = nr_iovecs;
-	bio->bi_io_vec = bio->bi_inline_vecs;
-	bio->bi_destructor = bio_kmalloc_destructor;
-
-	return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
 void zero_fill_bio(struct bio *bio)
 {
 	unsigned long flags;
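The deleted bio_alloc() and bio_kmalloc() bodies do not strand their callers: both reduce to thin wrappers around the unified allocator. A sketch of the presumed replacements (this series keeps them as static inlines, moved to include/linux/bio.h):

static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}

static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}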
@@ -420,11 +395,8 @@ void bio_put(struct bio *bio)
 	/*
 	 * last put frees it
 	 */
-	if (atomic_dec_and_test(&bio->bi_cnt)) {
-		bio_disassociate_task(bio);
-		bio->bi_next = NULL;
-		bio->bi_destructor(bio);
-	}
+	if (atomic_dec_and_test(&bio->bi_cnt))
+		bio_free(bio);
 }
 EXPORT_SYMBOL(bio_put);
 
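With bi_destructor gone, the final reference drop always lands in bio_free(), which dispatches on bio->bi_pool. An illustrative sketch of the two outcomes from the caller's side (hypothetical function, parameters named for their origin):

static void example_last_put(struct bio *pooled, struct bio *kmalloced)
{
	bio_put(pooled);	/* bi_pool set: returned to its bio_set's mempool */
	bio_put(kmalloced);	/* bi_pool == NULL: bio_free() kfree()s it */
}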
@@ -466,26 +438,28 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
 EXPORT_SYMBOL(__bio_clone);
 
 /**
- * bio_clone - clone a bio
+ * bio_clone_bioset - clone a bio
  * @bio: bio to clone
  * @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
  *
  * Like __bio_clone, only also allocates the returned bio
  */
-struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
+			     struct bio_set *bs)
 {
-	struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+	struct bio *b;
 
+	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
 	if (!b)
 		return NULL;
 
-	b->bi_destructor = bio_fs_destructor;
 	__bio_clone(b, bio);
 
 	if (bio_integrity(bio)) {
 		int ret;
 
-		ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+		ret = bio_integrity_clone(b, bio, gfp_mask);
 
 		if (ret < 0) {
 			bio_put(b);
@@ -495,7 +469,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 
 	return b;
 }
-EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_clone_bioset);
 
 /**
  * bio_get_nr_vecs - return approx number of vecs
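Generalizing the clone path over an arbitrary bio_set keeps the old entry point alive as a one-liner; a sketch of the presumed wrapper (the series retains bio_clone() as a static inline over fs_bio_set):

static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
	return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
}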
@@ -1501,7 +1475,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
 				bi->bi_sector + first_sectors);
 
-	BUG_ON(bi->bi_vcnt != 1);
+	BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
 	bp->error = 0;
@@ -1511,17 +1485,22 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 	bp->bio2.bi_size -= first_sectors << 9;
 	bp->bio1.bi_size = first_sectors << 9;
 
-	bp->bv1 = bi->bi_io_vec[0];
-	bp->bv2 = bi->bi_io_vec[0];
-	bp->bv2.bv_offset += first_sectors << 9;
-	bp->bv2.bv_len -= first_sectors << 9;
-	bp->bv1.bv_len = first_sectors << 9;
+	if (bi->bi_vcnt != 0) {
+		bp->bv1 = bi->bi_io_vec[0];
+		bp->bv2 = bi->bi_io_vec[0];
+
+		if (bio_is_rw(bi)) {
+			bp->bv2.bv_offset += first_sectors << 9;
+			bp->bv2.bv_len -= first_sectors << 9;
+			bp->bv1.bv_len = first_sectors << 9;
+		}
 
 		bp->bio1.bi_io_vec = &bp->bv1;
 		bp->bio2.bi_io_vec = &bp->bv2;
 
 		bp->bio1.bi_max_vecs = 1;
 		bp->bio2.bi_max_vecs = 1;
+	}
 
 	bp->bio1.bi_end_io = bio_pair_end_1;
 	bp->bio2.bi_end_io = bio_pair_end_2;
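The relaxed BUG_ON and the bi_vcnt guard let bio_split() handle payload-less bios, e.g. discards, where there is no bio_vec to fix up and only the sector/size bookkeeping applies; the bio_is_rw() check likewise leaves the single bvec of non-read/write commands untouched. A hedged usage sketch (hypothetical caller; the sector count is illustrative):

static struct bio_pair *example_split_discard(struct bio *discard_bio)
{
	/* Split the first 8 sectors (4 KiB) off a discard bio; with
	 * bi_vcnt == 0 the bvec fixups above are skipped entirely. */
	return bio_split(discard_bio, 8);
}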
