Diffstat (limited to 'fs/bio.c')
-rw-r--r--  fs/bio.c  |  77
1 file changed, 37 insertions(+), 40 deletions(-)
@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs)
 
         mempool_free(p, bs->bio_pool);
 }
+EXPORT_SYMBOL(bio_free);
 
 void bio_init(struct bio *bio)
 {
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio)
         bio->bi_comp_cpu = -1;
         atomic_set(&bio->bi_cnt, 1);
 }
+EXPORT_SYMBOL(bio_init);
 
 /**
  * bio_alloc_bioset - allocate a bio for I/O
@@ -311,6 +313,7 @@ err_free:
         mempool_free(p, bs->bio_pool);
         return NULL;
 }
+EXPORT_SYMBOL(bio_alloc_bioset);
 
 static void bio_fs_destructor(struct bio *bio)
 {
@@ -322,8 +325,16 @@ static void bio_fs_destructor(struct bio *bio)
  * @gfp_mask: allocation mask to use
  * @nr_iovecs: number of iovecs
  *
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask
- * contains __GFP_WAIT, the allocation is guaranteed to succeed.
+ * bio_alloc will allocate a bio and associated bio_vec array that can hold
+ * at least @nr_iovecs entries. Allocations will be done from the
+ * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
+ *
+ * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
+ * a bio. This is due to the mempool guarantees. To make this work, callers
+ * must never allocate more than 1 bio at a time from this pool. Callers
+ * that need to allocate more than 1 bio must always submit the previously
+ * allocated bio for IO before attempting to allocate a new one. Failure to
+ * do so can cause livelocks under memory pressure.
  *
  * RETURNS:
  * Pointer to new bio on success, NULL on failure.
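For illustration only (not part of this patch): a minimal sketch of a caller honoring the one-bio-at-a-time rule documented in the kernel-doc above. With __GFP_WAIT (e.g. GFP_NOIO), bio_alloc() is backed by the fs_bio_set mempool and cannot fail, but only if each bio is submitted before the next one is allocated. The helper write_pages_one_bio_at_a_time() and its completion handler are hypothetical; bio_alloc(), bio_add_page(), submit_bio() and bio_put() are the real APIs of this era. Error handling and waiting for completion are omitted for brevity.

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/kernel.h>

/* illustrative completion: just drop the allocation reference */
static void one_shot_end_io(struct bio *bio, int err)
{
        bio_put(bio);
}

/*
 * Illustrative only: write nr_pages pages starting at 'sector',
 * holding at most one fs_bio_set bio at a time.  Each bio is
 * submitted before the next bio_alloc() call, so the mempool
 * guarantee described above holds and the loop cannot livelock
 * under memory pressure.
 */
static void write_pages_one_bio_at_a_time(struct block_device *bdev,
                                          sector_t sector,
                                          struct page **pages, int nr_pages)
{
        int i = 0;

        while (i < nr_pages) {
                /* __GFP_WAIT via GFP_NOIO => allocation cannot fail */
                struct bio *bio = bio_alloc(GFP_NOIO,
                                min_t(int, nr_pages - i, BIO_MAX_PAGES));

                bio->bi_bdev = bdev;
                bio->bi_sector = sector;
                bio->bi_end_io = one_shot_end_io;

                while (i < nr_pages &&
                       bio_add_page(bio, pages[i], PAGE_SIZE, 0))
                        i++;

                sector += bio_sectors(bio);

                /* submit before allocating the next bio from the pool */
                submit_bio(WRITE, bio);
        }
}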
@@ -337,6 +348,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 
         return bio;
 }
+EXPORT_SYMBOL(bio_alloc);
 
 static void bio_kmalloc_destructor(struct bio *bio)
 {
@@ -346,21 +358,13 @@ static void bio_kmalloc_destructor(struct bio *bio)
 }
 
 /**
- * bio_alloc - allocate a bio for I/O
+ * bio_kmalloc - allocate a bio for I/O using kmalloc()
  * @gfp_mask: the GFP_ mask given to the slab allocator
  * @nr_iovecs: number of iovecs to pre-allocate
  *
  * Description:
- *   bio_alloc will allocate a bio and associated bio_vec array that can hold
- *   at least @nr_iovecs entries. Allocations will be done from the
- *   fs_bio_set. Also see @bio_alloc_bioset.
- *
- *   If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- *   a bio. This is due to the mempool guarantees. To make this work, callers
- *   must never allocate more than 1 bio at a time from this pool. Callers
- *   that need to allocate more than 1 bio must always submit the previously
- *   allocated bio for IO before attempting to allocate a new one. Failure to
- *   do so can cause livelocks under memory pressure.
+ *   Allocate a new bio with @nr_iovecs bvecs.  If @gfp_mask contains
+ *   %__GFP_WAIT, the allocation is guaranteed to succeed.
 *
  **/
 struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
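Again for illustration (not part of the patch): unlike bio_alloc(), bio_kmalloc() is backed by plain kmalloc() rather than the fs_bio_set mempool, so NULL must be handled and the bio is released with an ordinary bio_put(). A hedged sketch; make_kmalloc_bio() is a hypothetical helper.

#include <linux/bio.h>
#include <linux/err.h>

/* Illustrative only: build a bio without touching fs_bio_set. */
static struct bio *make_kmalloc_bio(int nr_vecs)
{
        /* no mempool backing here, so a NULL return must be handled */
        struct bio *bio = bio_kmalloc(GFP_KERNEL, nr_vecs);

        if (!bio)
                return ERR_PTR(-ENOMEM);

        /* caller fills in bi_bdev, bi_sector, pages, etc. */
        return bio;
}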
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 
         return bio;
 }
+EXPORT_SYMBOL(bio_kmalloc);
 
 void zero_fill_bio(struct bio *bio)
 {
@@ -402,7 +407,7 @@ EXPORT_SYMBOL(zero_fill_bio);
  *
  * Description:
  *   Put a reference to a &struct bio, either one you have gotten with
- *   bio_alloc or bio_get. The last put of a bio will free it.
+ *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
  **/
 void bio_put(struct bio *bio)
 {
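For illustration (not part of the patch): the reference rule documented above in a minimal, hypothetical form. Every reference obtained via bio_alloc(), bio_clone() or bio_get() is dropped with bio_put(), and the final put frees the bio. sync_read_bio() and its end_io callback are made-up names; the caller still owns the original bio_alloc() reference afterwards.

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/fs.h>

static void sync_read_end_io(struct bio *bio, int err)
{
        complete(bio->bi_private);
}

/*
 * Illustrative only: submit a READ bio and wait for it.  bio_get()
 * takes an extra reference so the bio is still valid here after the
 * block layer has finished with it; bio_put() drops that reference.
 */
static int sync_read_bio(struct bio *bio)
{
        DECLARE_COMPLETION_ONSTACK(done);
        int err;

        bio->bi_private = &done;
        bio->bi_end_io = sync_read_end_io;

        bio_get(bio);                   /* keep bio valid past completion */
        submit_bio(READ, bio);
        wait_for_completion(&done);

        err = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : -EIO;
        bio_put(bio);                   /* drop our extra reference */
        return err;
}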
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio)
                 bio->bi_destructor(bio);
         }
 }
+EXPORT_SYMBOL(bio_put);
 
 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 
         return bio->bi_phys_segments;
 }
+EXPORT_SYMBOL(bio_phys_segments);
 
 /**
  * __bio_clone - clone a bio
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
         bio->bi_size = bio_src->bi_size;
         bio->bi_idx = bio_src->bi_idx;
 }
+EXPORT_SYMBOL(__bio_clone);
 
 /**
  * bio_clone - clone a bio
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 
         return b;
 }
+EXPORT_SYMBOL(bio_clone);
 
 /**
  * bio_get_nr_vecs - return approx number of vecs
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 
         return nr_pages;
 }
+EXPORT_SYMBOL(bio_get_nr_vecs);
 
 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
                           *page, unsigned int len, unsigned int offset,
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
         return __bio_add_page(q, bio, page, len, offset,
                               queue_max_hw_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_pc_page);
 
 /**
  * bio_add_page - attempt to add page to bio
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
         return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
+EXPORT_SYMBOL(bio_add_page);
 
 struct bio_map_data {
         struct bio_vec *iovecs;
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio)
         bio_put(bio);
         return ret;
 }
+EXPORT_SYMBOL(bio_uncopy_user);
 
 /**
  * bio_copy_user_iov - copy user data to bio
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
 
         return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_copy_user);
 
 static struct bio *__bio_map_user_iov(struct request_queue *q,
                                       struct block_device *bdev,
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 
         return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
 }
+EXPORT_SYMBOL(bio_map_user);
 
 /**
  * bio_map_user_iov - map user sg_iovec table into bio
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio)
         __bio_unmap_user(bio);
         bio_put(bio);
 }
+EXPORT_SYMBOL(bio_unmap_user);
 
 static void bio_map_kern_endio(struct bio *bio, int err)
 {
         bio_put(bio);
 }
 
-
 static struct bio *__bio_map_kern(struct request_queue *q, void *data,
                                   unsigned int len, gfp_t gfp_mask)
 {
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
         bio_put(bio);
         return ERR_PTR(-EINVAL);
 }
+EXPORT_SYMBOL(bio_map_kern);
 
 static void bio_copy_kern_endio(struct bio *bio, int err)
 {
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
 
         return bio;
 }
+EXPORT_SYMBOL(bio_copy_kern);
 
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error)
         if (bio->bi_end_io)
                 bio->bi_end_io(bio, error);
 }
+EXPORT_SYMBOL(bio_endio);
 
 void bio_pair_release(struct bio_pair *bp)
 {
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp)
                 mempool_free(bp, bp->bio2.bi_private);
         }
 }
+EXPORT_SYMBOL(bio_pair_release);
 
 static void bio_pair_end_1(struct bio *bi, int err)
 {
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 
         return bp;
 }
+EXPORT_SYMBOL(bio_split);
 
 /**
  * bio_sector_offset - Find hardware sector offset in bio
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs)
 
         kfree(bs);
 }
+EXPORT_SYMBOL(bioset_free);
 
 /**
  * bioset_create - Create a bio_set
@@ -1592,6 +1613,7 @@ bad:
         bioset_free(bs);
         return NULL;
 }
+EXPORT_SYMBOL(bioset_create);
 
 static void __init biovec_init_slabs(void)
 {
@@ -1636,29 +1658,4 @@ static int __init init_bio(void)
 
         return 0;
 }
-
 subsys_initcall(init_bio);
-
-EXPORT_SYMBOL(bio_alloc);
-EXPORT_SYMBOL(bio_kmalloc);
-EXPORT_SYMBOL(bio_put);
-EXPORT_SYMBOL(bio_free);
-EXPORT_SYMBOL(bio_endio);
-EXPORT_SYMBOL(bio_init);
-EXPORT_SYMBOL(__bio_clone);
-EXPORT_SYMBOL(bio_clone);
-EXPORT_SYMBOL(bio_phys_segments);
-EXPORT_SYMBOL(bio_add_page);
-EXPORT_SYMBOL(bio_add_pc_page);
-EXPORT_SYMBOL(bio_get_nr_vecs);
-EXPORT_SYMBOL(bio_map_user);
-EXPORT_SYMBOL(bio_unmap_user);
-EXPORT_SYMBOL(bio_map_kern);
-EXPORT_SYMBOL(bio_copy_kern);
-EXPORT_SYMBOL(bio_pair_release);
-EXPORT_SYMBOL(bio_split);
-EXPORT_SYMBOL(bio_copy_user);
-EXPORT_SYMBOL(bio_uncopy_user);
-EXPORT_SYMBOL(bioset_create);
-EXPORT_SYMBOL(bioset_free);
-EXPORT_SYMBOL(bio_alloc_bioset);
