Diffstat (limited to 'fs')
 -rw-r--r--  fs/bio-integrity.c    | 144
 -rw-r--r--  fs/bio.c              | 366
 -rw-r--r--  fs/btrfs/extent_io.c  |   3
 -rw-r--r--  fs/btrfs/volumes.c    |   2
 -rw-r--r--  fs/buffer.c           |   1
 -rw-r--r--  fs/direct-io.c        |   8
 -rw-r--r--  fs/exofs/ore.c        |   2
 -rw-r--r--  fs/exofs/ore_raid.c   |   2
 -rw-r--r--  fs/gfs2/lops.c        |   2
 -rw-r--r--  fs/jfs/jfs_logmgr.c   |   2
 -rw-r--r--  fs/logfs/dev_bdev.c   |   5
 11 files changed, 387 insertions, 150 deletions
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index a3f28f331b2b..8fb42916d8a2 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
| @@ -27,48 +27,11 @@ | |||
| 27 | #include <linux/workqueue.h> | 27 | #include <linux/workqueue.h> |
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | 29 | ||
| 30 | struct integrity_slab { | 30 | #define BIP_INLINE_VECS 4 |
| 31 | struct kmem_cache *slab; | ||
| 32 | unsigned short nr_vecs; | ||
| 33 | char name[8]; | ||
| 34 | }; | ||
| 35 | |||
| 36 | #define IS(x) { .nr_vecs = x, .name = "bip-"__stringify(x) } | ||
| 37 | struct integrity_slab bip_slab[BIOVEC_NR_POOLS] __read_mostly = { | ||
| 38 | IS(1), IS(4), IS(16), IS(64), IS(128), IS(BIO_MAX_PAGES), | ||
| 39 | }; | ||
| 40 | #undef IS | ||
| 41 | 31 | ||
| 32 | static struct kmem_cache *bip_slab; | ||
| 42 | static struct workqueue_struct *kintegrityd_wq; | 33 | static struct workqueue_struct *kintegrityd_wq; |
| 43 | 34 | ||
| 44 | static inline unsigned int vecs_to_idx(unsigned int nr) | ||
| 45 | { | ||
| 46 | switch (nr) { | ||
| 47 | case 1: | ||
| 48 | return 0; | ||
| 49 | case 2 ... 4: | ||
| 50 | return 1; | ||
| 51 | case 5 ... 16: | ||
| 52 | return 2; | ||
| 53 | case 17 ... 64: | ||
| 54 | return 3; | ||
| 55 | case 65 ... 128: | ||
| 56 | return 4; | ||
| 57 | case 129 ... BIO_MAX_PAGES: | ||
| 58 | return 5; | ||
| 59 | default: | ||
| 60 | BUG(); | ||
| 61 | } | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int use_bip_pool(unsigned int idx) | ||
| 65 | { | ||
| 66 | if (idx == BIOVEC_MAX_IDX) | ||
| 67 | return 1; | ||
| 68 | |||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | /** | 35 | /** |
| 73 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio | 36 | * bio_integrity_alloc - Allocate integrity payload and attach it to bio |
| 74 | * @bio: bio to attach integrity metadata to | 37 | * @bio: bio to attach integrity metadata to |
| @@ -84,37 +47,41 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, | |||
| 84 | unsigned int nr_vecs) | 47 | unsigned int nr_vecs) |
| 85 | { | 48 | { |
| 86 | struct bio_integrity_payload *bip; | 49 | struct bio_integrity_payload *bip; |
| 87 | unsigned int idx = vecs_to_idx(nr_vecs); | ||
| 88 | struct bio_set *bs = bio->bi_pool; | 50 | struct bio_set *bs = bio->bi_pool; |
| 89 | 51 | unsigned long idx = BIO_POOL_NONE; | |
| 90 | if (!bs) | 52 | unsigned inline_vecs; |
| 91 | bs = fs_bio_set; | 53 | |
| 92 | 54 | if (!bs) { | |
| 93 | BUG_ON(bio == NULL); | 55 | bip = kmalloc(sizeof(struct bio_integrity_payload) + |
| 94 | bip = NULL; | 56 | sizeof(struct bio_vec) * nr_vecs, gfp_mask); |
| 95 | 57 | inline_vecs = nr_vecs; | |
| 96 | /* Lower order allocations come straight from slab */ | 58 | } else { |
| 97 | if (!use_bip_pool(idx)) | ||
| 98 | bip = kmem_cache_alloc(bip_slab[idx].slab, gfp_mask); | ||
| 99 | |||
| 100 | /* Use mempool if lower order alloc failed or max vecs were requested */ | ||
| 101 | if (bip == NULL) { | ||
| 102 | idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */ | ||
| 103 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); | 59 | bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask); |
| 104 | 60 | inline_vecs = BIP_INLINE_VECS; | |
| 105 | if (unlikely(bip == NULL)) { | ||
| 106 | printk(KERN_ERR "%s: could not alloc bip\n", __func__); | ||
| 107 | return NULL; | ||
| 108 | } | ||
| 109 | } | 61 | } |
| 110 | 62 | ||
| 63 | if (unlikely(!bip)) | ||
| 64 | return NULL; | ||
| 65 | |||
| 111 | memset(bip, 0, sizeof(*bip)); | 66 | memset(bip, 0, sizeof(*bip)); |
| 112 | 67 | ||
| 68 | if (nr_vecs > inline_vecs) { | ||
| 69 | bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx, | ||
| 70 | bs->bvec_integrity_pool); | ||
| 71 | if (!bip->bip_vec) | ||
| 72 | goto err; | ||
| 73 | } else { | ||
| 74 | bip->bip_vec = bip->bip_inline_vecs; | ||
| 75 | } | ||
| 76 | |||
| 113 | bip->bip_slab = idx; | 77 | bip->bip_slab = idx; |
| 114 | bip->bip_bio = bio; | 78 | bip->bip_bio = bio; |
| 115 | bio->bi_integrity = bip; | 79 | bio->bi_integrity = bip; |
| 116 | 80 | ||
| 117 | return bip; | 81 | return bip; |
| 82 | err: | ||
| 83 | mempool_free(bip, bs->bio_integrity_pool); | ||
| 84 | return NULL; | ||
| 118 | } | 85 | } |
| 119 | EXPORT_SYMBOL(bio_integrity_alloc); | 86 | EXPORT_SYMBOL(bio_integrity_alloc); |
| 120 | 87 | ||
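With the rework above, a caller no longer picks a slab size: it states how many integrity vecs it needs, and bio_integrity_alloc() either uses the payload's inline array (up to BIP_INLINE_VECS) or draws from the bio_set's bvec_integrity_pool. A minimal sketch of a caller; the helper name and error handling are illustrative rather than taken from this patch:

    /* Hypothetical caller: attach a one-segment integrity payload. */
    static int attach_protection(struct bio *bio, struct page *prot_page,
                                 unsigned int len, unsigned int offset)
    {
        struct bio_integrity_payload *bip;

        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);    /* 1 <= BIP_INLINE_VECS */
        if (!bip)
            return -ENOMEM;

        bio_integrity_add_page(bio, prot_page, len, offset);
        return 0;
    }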
| @@ -130,20 +97,18 @@ void bio_integrity_free(struct bio *bio) | |||
| 130 | struct bio_integrity_payload *bip = bio->bi_integrity; | 97 | struct bio_integrity_payload *bip = bio->bi_integrity; |
| 131 | struct bio_set *bs = bio->bi_pool; | 98 | struct bio_set *bs = bio->bi_pool; |
| 132 | 99 | ||
| 133 | if (!bs) | 100 | if (bip->bip_owns_buf) |
| 134 | bs = fs_bio_set; | ||
| 135 | |||
| 136 | BUG_ON(bip == NULL); | ||
| 137 | |||
| 138 | /* A cloned bio doesn't own the integrity metadata */ | ||
| 139 | if (!bio_flagged(bio, BIO_CLONED) && !bio_flagged(bio, BIO_FS_INTEGRITY) | ||
| 140 | && bip->bip_buf != NULL) | ||
| 141 | kfree(bip->bip_buf); | 101 | kfree(bip->bip_buf); |
| 142 | 102 | ||
| 143 | if (use_bip_pool(bip->bip_slab)) | 103 | if (bs) { |
| 104 | if (bip->bip_slab != BIO_POOL_NONE) | ||
| 105 | bvec_free(bs->bvec_integrity_pool, bip->bip_vec, | ||
| 106 | bip->bip_slab); | ||
| 107 | |||
| 144 | mempool_free(bip, bs->bio_integrity_pool); | 108 | mempool_free(bip, bs->bio_integrity_pool); |
| 145 | else | 109 | } else { |
| 146 | kmem_cache_free(bip_slab[bip->bip_slab].slab, bip); | 110 | kfree(bip); |
| 111 | } | ||
| 147 | 112 | ||
| 148 | bio->bi_integrity = NULL; | 113 | bio->bi_integrity = NULL; |
| 149 | } | 114 | } |
| @@ -419,6 +384,7 @@ int bio_integrity_prep(struct bio *bio) | |||
| 419 | return -EIO; | 384 | return -EIO; |
| 420 | } | 385 | } |
| 421 | 386 | ||
| 387 | bip->bip_owns_buf = 1; | ||
| 422 | bip->bip_buf = buf; | 388 | bip->bip_buf = buf; |
| 423 | bip->bip_size = len; | 389 | bip->bip_size = len; |
| 424 | bip->bip_sector = bio->bi_sector; | 390 | bip->bip_sector = bio->bi_sector; |
| @@ -694,11 +660,11 @@ void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors) | |||
| 694 | bp->bio1.bi_integrity = &bp->bip1; | 660 | bp->bio1.bi_integrity = &bp->bip1; |
| 695 | bp->bio2.bi_integrity = &bp->bip2; | 661 | bp->bio2.bi_integrity = &bp->bip2; |
| 696 | 662 | ||
| 697 | bp->iv1 = bip->bip_vec[0]; | 663 | bp->iv1 = bip->bip_vec[bip->bip_idx]; |
| 698 | bp->iv2 = bip->bip_vec[0]; | 664 | bp->iv2 = bip->bip_vec[bip->bip_idx]; |
| 699 | 665 | ||
| 700 | bp->bip1.bip_vec[0] = bp->iv1; | 666 | bp->bip1.bip_vec = &bp->iv1; |
| 701 | bp->bip2.bip_vec[0] = bp->iv2; | 667 | bp->bip2.bip_vec = &bp->iv2; |
| 702 | 668 | ||
| 703 | bp->iv1.bv_len = sectors * bi->tuple_size; | 669 | bp->iv1.bv_len = sectors * bi->tuple_size; |
| 704 | bp->iv2.bv_offset += sectors * bi->tuple_size; | 670 | bp->iv2.bv_offset += sectors * bi->tuple_size; |
| @@ -746,13 +712,14 @@ EXPORT_SYMBOL(bio_integrity_clone); | |||
| 746 | 712 | ||
| 747 | int bioset_integrity_create(struct bio_set *bs, int pool_size) | 713 | int bioset_integrity_create(struct bio_set *bs, int pool_size) |
| 748 | { | 714 | { |
| 749 | unsigned int max_slab = vecs_to_idx(BIO_MAX_PAGES); | ||
| 750 | |||
| 751 | if (bs->bio_integrity_pool) | 715 | if (bs->bio_integrity_pool) |
| 752 | return 0; | 716 | return 0; |
| 753 | 717 | ||
| 754 | bs->bio_integrity_pool = | 718 | bs->bio_integrity_pool = mempool_create_slab_pool(pool_size, bip_slab); |
| 755 | mempool_create_slab_pool(pool_size, bip_slab[max_slab].slab); | 719 | |
| 720 | bs->bvec_integrity_pool = biovec_create_pool(bs, pool_size); | ||
| 721 | if (!bs->bvec_integrity_pool) | ||
| 722 | return -1; | ||
| 756 | 723 | ||
| 757 | if (!bs->bio_integrity_pool) | 724 | if (!bs->bio_integrity_pool) |
| 758 | return -1; | 725 | return -1; |
| @@ -765,13 +732,14 @@ void bioset_integrity_free(struct bio_set *bs) | |||
| 765 | { | 732 | { |
| 766 | if (bs->bio_integrity_pool) | 733 | if (bs->bio_integrity_pool) |
| 767 | mempool_destroy(bs->bio_integrity_pool); | 734 | mempool_destroy(bs->bio_integrity_pool); |
| 735 | |||
| 736 | if (bs->bvec_integrity_pool) | ||
| 737 | mempool_destroy(bs->bio_integrity_pool); | ||
| 768 | } | 738 | } |
| 769 | EXPORT_SYMBOL(bioset_integrity_free); | 739 | EXPORT_SYMBOL(bioset_integrity_free); |
| 770 | 740 | ||
| 771 | void __init bio_integrity_init(void) | 741 | void __init bio_integrity_init(void) |
| 772 | { | 742 | { |
| 773 | unsigned int i; | ||
| 774 | |||
| 775 | /* | 743 | /* |
| 776 | * kintegrityd won't block much but may burn a lot of CPU cycles. | 744 | * kintegrityd won't block much but may burn a lot of CPU cycles. |
| 777 | * Make it highpri CPU intensive wq with max concurrency of 1. | 745 | * Make it highpri CPU intensive wq with max concurrency of 1. |
| @@ -781,14 +749,10 @@ void __init bio_integrity_init(void) | |||
| 781 | if (!kintegrityd_wq) | 749 | if (!kintegrityd_wq) |
| 782 | panic("Failed to create kintegrityd\n"); | 750 | panic("Failed to create kintegrityd\n"); |
| 783 | 751 | ||
| 784 | for (i = 0 ; i < BIOVEC_NR_POOLS ; i++) { | 752 | bip_slab = kmem_cache_create("bio_integrity_payload", |
| 785 | unsigned int size; | 753 | sizeof(struct bio_integrity_payload) + |
| 786 | 754 | sizeof(struct bio_vec) * BIP_INLINE_VECS, | |
| 787 | size = sizeof(struct bio_integrity_payload) | 755 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
| 788 | + bip_slab[i].nr_vecs * sizeof(struct bio_vec); | 756 | if (!bip_slab) |
| 789 | 757 | panic("Failed to create slab\n"); | |
| 790 | bip_slab[i].slab = | ||
| 791 | kmem_cache_create(bip_slab[i].name, size, 0, | ||
| 792 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | ||
| 793 | } | ||
| 794 | } | 758 | } |
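Taken together with the bioset_integrity_create() change earlier in this file, a driver that wants integrity support on its own bio_set now gets the payload mempool and the integrity biovec pool from one call. A rough usage sketch; pool sizes and error handling are illustrative:

    struct bio_set *bs;

    bs = bioset_create(64, 0);                  /* 64 reserved bios, no front_pad */
    if (!bs)
        return -ENOMEM;

    if (bioset_integrity_create(bs, 64)) {      /* bip mempool + bvec_integrity_pool */
        bioset_free(bs);
        return -ENOMEM;
    }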
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
| @@ -160,12 +160,12 @@ unsigned int bvec_nr_vecs(unsigned short idx) | |||
| 160 | return bvec_slabs[idx].nr_vecs; | 160 | return bvec_slabs[idx].nr_vecs; |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx) | 163 | void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx) |
| 164 | { | 164 | { |
| 165 | BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); | 165 | BIO_BUG_ON(idx >= BIOVEC_NR_POOLS); |
| 166 | 166 | ||
| 167 | if (idx == BIOVEC_MAX_IDX) | 167 | if (idx == BIOVEC_MAX_IDX) |
| 168 | mempool_free(bv, bs->bvec_pool); | 168 | mempool_free(bv, pool); |
| 169 | else { | 169 | else { |
| 170 | struct biovec_slab *bvs = bvec_slabs + idx; | 170 | struct biovec_slab *bvs = bvec_slabs + idx; |
| 171 | 171 | ||
| @@ -173,8 +173,8 @@ void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx) | |||
| 173 | } | 173 | } |
| 174 | } | 174 | } |
| 175 | 175 | ||
| 176 | struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, | 176 | struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, |
| 177 | struct bio_set *bs) | 177 | mempool_t *pool) |
| 178 | { | 178 | { |
| 179 | struct bio_vec *bvl; | 179 | struct bio_vec *bvl; |
| 180 | 180 | ||
| @@ -210,7 +210,7 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, | |||
| 210 | */ | 210 | */ |
| 211 | if (*idx == BIOVEC_MAX_IDX) { | 211 | if (*idx == BIOVEC_MAX_IDX) { |
| 212 | fallback: | 212 | fallback: |
| 213 | bvl = mempool_alloc(bs->bvec_pool, gfp_mask); | 213 | bvl = mempool_alloc(pool, gfp_mask); |
| 214 | } else { | 214 | } else { |
| 215 | struct biovec_slab *bvs = bvec_slabs + *idx; | 215 | struct biovec_slab *bvs = bvec_slabs + *idx; |
| 216 | gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); | 216 | gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO); |
| @@ -252,8 +252,8 @@ static void bio_free(struct bio *bio) | |||
| 252 | __bio_free(bio); | 252 | __bio_free(bio); |
| 253 | 253 | ||
| 254 | if (bs) { | 254 | if (bs) { |
| 255 | if (bio_has_allocated_vec(bio)) | 255 | if (bio_flagged(bio, BIO_OWNS_VEC)) |
| 256 | bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio)); | 256 | bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio)); |
| 257 | 257 | ||
| 258 | /* | 258 | /* |
| 259 | * If we have front padding, adjust the bio pointer before freeing | 259 | * If we have front padding, adjust the bio pointer before freeing |
| @@ -297,6 +297,54 @@ void bio_reset(struct bio *bio) | |||
| 297 | } | 297 | } |
| 298 | EXPORT_SYMBOL(bio_reset); | 298 | EXPORT_SYMBOL(bio_reset); |
| 299 | 299 | ||
| 300 | static void bio_alloc_rescue(struct work_struct *work) | ||
| 301 | { | ||
| 302 | struct bio_set *bs = container_of(work, struct bio_set, rescue_work); | ||
| 303 | struct bio *bio; | ||
| 304 | |||
| 305 | while (1) { | ||
| 306 | spin_lock(&bs->rescue_lock); | ||
| 307 | bio = bio_list_pop(&bs->rescue_list); | ||
| 308 | spin_unlock(&bs->rescue_lock); | ||
| 309 | |||
| 310 | if (!bio) | ||
| 311 | break; | ||
| 312 | |||
| 313 | generic_make_request(bio); | ||
| 314 | } | ||
| 315 | } | ||
| 316 | |||
| 317 | static void punt_bios_to_rescuer(struct bio_set *bs) | ||
| 318 | { | ||
| 319 | struct bio_list punt, nopunt; | ||
| 320 | struct bio *bio; | ||
| 321 | |||
| 322 | /* | ||
| 323 | * In order to guarantee forward progress we must punt only bios that | ||
| 324 | * were allocated from this bio_set; otherwise, if there was a bio on | ||
| 325 | * there for a stacking driver higher up in the stack, processing it | ||
| 326 | * could require allocating bios from this bio_set, and doing that from | ||
| 327 | * our own rescuer would be bad. | ||
| 328 | * | ||
| 329 | * Since bio lists are singly linked, pop them all instead of trying to | ||
| 330 | * remove from the middle of the list: | ||
| 331 | */ | ||
| 332 | |||
| 333 | bio_list_init(&punt); | ||
| 334 | bio_list_init(&nopunt); | ||
| 335 | |||
| 336 | while ((bio = bio_list_pop(current->bio_list))) | ||
| 337 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); | ||
| 338 | |||
| 339 | *current->bio_list = nopunt; | ||
| 340 | |||
| 341 | spin_lock(&bs->rescue_lock); | ||
| 342 | bio_list_merge(&bs->rescue_list, &punt); | ||
| 343 | spin_unlock(&bs->rescue_lock); | ||
| 344 | |||
| 345 | queue_work(bs->rescue_workqueue, &bs->rescue_work); | ||
| 346 | } | ||
| 347 | |||
| 300 | /** | 348 | /** |
| 301 | * bio_alloc_bioset - allocate a bio for I/O | 349 | * bio_alloc_bioset - allocate a bio for I/O |
| 302 | * @gfp_mask: the GFP_ mask given to the slab allocator | 350 | * @gfp_mask: the GFP_ mask given to the slab allocator |
| @@ -314,11 +362,27 @@ EXPORT_SYMBOL(bio_reset); | |||
| 314 | * previously allocated bio for IO before attempting to allocate a new one. | 362 | * previously allocated bio for IO before attempting to allocate a new one. |
| 315 | * Failure to do so can cause deadlocks under memory pressure. | 363 | * Failure to do so can cause deadlocks under memory pressure. |
| 316 | * | 364 | * |
| 365 | * Note that when running under generic_make_request() (i.e. any block | ||
| 366 | * driver), bios are not submitted until after you return - see the code in | ||
| 367 | * generic_make_request() that converts recursion into iteration, to prevent | ||
| 368 | * stack overflows. | ||
| 369 | * | ||
| 370 | * This would normally mean allocating multiple bios under | ||
| 371 | * generic_make_request() would be susceptible to deadlocks, but we have | ||
| 372 | * deadlock avoidance code that resubmits any blocked bios from a rescuer | ||
| 373 | * thread. | ||
| 374 | * | ||
| 375 | * However, we do not guarantee forward progress for allocations from other | ||
| 376 | * mempools. Doing multiple allocations from the same mempool under | ||
| 377 | * generic_make_request() should be avoided - instead, use bio_set's front_pad | ||
| 378 | * for per bio allocations. | ||
| 379 | * | ||
| 317 | * RETURNS: | 380 | * RETURNS: |
| 318 | * Pointer to new bio on success, NULL on failure. | 381 | * Pointer to new bio on success, NULL on failure. |
| 319 | */ | 382 | */ |
| 320 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) | 383 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) |
| 321 | { | 384 | { |
| 385 | gfp_t saved_gfp = gfp_mask; | ||
| 322 | unsigned front_pad; | 386 | unsigned front_pad; |
| 323 | unsigned inline_vecs; | 387 | unsigned inline_vecs; |
| 324 | unsigned long idx = BIO_POOL_NONE; | 388 | unsigned long idx = BIO_POOL_NONE; |
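The note about front_pad above is the recommended alternative to allocating per-bio driver state from a second mempool. A minimal sketch of that pattern, with all structure and function names invented for illustration:

    struct my_io {
        struct my_dev *dev;     /* per-bio driver state lives in the front_pad */
        struct bio bio;         /* must be the last member */
    };

    static struct bio_set *my_bio_set;

    static int my_init(void)
    {
        my_bio_set = bioset_create(4, offsetof(struct my_io, bio));
        return my_bio_set ? 0 : -ENOMEM;
    }

    static struct my_io *my_io_alloc(gfp_t gfp, unsigned nr_vecs)
    {
        struct bio *bio = bio_alloc_bioset(gfp, nr_vecs, my_bio_set);

        return bio ? container_of(bio, struct my_io, bio) : NULL;
    }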
| @@ -336,7 +400,37 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) | |||
| 336 | front_pad = 0; | 400 | front_pad = 0; |
| 337 | inline_vecs = nr_iovecs; | 401 | inline_vecs = nr_iovecs; |
| 338 | } else { | 402 | } else { |
| 403 | /* | ||
| 404 | * generic_make_request() converts recursion to iteration; this | ||
| 405 | * means if we're running beneath it, any bios we allocate and | ||
| 406 | * submit will not be submitted (and thus freed) until after we | ||
| 407 | * return. | ||
| 408 | * | ||
| 409 | * This exposes us to a potential deadlock if we allocate | ||
| 410 | * multiple bios from the same bio_set() while running | ||
| 411 | * underneath generic_make_request(). If we were to allocate | ||
| 412 | * multiple bios (say a stacking block driver that was splitting | ||
| 413 | * bios), we would deadlock if we exhausted the mempool's | ||
| 414 | * reserve. | ||
| 415 | * | ||
| 416 | * We solve this, and guarantee forward progress, with a rescuer | ||
| 417 | * workqueue per bio_set. If we go to allocate and there are | ||
| 418 | * bios on current->bio_list, we first try the allocation | ||
| 419 | * without __GFP_WAIT; if that fails, we punt those bios we | ||
| 420 | * would be blocking to the rescuer workqueue before we retry | ||
| 421 | * with the original gfp_flags. | ||
| 422 | */ | ||
| 423 | |||
| 424 | if (current->bio_list && !bio_list_empty(current->bio_list)) | ||
| 425 | gfp_mask &= ~__GFP_WAIT; | ||
| 426 | |||
| 339 | p = mempool_alloc(bs->bio_pool, gfp_mask); | 427 | p = mempool_alloc(bs->bio_pool, gfp_mask); |
| 428 | if (!p && gfp_mask != saved_gfp) { | ||
| 429 | punt_bios_to_rescuer(bs); | ||
| 430 | gfp_mask = saved_gfp; | ||
| 431 | p = mempool_alloc(bs->bio_pool, gfp_mask); | ||
| 432 | } | ||
| 433 | |||
| 340 | front_pad = bs->front_pad; | 434 | front_pad = bs->front_pad; |
| 341 | inline_vecs = BIO_INLINE_VECS; | 435 | inline_vecs = BIO_INLINE_VECS; |
| 342 | } | 436 | } |
| @@ -348,9 +442,17 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs) | |||
| 348 | bio_init(bio); | 442 | bio_init(bio); |
| 349 | 443 | ||
| 350 | if (nr_iovecs > inline_vecs) { | 444 | if (nr_iovecs > inline_vecs) { |
| 351 | bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs); | 445 | bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); |
| 446 | if (!bvl && gfp_mask != saved_gfp) { | ||
| 447 | punt_bios_to_rescuer(bs); | ||
| 448 | gfp_mask = saved_gfp; | ||
| 449 | bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool); | ||
| 450 | } | ||
| 451 | |||
| 352 | if (unlikely(!bvl)) | 452 | if (unlikely(!bvl)) |
| 353 | goto err_free; | 453 | goto err_free; |
| 454 | |||
| 455 | bio->bi_flags |= 1 << BIO_OWNS_VEC; | ||
| 354 | } else if (nr_iovecs) { | 456 | } else if (nr_iovecs) { |
| 355 | bvl = bio->bi_inline_vecs; | 457 | bvl = bio->bi_inline_vecs; |
| 356 | } | 458 | } |
| @@ -652,6 +754,181 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, | |||
| 652 | } | 754 | } |
| 653 | EXPORT_SYMBOL(bio_add_page); | 755 | EXPORT_SYMBOL(bio_add_page); |
| 654 | 756 | ||
| 757 | struct submit_bio_ret { | ||
| 758 | struct completion event; | ||
| 759 | int error; | ||
| 760 | }; | ||
| 761 | |||
| 762 | static void submit_bio_wait_endio(struct bio *bio, int error) | ||
| 763 | { | ||
| 764 | struct submit_bio_ret *ret = bio->bi_private; | ||
| 765 | |||
| 766 | ret->error = error; | ||
| 767 | complete(&ret->event); | ||
| 768 | } | ||
| 769 | |||
| 770 | /** | ||
| 771 | * submit_bio_wait - submit a bio, and wait until it completes | ||
| 772 | * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead) | ||
| 773 | * @bio: The &struct bio which describes the I/O | ||
| 774 | * | ||
| 775 | * Simple wrapper around submit_bio(). Returns 0 on success, or the error from | ||
| 776 | * bio_endio() on failure. | ||
| 777 | */ | ||
| 778 | int submit_bio_wait(int rw, struct bio *bio) | ||
| 779 | { | ||
| 780 | struct submit_bio_ret ret; | ||
| 781 | |||
| 782 | rw |= REQ_SYNC; | ||
| 783 | init_completion(&ret.event); | ||
| 784 | bio->bi_private = &ret; | ||
| 785 | bio->bi_end_io = submit_bio_wait_endio; | ||
| 786 | submit_bio(rw, bio); | ||
| 787 | wait_for_completion(&ret.event); | ||
| 788 | |||
| 789 | return ret.error; | ||
| 790 | } | ||
| 791 | EXPORT_SYMBOL(submit_bio_wait); | ||
| 792 | |||
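submit_bio_wait() turns the usual bi_end_io plus completion boilerplate into one call. A small sketch of reading one page synchronously; device handling and error paths are illustrative:

    static int read_page_sync(struct block_device *bdev, sector_t sector,
                              struct page *page)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 1);
        int ret;

        if (!bio)
            return -ENOMEM;

        bio->bi_bdev = bdev;
        bio->bi_sector = sector;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(READ, bio);   /* sleeps until the bio completes */
        bio_put(bio);
        return ret;
    }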
| 793 | /** | ||
| 794 | * bio_advance - increment/complete a bio by some number of bytes | ||
| 795 | * @bio: bio to advance | ||
| 796 | * @bytes: number of bytes to complete | ||
| 797 | * | ||
| 798 | * This updates bi_sector, bi_size and bi_idx; if the number of bytes to | ||
| 799 | * complete doesn't align with a bvec boundary, then bv_len and bv_offset will | ||
| 800 | * be updated on the last bvec as well. | ||
| 801 | * | ||
| 802 | * @bio will then represent the remaining, uncompleted portion of the io. | ||
| 803 | */ | ||
| 804 | void bio_advance(struct bio *bio, unsigned bytes) | ||
| 805 | { | ||
| 806 | if (bio_integrity(bio)) | ||
| 807 | bio_integrity_advance(bio, bytes); | ||
| 808 | |||
| 809 | bio->bi_sector += bytes >> 9; | ||
| 810 | bio->bi_size -= bytes; | ||
| 811 | |||
| 812 | if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) | ||
| 813 | return; | ||
| 814 | |||
| 815 | while (bytes) { | ||
| 816 | if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { | ||
| 817 | WARN_ONCE(1, "bio idx %d >= vcnt %d\n", | ||
| 818 | bio->bi_idx, bio->bi_vcnt); | ||
| 819 | break; | ||
| 820 | } | ||
| 821 | |||
| 822 | if (bytes >= bio_iovec(bio)->bv_len) { | ||
| 823 | bytes -= bio_iovec(bio)->bv_len; | ||
| 824 | bio->bi_idx++; | ||
| 825 | } else { | ||
| 826 | bio_iovec(bio)->bv_len -= bytes; | ||
| 827 | bio_iovec(bio)->bv_offset += bytes; | ||
| 828 | bytes = 0; | ||
| 829 | } | ||
| 830 | } | ||
| 831 | } | ||
| 832 | EXPORT_SYMBOL(bio_advance); | ||
| 833 | |||
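As a worked example of the semantics documented above: advancing a bio by 1024 bytes moves bi_sector forward by 2 and shrinks bi_size by 1024; if the current bvec only covered 512 of those bytes, bi_idx steps past it and the following bvec's bv_offset and bv_len absorb the rest. The typical caller is partial-completion handling, roughly:

    /* Complete @done bytes of @bio and resubmit whatever remains. */
    static void resubmit_remainder(struct bio *bio, unsigned done)
    {
        bio_advance(bio, done);     /* bio now describes only the uncompleted tail */
        generic_make_request(bio);
    }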
| 834 | /** | ||
| 835 | * bio_alloc_pages - allocates a single page for each bvec in a bio | ||
| 836 | * @bio: bio to allocate pages for | ||
| 837 | * @gfp_mask: flags for allocation | ||
| 838 | * | ||
| 839 | * Allocates pages up to @bio->bi_vcnt. | ||
| 840 | * | ||
| 841 | * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are | ||
| 842 | * freed. | ||
| 843 | */ | ||
| 844 | int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask) | ||
| 845 | { | ||
| 846 | int i; | ||
| 847 | struct bio_vec *bv; | ||
| 848 | |||
| 849 | bio_for_each_segment_all(bv, bio, i) { | ||
| 850 | bv->bv_page = alloc_page(gfp_mask); | ||
| 851 | if (!bv->bv_page) { | ||
| 852 | while (--bv >= bio->bi_io_vec) | ||
| 853 | __free_page(bv->bv_page); | ||
| 854 | return -ENOMEM; | ||
| 855 | } | ||
| 856 | } | ||
| 857 | |||
| 858 | return 0; | ||
| 859 | } | ||
| 860 | EXPORT_SYMBOL(bio_alloc_pages); | ||
| 861 | |||
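The intended use is to size the bio first (bi_vcnt, bi_size and the per-bvec lengths) and then let the helper fill in the pages. A hedged sketch of building a buffer bio that way; names are illustrative:

    static struct bio *alloc_buffer_bio(unsigned nr_pages)
    {
        struct bio *bio = bio_alloc(GFP_NOIO, nr_pages);
        unsigned i;

        if (!bio)
            return NULL;

        bio->bi_vcnt = nr_pages;
        bio->bi_size = nr_pages << PAGE_SHIFT;
        for (i = 0; i < nr_pages; i++) {
            bio->bi_io_vec[i].bv_len = PAGE_SIZE;
            bio->bi_io_vec[i].bv_offset = 0;
        }

        if (bio_alloc_pages(bio, GFP_NOIO)) {   /* fills bv_page for each bvec */
            bio_put(bio);
            return NULL;
        }
        return bio;
    }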
| 862 | /** | ||
| 863 | * bio_copy_data - copy contents of data buffers from one chain of bios to | ||
| 864 | * another | ||
| 865 | * @src: source bio list | ||
| 866 | * @dst: destination bio list | ||
| 867 | * | ||
| 868 | * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats | ||
| 869 | * @src and @dst as linked lists of bios. | ||
| 870 | * | ||
| 871 | * Stops when it reaches the end of either @src or @dst - that is, copies | ||
| 872 | * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). | ||
| 873 | */ | ||
| 874 | void bio_copy_data(struct bio *dst, struct bio *src) | ||
| 875 | { | ||
| 876 | struct bio_vec *src_bv, *dst_bv; | ||
| 877 | unsigned src_offset, dst_offset, bytes; | ||
| 878 | void *src_p, *dst_p; | ||
| 879 | |||
| 880 | src_bv = bio_iovec(src); | ||
| 881 | dst_bv = bio_iovec(dst); | ||
| 882 | |||
| 883 | src_offset = src_bv->bv_offset; | ||
| 884 | dst_offset = dst_bv->bv_offset; | ||
| 885 | |||
| 886 | while (1) { | ||
| 887 | if (src_offset == src_bv->bv_offset + src_bv->bv_len) { | ||
| 888 | src_bv++; | ||
| 889 | if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) { | ||
| 890 | src = src->bi_next; | ||
| 891 | if (!src) | ||
| 892 | break; | ||
| 893 | |||
| 894 | src_bv = bio_iovec(src); | ||
| 895 | } | ||
| 896 | |||
| 897 | src_offset = src_bv->bv_offset; | ||
| 898 | } | ||
| 899 | |||
| 900 | if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) { | ||
| 901 | dst_bv++; | ||
| 902 | if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) { | ||
| 903 | dst = dst->bi_next; | ||
| 904 | if (!dst) | ||
| 905 | break; | ||
| 906 | |||
| 907 | dst_bv = bio_iovec(dst); | ||
| 908 | } | ||
| 909 | |||
| 910 | dst_offset = dst_bv->bv_offset; | ||
| 911 | } | ||
| 912 | |||
| 913 | bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset, | ||
| 914 | src_bv->bv_offset + src_bv->bv_len - src_offset); | ||
| 915 | |||
| 916 | src_p = kmap_atomic(src_bv->bv_page); | ||
| 917 | dst_p = kmap_atomic(dst_bv->bv_page); | ||
| 918 | |||
| 919 | memcpy(dst_p + dst_bv->bv_offset, | ||
| 920 | src_p + src_bv->bv_offset, | ||
| 921 | bytes); | ||
| 922 | |||
| 923 | kunmap_atomic(dst_p); | ||
| 924 | kunmap_atomic(src_p); | ||
| 925 | |||
| 926 | src_offset += bytes; | ||
| 927 | dst_offset += bytes; | ||
| 928 | } | ||
| 929 | } | ||
| 930 | EXPORT_SYMBOL(bio_copy_data); | ||
| 931 | |||
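bio_copy_data() pairs naturally with bio_alloc_pages() for bouncing: allocate a same-sized bio, copy the payload across, and submit the copy in place of the original. A sketch, assuming @dst was already built to match @src's size:

    static void submit_bounced_write(struct bio *src, struct bio *dst)
    {
        bio_copy_data(dst, src);    /* copies min(src->bi_size, dst->bi_size) bytes */
        dst->bi_sector = src->bi_sector;
        dst->bi_bdev   = src->bi_bdev;
        submit_bio(WRITE, dst);
    }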
| 655 | struct bio_map_data { | 932 | struct bio_map_data { |
| 656 | struct bio_vec *iovecs; | 933 | struct bio_vec *iovecs; |
| 657 | struct sg_iovec *sgvecs; | 934 | struct sg_iovec *sgvecs; |
| @@ -714,7 +991,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, | |||
| 714 | int iov_idx = 0; | 991 | int iov_idx = 0; |
| 715 | unsigned int iov_off = 0; | 992 | unsigned int iov_off = 0; |
| 716 | 993 | ||
| 717 | __bio_for_each_segment(bvec, bio, i, 0) { | 994 | bio_for_each_segment_all(bvec, bio, i) { |
| 718 | char *bv_addr = page_address(bvec->bv_page); | 995 | char *bv_addr = page_address(bvec->bv_page); |
| 719 | unsigned int bv_len = iovecs[i].bv_len; | 996 | unsigned int bv_len = iovecs[i].bv_len; |
| 720 | 997 | ||
| @@ -896,7 +1173,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
| 896 | return bio; | 1173 | return bio; |
| 897 | cleanup: | 1174 | cleanup: |
| 898 | if (!map_data) | 1175 | if (!map_data) |
| 899 | bio_for_each_segment(bvec, bio, i) | 1176 | bio_for_each_segment_all(bvec, bio, i) |
| 900 | __free_page(bvec->bv_page); | 1177 | __free_page(bvec->bv_page); |
| 901 | 1178 | ||
| 902 | bio_put(bio); | 1179 | bio_put(bio); |
| @@ -1110,7 +1387,7 @@ static void __bio_unmap_user(struct bio *bio) | |||
| 1110 | /* | 1387 | /* |
| 1111 | * make sure we dirty pages we wrote to | 1388 | * make sure we dirty pages we wrote to |
| 1112 | */ | 1389 | */ |
| 1113 | __bio_for_each_segment(bvec, bio, i, 0) { | 1390 | bio_for_each_segment_all(bvec, bio, i) { |
| 1114 | if (bio_data_dir(bio) == READ) | 1391 | if (bio_data_dir(bio) == READ) |
| 1115 | set_page_dirty_lock(bvec->bv_page); | 1392 | set_page_dirty_lock(bvec->bv_page); |
| 1116 | 1393 | ||
| @@ -1216,7 +1493,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err) | |||
| 1216 | int i; | 1493 | int i; |
| 1217 | char *p = bmd->sgvecs[0].iov_base; | 1494 | char *p = bmd->sgvecs[0].iov_base; |
| 1218 | 1495 | ||
| 1219 | __bio_for_each_segment(bvec, bio, i, 0) { | 1496 | bio_for_each_segment_all(bvec, bio, i) { |
| 1220 | char *addr = page_address(bvec->bv_page); | 1497 | char *addr = page_address(bvec->bv_page); |
| 1221 | int len = bmd->iovecs[i].bv_len; | 1498 | int len = bmd->iovecs[i].bv_len; |
| 1222 | 1499 | ||
| @@ -1256,7 +1533,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |||
| 1256 | if (!reading) { | 1533 | if (!reading) { |
| 1257 | void *p = data; | 1534 | void *p = data; |
| 1258 | 1535 | ||
| 1259 | bio_for_each_segment(bvec, bio, i) { | 1536 | bio_for_each_segment_all(bvec, bio, i) { |
| 1260 | char *addr = page_address(bvec->bv_page); | 1537 | char *addr = page_address(bvec->bv_page); |
| 1261 | 1538 | ||
| 1262 | memcpy(addr, p, bvec->bv_len); | 1539 | memcpy(addr, p, bvec->bv_len); |
| @@ -1301,11 +1578,11 @@ EXPORT_SYMBOL(bio_copy_kern); | |||
| 1301 | */ | 1578 | */ |
| 1302 | void bio_set_pages_dirty(struct bio *bio) | 1579 | void bio_set_pages_dirty(struct bio *bio) |
| 1303 | { | 1580 | { |
| 1304 | struct bio_vec *bvec = bio->bi_io_vec; | 1581 | struct bio_vec *bvec; |
| 1305 | int i; | 1582 | int i; |
| 1306 | 1583 | ||
| 1307 | for (i = 0; i < bio->bi_vcnt; i++) { | 1584 | bio_for_each_segment_all(bvec, bio, i) { |
| 1308 | struct page *page = bvec[i].bv_page; | 1585 | struct page *page = bvec->bv_page; |
| 1309 | 1586 | ||
| 1310 | if (page && !PageCompound(page)) | 1587 | if (page && !PageCompound(page)) |
| 1311 | set_page_dirty_lock(page); | 1588 | set_page_dirty_lock(page); |
| @@ -1314,11 +1591,11 @@ void bio_set_pages_dirty(struct bio *bio) | |||
| 1314 | 1591 | ||
| 1315 | static void bio_release_pages(struct bio *bio) | 1592 | static void bio_release_pages(struct bio *bio) |
| 1316 | { | 1593 | { |
| 1317 | struct bio_vec *bvec = bio->bi_io_vec; | 1594 | struct bio_vec *bvec; |
| 1318 | int i; | 1595 | int i; |
| 1319 | 1596 | ||
| 1320 | for (i = 0; i < bio->bi_vcnt; i++) { | 1597 | bio_for_each_segment_all(bvec, bio, i) { |
| 1321 | struct page *page = bvec[i].bv_page; | 1598 | struct page *page = bvec->bv_page; |
| 1322 | 1599 | ||
| 1323 | if (page) | 1600 | if (page) |
| 1324 | put_page(page); | 1601 | put_page(page); |
| @@ -1367,16 +1644,16 @@ static void bio_dirty_fn(struct work_struct *work) | |||
| 1367 | 1644 | ||
| 1368 | void bio_check_pages_dirty(struct bio *bio) | 1645 | void bio_check_pages_dirty(struct bio *bio) |
| 1369 | { | 1646 | { |
| 1370 | struct bio_vec *bvec = bio->bi_io_vec; | 1647 | struct bio_vec *bvec; |
| 1371 | int nr_clean_pages = 0; | 1648 | int nr_clean_pages = 0; |
| 1372 | int i; | 1649 | int i; |
| 1373 | 1650 | ||
| 1374 | for (i = 0; i < bio->bi_vcnt; i++) { | 1651 | bio_for_each_segment_all(bvec, bio, i) { |
| 1375 | struct page *page = bvec[i].bv_page; | 1652 | struct page *page = bvec->bv_page; |
| 1376 | 1653 | ||
| 1377 | if (PageDirty(page) || PageCompound(page)) { | 1654 | if (PageDirty(page) || PageCompound(page)) { |
| 1378 | page_cache_release(page); | 1655 | page_cache_release(page); |
| 1379 | bvec[i].bv_page = NULL; | 1656 | bvec->bv_page = NULL; |
| 1380 | } else { | 1657 | } else { |
| 1381 | nr_clean_pages++; | 1658 | nr_clean_pages++; |
| 1382 | } | 1659 | } |
| @@ -1479,8 +1756,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
| 1479 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, | 1756 | trace_block_split(bdev_get_queue(bi->bi_bdev), bi, |
| 1480 | bi->bi_sector + first_sectors); | 1757 | bi->bi_sector + first_sectors); |
| 1481 | 1758 | ||
| 1482 | BUG_ON(bi->bi_vcnt != 1 && bi->bi_vcnt != 0); | 1759 | BUG_ON(bio_segments(bi) > 1); |
| 1483 | BUG_ON(bi->bi_idx != 0); | ||
| 1484 | atomic_set(&bp->cnt, 3); | 1760 | atomic_set(&bp->cnt, 3); |
| 1485 | bp->error = 0; | 1761 | bp->error = 0; |
| 1486 | bp->bio1 = *bi; | 1762 | bp->bio1 = *bi; |
| @@ -1490,8 +1766,8 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
| 1490 | bp->bio1.bi_size = first_sectors << 9; | 1766 | bp->bio1.bi_size = first_sectors << 9; |
| 1491 | 1767 | ||
| 1492 | if (bi->bi_vcnt != 0) { | 1768 | if (bi->bi_vcnt != 0) { |
| 1493 | bp->bv1 = bi->bi_io_vec[0]; | 1769 | bp->bv1 = *bio_iovec(bi); |
| 1494 | bp->bv2 = bi->bi_io_vec[0]; | 1770 | bp->bv2 = *bio_iovec(bi); |
| 1495 | 1771 | ||
| 1496 | if (bio_is_rw(bi)) { | 1772 | if (bio_is_rw(bi)) { |
| 1497 | bp->bv2.bv_offset += first_sectors << 9; | 1773 | bp->bv2.bv_offset += first_sectors << 9; |
| @@ -1543,7 +1819,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index, | |||
| 1543 | if (index >= bio->bi_idx) | 1819 | if (index >= bio->bi_idx) |
| 1544 | index = bio->bi_vcnt - 1; | 1820 | index = bio->bi_vcnt - 1; |
| 1545 | 1821 | ||
| 1546 | __bio_for_each_segment(bv, bio, i, 0) { | 1822 | bio_for_each_segment_all(bv, bio, i) { |
| 1547 | if (i == index) { | 1823 | if (i == index) { |
| 1548 | if (offset > bv->bv_offset) | 1824 | if (offset > bv->bv_offset) |
| 1549 | sectors += (offset - bv->bv_offset) / sector_sz; | 1825 | sectors += (offset - bv->bv_offset) / sector_sz; |
| @@ -1561,29 +1837,25 @@ EXPORT_SYMBOL(bio_sector_offset); | |||
| 1561 | * create memory pools for biovec's in a bio_set. | 1837 | * create memory pools for biovec's in a bio_set. |
| 1562 | * use the global biovec slabs created for general use. | 1838 | * use the global biovec slabs created for general use. |
| 1563 | */ | 1839 | */ |
| 1564 | static int biovec_create_pools(struct bio_set *bs, int pool_entries) | 1840 | mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries) |
| 1565 | { | 1841 | { |
| 1566 | struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; | 1842 | struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX; |
| 1567 | 1843 | ||
| 1568 | bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab); | 1844 | return mempool_create_slab_pool(pool_entries, bp->slab); |
| 1569 | if (!bs->bvec_pool) | ||
| 1570 | return -ENOMEM; | ||
| 1571 | |||
| 1572 | return 0; | ||
| 1573 | } | ||
| 1574 | |||
| 1575 | static void biovec_free_pools(struct bio_set *bs) | ||
| 1576 | { | ||
| 1577 | mempool_destroy(bs->bvec_pool); | ||
| 1578 | } | 1845 | } |
| 1579 | 1846 | ||
| 1580 | void bioset_free(struct bio_set *bs) | 1847 | void bioset_free(struct bio_set *bs) |
| 1581 | { | 1848 | { |
| 1849 | if (bs->rescue_workqueue) | ||
| 1850 | destroy_workqueue(bs->rescue_workqueue); | ||
| 1851 | |||
| 1582 | if (bs->bio_pool) | 1852 | if (bs->bio_pool) |
| 1583 | mempool_destroy(bs->bio_pool); | 1853 | mempool_destroy(bs->bio_pool); |
| 1584 | 1854 | ||
| 1855 | if (bs->bvec_pool) | ||
| 1856 | mempool_destroy(bs->bvec_pool); | ||
| 1857 | |||
| 1585 | bioset_integrity_free(bs); | 1858 | bioset_integrity_free(bs); |
| 1586 | biovec_free_pools(bs); | ||
| 1587 | bio_put_slab(bs); | 1859 | bio_put_slab(bs); |
| 1588 | 1860 | ||
| 1589 | kfree(bs); | 1861 | kfree(bs); |
| @@ -1614,6 +1886,10 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) | |||
| 1614 | 1886 | ||
| 1615 | bs->front_pad = front_pad; | 1887 | bs->front_pad = front_pad; |
| 1616 | 1888 | ||
| 1889 | spin_lock_init(&bs->rescue_lock); | ||
| 1890 | bio_list_init(&bs->rescue_list); | ||
| 1891 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); | ||
| 1892 | |||
| 1617 | bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); | 1893 | bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad); |
| 1618 | if (!bs->bio_slab) { | 1894 | if (!bs->bio_slab) { |
| 1619 | kfree(bs); | 1895 | kfree(bs); |
| @@ -1624,9 +1900,15 @@ struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad) | |||
| 1624 | if (!bs->bio_pool) | 1900 | if (!bs->bio_pool) |
| 1625 | goto bad; | 1901 | goto bad; |
| 1626 | 1902 | ||
| 1627 | if (!biovec_create_pools(bs, pool_size)) | 1903 | bs->bvec_pool = biovec_create_pool(bs, pool_size); |
| 1628 | return bs; | 1904 | if (!bs->bvec_pool) |
| 1905 | goto bad; | ||
| 1906 | |||
| 1907 | bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0); | ||
| 1908 | if (!bs->rescue_workqueue) | ||
| 1909 | goto bad; | ||
| 1629 | 1910 | ||
| 1911 | return bs; | ||
| 1630 | bad: | 1912 | bad: |
| 1631 | bioset_free(bs); | 1913 | bioset_free(bs); |
| 1632 | return NULL; | 1914 | return NULL; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index f173c5af6461..bed072aa461f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
| @@ -2527,8 +2527,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, | |||
| 2527 | if (old_compressed) | 2527 | if (old_compressed) |
| 2528 | contig = bio->bi_sector == sector; | 2528 | contig = bio->bi_sector == sector; |
| 2529 | else | 2529 | else |
| 2530 | contig = bio->bi_sector + (bio->bi_size >> 9) == | 2530 | contig = bio_end_sector(bio) == sector; |
| 2531 | sector; | ||
| 2532 | 2531 | ||
| 2533 | if (prev_bio_flags != bio_flags || !contig || | 2532 | if (prev_bio_flags != bio_flags || !contig || |
| 2534 | merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || | 2533 | merge_bio(rw, tree, page, offset, page_size, bio, bio_flags) || |
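For reference, the accessor substituted here expands (as introduced alongside this series; quoted from memory, so treat as a sketch) to the same arithmetic that was open-coded before:

    #define bio_sectors(bio)    ((bio)->bi_size >> 9)
    #define bio_end_sector(bio) ((bio)->bi_sector + bio_sectors(bio))

so bio_end_sector(bio) == sector is exactly the old contiguity test, just more readable.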
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 5989a92236f7..d90e0485e01b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
| @@ -5166,7 +5166,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio, | |||
| 5166 | } | 5166 | } |
| 5167 | 5167 | ||
| 5168 | prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; | 5168 | prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
| 5169 | if ((bio->bi_size >> 9) > max_sectors) | 5169 | if (bio_sectors(bio) > max_sectors) |
| 5170 | return 0; | 5170 | return 0; |
| 5171 | 5171 | ||
| 5172 | if (!q->merge_bvec_fn) | 5172 | if (!q->merge_bvec_fn) |
diff --git a/fs/buffer.c b/fs/buffer.c
index b4dcb34c9635..ecd3792ae0e9 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
| @@ -2979,7 +2979,6 @@ int submit_bh(int rw, struct buffer_head * bh) | |||
| 2979 | bio->bi_io_vec[0].bv_offset = bh_offset(bh); | 2979 | bio->bi_io_vec[0].bv_offset = bh_offset(bh); |
| 2980 | 2980 | ||
| 2981 | bio->bi_vcnt = 1; | 2981 | bio->bi_vcnt = 1; |
| 2982 | bio->bi_idx = 0; | ||
| 2983 | bio->bi_size = bh->b_size; | 2982 | bio->bi_size = bh->b_size; |
| 2984 | 2983 | ||
| 2985 | bio->bi_end_io = end_bio_bh_io_sync; | 2984 | bio->bi_end_io = end_bio_bh_io_sync; |
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f853263cf74f..38484b08a39a 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
| @@ -441,8 +441,8 @@ static struct bio *dio_await_one(struct dio *dio) | |||
| 441 | static int dio_bio_complete(struct dio *dio, struct bio *bio) | 441 | static int dio_bio_complete(struct dio *dio, struct bio *bio) |
| 442 | { | 442 | { |
| 443 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | 443 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); |
| 444 | struct bio_vec *bvec = bio->bi_io_vec; | 444 | struct bio_vec *bvec; |
| 445 | int page_no; | 445 | unsigned i; |
| 446 | 446 | ||
| 447 | if (!uptodate) | 447 | if (!uptodate) |
| 448 | dio->io_error = -EIO; | 448 | dio->io_error = -EIO; |
| @@ -450,8 +450,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio) | |||
| 450 | if (dio->is_async && dio->rw == READ) { | 450 | if (dio->is_async && dio->rw == READ) { |
| 451 | bio_check_pages_dirty(bio); /* transfers ownership */ | 451 | bio_check_pages_dirty(bio); /* transfers ownership */ |
| 452 | } else { | 452 | } else { |
| 453 | for (page_no = 0; page_no < bio->bi_vcnt; page_no++) { | 453 | bio_for_each_segment_all(bvec, bio, i) { |
| 454 | struct page *page = bvec[page_no].bv_page; | 454 | struct page *page = bvec->bv_page; |
| 455 | 455 | ||
| 456 | if (dio->rw == READ && !PageCompound(page)) | 456 | if (dio->rw == READ && !PageCompound(page)) |
| 457 | set_page_dirty_lock(page); | 457 | set_page_dirty_lock(page); |
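This and the exofs/ore conversions below share one pattern: bio_for_each_segment_all() walks every bvec the bio owns, from index 0 through bi_vcnt, whereas bio_for_each_segment() starts at bi_idx and would skip segments once a bio has been partially advanced. The owner-side loop ends up looking like:

    static void dirty_all_pages(struct bio *bio)
    {
        struct bio_vec *bvec;
        int i;

        bio_for_each_segment_all(bvec, bio, i)  /* 0..bi_vcnt, ignores bi_idx */
            set_page_dirty_lock(bvec->bv_page);
    }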
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index f936cb50dc0d..b74422888604 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
| @@ -401,7 +401,7 @@ static void _clear_bio(struct bio *bio) | |||
| 401 | struct bio_vec *bv; | 401 | struct bio_vec *bv; |
| 402 | unsigned i; | 402 | unsigned i; |
| 403 | 403 | ||
| 404 | __bio_for_each_segment(bv, bio, i, 0) { | 404 | bio_for_each_segment_all(bv, bio, i) { |
| 405 | unsigned this_count = bv->bv_len; | 405 | unsigned this_count = bv->bv_len; |
| 406 | 406 | ||
| 407 | if (likely(PAGE_SIZE == this_count)) | 407 | if (likely(PAGE_SIZE == this_count)) |
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index b963f38ac298..7682b970d0f1 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
| @@ -432,7 +432,7 @@ static void _mark_read4write_pages_uptodate(struct ore_io_state *ios, int ret) | |||
| 432 | if (!bio) | 432 | if (!bio) |
| 433 | continue; | 433 | continue; |
| 434 | 434 | ||
| 435 | __bio_for_each_segment(bv, bio, i, 0) { | 435 | bio_for_each_segment_all(bv, bio, i) { |
| 436 | struct page *page = bv->bv_page; | 436 | struct page *page = bv->bv_page; |
| 437 | 437 | ||
| 438 | SetPageUptodate(page); | 438 | SetPageUptodate(page); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index a5055977a214..5c37ef982390 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
| @@ -300,7 +300,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno) | |||
| 300 | u64 nblk; | 300 | u64 nblk; |
| 301 | 301 | ||
| 302 | if (bio) { | 302 | if (bio) { |
| 303 | nblk = bio->bi_sector + bio_sectors(bio); | 303 | nblk = bio_end_sector(bio); |
| 304 | nblk >>= sdp->sd_fsb2bb_shift; | 304 | nblk >>= sdp->sd_fsb2bb_shift; |
| 305 | if (blkno == nblk) | 305 | if (blkno == nblk) |
| 306 | return bio; | 306 | return bio; |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 2eb952c41a69..8ae5e350da43 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
| @@ -2004,7 +2004,6 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp) | |||
| 2004 | bio->bi_io_vec[0].bv_offset = bp->l_offset; | 2004 | bio->bi_io_vec[0].bv_offset = bp->l_offset; |
| 2005 | 2005 | ||
| 2006 | bio->bi_vcnt = 1; | 2006 | bio->bi_vcnt = 1; |
| 2007 | bio->bi_idx = 0; | ||
| 2008 | bio->bi_size = LOGPSIZE; | 2007 | bio->bi_size = LOGPSIZE; |
| 2009 | 2008 | ||
| 2010 | bio->bi_end_io = lbmIODone; | 2009 | bio->bi_end_io = lbmIODone; |
| @@ -2145,7 +2144,6 @@ static void lbmStartIO(struct lbuf * bp) | |||
| 2145 | bio->bi_io_vec[0].bv_offset = bp->l_offset; | 2144 | bio->bi_io_vec[0].bv_offset = bp->l_offset; |
| 2146 | 2145 | ||
| 2147 | bio->bi_vcnt = 1; | 2146 | bio->bi_vcnt = 1; |
| 2148 | bio->bi_idx = 0; | ||
| 2149 | bio->bi_size = LOGPSIZE; | 2147 | bio->bi_size = LOGPSIZE; |
| 2150 | 2148 | ||
| 2151 | bio->bi_end_io = lbmIODone; | 2149 | bio->bi_end_io = lbmIODone; |
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index e784a217b500..550475ca6a0e 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
| @@ -32,7 +32,6 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) | |||
| 32 | bio_vec.bv_len = PAGE_SIZE; | 32 | bio_vec.bv_len = PAGE_SIZE; |
| 33 | bio_vec.bv_offset = 0; | 33 | bio_vec.bv_offset = 0; |
| 34 | bio.bi_vcnt = 1; | 34 | bio.bi_vcnt = 1; |
| 35 | bio.bi_idx = 0; | ||
| 36 | bio.bi_size = PAGE_SIZE; | 35 | bio.bi_size = PAGE_SIZE; |
| 37 | bio.bi_bdev = bdev; | 36 | bio.bi_bdev = bdev; |
| 38 | bio.bi_sector = page->index * (PAGE_SIZE >> 9); | 37 | bio.bi_sector = page->index * (PAGE_SIZE >> 9); |
| @@ -108,7 +107,6 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
| 108 | if (i >= max_pages) { | 107 | if (i >= max_pages) { |
| 109 | /* Block layer cannot split bios :( */ | 108 | /* Block layer cannot split bios :( */ |
| 110 | bio->bi_vcnt = i; | 109 | bio->bi_vcnt = i; |
| 111 | bio->bi_idx = 0; | ||
| 112 | bio->bi_size = i * PAGE_SIZE; | 110 | bio->bi_size = i * PAGE_SIZE; |
| 113 | bio->bi_bdev = super->s_bdev; | 111 | bio->bi_bdev = super->s_bdev; |
| 114 | bio->bi_sector = ofs >> 9; | 112 | bio->bi_sector = ofs >> 9; |
| @@ -136,7 +134,6 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, | |||
| 136 | unlock_page(page); | 134 | unlock_page(page); |
| 137 | } | 135 | } |
| 138 | bio->bi_vcnt = nr_pages; | 136 | bio->bi_vcnt = nr_pages; |
| 139 | bio->bi_idx = 0; | ||
| 140 | bio->bi_size = nr_pages * PAGE_SIZE; | 137 | bio->bi_size = nr_pages * PAGE_SIZE; |
| 141 | bio->bi_bdev = super->s_bdev; | 138 | bio->bi_bdev = super->s_bdev; |
| 142 | bio->bi_sector = ofs >> 9; | 139 | bio->bi_sector = ofs >> 9; |
| @@ -202,7 +199,6 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
| 202 | if (i >= max_pages) { | 199 | if (i >= max_pages) { |
| 203 | /* Block layer cannot split bios :( */ | 200 | /* Block layer cannot split bios :( */ |
| 204 | bio->bi_vcnt = i; | 201 | bio->bi_vcnt = i; |
| 205 | bio->bi_idx = 0; | ||
| 206 | bio->bi_size = i * PAGE_SIZE; | 202 | bio->bi_size = i * PAGE_SIZE; |
| 207 | bio->bi_bdev = super->s_bdev; | 203 | bio->bi_bdev = super->s_bdev; |
| 208 | bio->bi_sector = ofs >> 9; | 204 | bio->bi_sector = ofs >> 9; |
| @@ -224,7 +220,6 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, | |||
| 224 | bio->bi_io_vec[i].bv_offset = 0; | 220 | bio->bi_io_vec[i].bv_offset = 0; |
| 225 | } | 221 | } |
| 226 | bio->bi_vcnt = nr_pages; | 222 | bio->bi_vcnt = nr_pages; |
| 227 | bio->bi_idx = 0; | ||
| 228 | bio->bi_size = nr_pages * PAGE_SIZE; | 223 | bio->bi_size = nr_pages * PAGE_SIZE; |
| 229 | bio->bi_bdev = super->s_bdev; | 224 | bio->bi_bdev = super->s_bdev; |
| 230 | bio->bi_sector = ofs >> 9; | 225 | bio->bi_sector = ofs >> 9; |
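For context on the bi_idx deletions in the buffer.c, jfs and logfs hunks: bios obtained from bio_alloc()/bio_alloc_bioset(), and on-stack bios set up with bio_init(), start out fully zeroed, so the explicit bio->bi_idx = 0 assignments were redundant and can simply go. Roughly why (bio_init as of this era, quoted from memory, so treat as a sketch):

    void bio_init(struct bio *bio)
    {
        memset(bio, 0, sizeof(*bio));   /* bi_idx, bi_vcnt, bi_size all start at 0 */
        bio->bi_flags = 1 << BIO_UPTODATE;
        atomic_set(&bio->bi_cnt, 1);
    }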
