Diffstat (limited to 'fs/bio.c')
-rw-r--r--   fs/bio.c   25
1 file changed, 13 insertions, 12 deletions
diff --git a/fs/bio.c b/fs/bio.c
index 76e6713abf94..e1f922184b45 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
 	i = 0;
 	while (i < bio_slab_nr) {
-		struct bio_slab *bslab = &bio_slabs[i];
+		bslab = &bio_slabs[i];
 
 		if (!bslab->slab && entry == -1)
 			entry = i;
@@ -264,13 +264,12 @@ EXPORT_SYMBOL(bio_init);
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:   the GFP_ mask given to the slab allocator
  * @nr_iovecs:  number of iovecs to pre-allocate
- * @bs:         the bio_set to allocate from. If %NULL, just use kmalloc
+ * @bs:         the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
+ *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
  *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
- *   fall back to just using @kmalloc to allocate the required memory.
+ *   for a &struct bio to become free.
  *
  *   Note that the caller must set ->bi_destructor on successful return
  *   of a bio, to do the appropriate freeing of the bio once the reference
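
[Sketch] To illustrate the tightened contract above (a %NULL @bs is no longer accepted, and the caller owns ->bi_destructor), here is a minimal, hedged caller sketch. The names my_bio_set, my_bio_destructor() and my_alloc_bio() are hypothetical, and my_bio_set is assumed to have been created earlier with bioset_create():

static struct bio_set *my_bio_set;	/* hypothetical; from bioset_create() */

/* Free the bio back into the pool it was allocated from. */
static void my_bio_destructor(struct bio *bio)
{
	bio_free(bio, my_bio_set);
}

static struct bio *my_alloc_bio(unsigned int nr_vecs)
{
	/* @bs must now be a real bio_set; no %NULL/kmalloc fallback. */
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

	if (bio)
		/* required by the comment above */
		bio->bi_destructor = my_bio_destructor;
	return bio;
}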
@@ -507,10 +506,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	int nr_pages;
 
 	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_phys_segments(q))
-		nr_pages = queue_max_phys_segments(q);
-	if (nr_pages > queue_max_hw_segments(q))
-		nr_pages = queue_max_hw_segments(q);
+	if (nr_pages > queue_max_segments(q))
+		nr_pages = queue_max_segments(q);
 
 	return nr_pages;
 }
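
[Sketch] bio_get_nr_vecs() now clamps against the single queue_max_segments() limit instead of the old phys/hw pair. A hedged example of the usual calling pattern; my_build_read_bio(), pages and nr_pages are hypothetical caller state:

static struct bio *my_build_read_bio(struct block_device *bdev,
				     struct page **pages, int nr_pages)
{
	/* Upper bound on bvecs this queue will take in one bio. */
	int nr_vecs = min(nr_pages, bio_get_nr_vecs(bdev));
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);
	int i;

	if (!bio)
		return NULL;
	bio->bi_bdev = bdev;
	for (i = 0; i < nr_vecs; i++)
		/* bio_add_page() returns 0 once the queue limits are hit. */
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
			break;
	return bio;
}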
@@ -542,13 +539,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
+			unsigned int prev_bv_len = prev->bv_len;
 			prev->bv_len += len;
 
 			if (q->merge_bvec_fn) {
 				struct bvec_merge_data bvm = {
+					/* prev_bvec is already charged in
+					   bi_size, discharge it in order to
+					   simulate merging updated prev_bvec
+					   as new bvec. */
 					.bi_bdev = bio->bi_bdev,
 					.bi_sector = bio->bi_sector,
-					.bi_size = bio->bi_size,
+					.bi_size = bio->bi_size - prev_bv_len,
 					.bi_rw = bio->bi_rw,
 				};
 
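
[Sketch] The point of the bi_size discharge: if prev->bv_len was 512 and the new page extends it by another 512, bio->bi_size already counts the old 512 bytes, so passing bio->bi_size - prev_bv_len lets the driver judge the merged 1024-byte bvec exactly once, as if it were a brand-new vector. A hedged sketch of a merge_bvec_fn on the receiving end; the my_* names and the per-device limit are hypothetical:

static int my_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	/* Hypothetical per-device byte limit at this starting sector. */
	unsigned int max_bytes = my_max_bytes_at(bvm->bi_sector);

	/* bvm->bi_size excludes the bvec being offered, so the updated
	   prev bvec is not double-counted in this test. */
	if (bvm->bi_size + biovec->bv_len > max_bytes)
		return 0;		/* reject the merge */
	return biovec->bv_len;		/* accept the whole bvec */
}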
@@ -570,8 +572,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-	       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+	while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
 		if (retried_segments)
 			return 0;