Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c | 16 ++++++----------
1 file changed, 6 insertions(+), 10 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 88094afc29ea..e1f922184b45 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -264,13 +264,12 @@ EXPORT_SYMBOL(bio_init);
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:   the GFP_ mask given to the slab allocator
  * @nr_iovecs:  number of iovecs to pre-allocate
- * @bs:         the bio_set to allocate from. If %NULL, just use kmalloc
+ * @bs:         the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
+ *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
  *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
- *   fall back to just using @kmalloc to allocate the required memory.
+ *   for a &struct bio to become free.
  *
  * Note that the caller must set ->bi_destructor on successful return
  * of a bio, to do the appropriate freeing of the bio once the reference
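For readers unfamiliar with the interface documented above, here is a minimal
caller-side sketch of the ->bi_destructor contract. It assumes a private
bio_set created with bioset_create() at init time; the my_* names are
illustrative, not part of this patch.

#include <linux/bio.h>

static struct bio_set *my_bio_set;	/* e.g. bioset_create(64, 0) at module init */

/* The destructor the comment above requires: return the bio to its pool. */
static void my_bio_destructor(struct bio *bio)
{
	bio_free(bio, my_bio_set);
}

static struct bio *my_alloc_bio(unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, my_bio_set);

	if (bio)
		bio->bi_destructor = my_bio_destructor;	/* runs on final bio_put() */
	return bio;
}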
@@ -507,10 +506,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	int nr_pages;
 
 	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_phys_segments(q))
-		nr_pages = queue_max_phys_segments(q);
-	if (nr_pages > queue_max_hw_segments(q))
-		nr_pages = queue_max_hw_segments(q);
+	if (nr_pages > queue_max_segments(q))
+		nr_pages = queue_max_segments(q);
 
 	return nr_pages;
 }
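bio_get_nr_vecs() converts the queue's maximum transfer size into a page
count, rounding up, and now clamps it against the single merged segment
limit. A worked example, with illustrative numbers not taken from this patch:

	int nr_vecs;
	struct bio *bio;

	/* With queue_max_sectors(q) == 255 and 4 KiB pages (PAGE_SHIFT == 12):
	 *
	 *   255 << 9              = 130560 bytes per request, at most
	 *   (130560 + 4095) >> 12 = 32 pages, rounded up
	 *
	 * queue_max_segments(q) == 128 leaves the 32 untouched; a queue
	 * accepting only 16 segments would cut it down to 16.
	 */
	nr_vecs = bio_get_nr_vecs(bdev);	/* 32 in this example */
	bio = bio_alloc(GFP_NOIO, nr_vecs);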
@@ -575,8 +572,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-	    || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+	while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
 		if (retried_segments)
 			return 0;
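This loop is what ultimately bounds bio_add_page(): once the merged segment
count reaches queue_max_segments(), the page cannot be added and the caller
sees a short return. A hedged caller-side sketch of reacting to that; the
my_write_page() helper and its error handling are illustrative, not from
this patch, and bi_sector/bi_end_io setup is elided for brevity.

#include <linux/bio.h>
#include <linux/fs.h>

/*
 * Add one page to *biop; when the queue's segment limit makes
 * bio_add_page() return less than a full page, submit the current
 * bio and continue in a fresh one.
 */
static int my_write_page(struct bio **biop, struct block_device *bdev,
			 struct page *page)
{
	struct bio *bio = *biop;

	if (bio_add_page(bio, page, PAGE_SIZE, 0) == PAGE_SIZE)
		return 0;			/* fit under queue_max_segments() */

	submit_bio(WRITE, bio);			/* this bio is as full as it gets */

	bio = bio_alloc(GFP_NOIO, bio_get_nr_vecs(bdev));
	if (!bio)
		return -ENOMEM;
	bio->bi_bdev = bdev;
	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) {
		bio_put(bio);
		return -EIO;
	}
	*biop = bio;
	return 0;
}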