Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	63
1 file changed, 54 insertions, 9 deletions
diff --git a/fs/bio.c b/fs/bio.c
index 460554b07ff9..7b3069589951 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -126,6 +126,7 @@ static void bio_fs_destructor(struct bio *bio)
 inline void bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
+	bio->bi_bdev = NULL;
 	bio->bi_flags = 1 << BIO_UPTODATE;
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
@@ -313,7 +314,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 }
 
 static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
-			  *page, unsigned int len, unsigned int offset)
+			  *page, unsigned int len, unsigned int offset,
+			  unsigned short max_sectors)
 {
 	int retried_segments = 0;
 	struct bio_vec *bvec;
@@ -324,10 +326,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
 		return 0;
 
-	if (bio->bi_vcnt >= bio->bi_max_vecs)
+	if (((bio->bi_size + len) >> 9) > max_sectors)
 		return 0;
 
-	if (((bio->bi_size + len) >> 9) > q->max_sectors)
+	/*
+	 * For filesystems with a blocksize smaller than the pagesize
+	 * we will often be called with the same page as last time and
+	 * a consecutive offset. Optimize this special case.
+	 */
+	if (bio->bi_vcnt > 0) {
+		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+		if (page == prev->bv_page &&
+		    offset == prev->bv_offset + prev->bv_len) {
+			prev->bv_len += len;
+			if (q->merge_bvec_fn &&
+			    q->merge_bvec_fn(q, bio, prev) < len) {
+				prev->bv_len -= len;
+				return 0;
+			}
+
+			goto done;
+		}
+	}
+
+	if (bio->bi_vcnt >= bio->bi_max_vecs)
 		return 0;
 
 	/*
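
The special case added above targets callers such as the following sketch (illustrative only; map_page_blocks() is a hypothetical helper, not part of this patch). A filesystem with a 1KB block size that maps consecutive blocks of the same page calls bio_add_page() repeatedly with the same page and consecutive offsets; the new branch extends the previous bio_vec instead of consuming a new one for each block.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller: map nr_blocks consecutive sub-page blocks. */
static struct bio *map_page_blocks(struct block_device *bdev, sector_t sector,
				   struct page *page, unsigned int blocksize,
				   unsigned int nr_blocks)
{
	struct bio *bio = bio_alloc(GFP_NOIO, nr_blocks);
	unsigned int i;

	if (!bio)
		return NULL;

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;

	for (i = 0; i < nr_blocks; i++) {
		/* same page, consecutive offsets: hits the merge path above */
		if (bio_add_page(bio, page, blocksize,
				 i * blocksize) < blocksize) {
			bio_put(bio);
			return NULL;
		}
	}
	return bio;
}
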
@@ -381,11 +404,31 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 	bio->bi_vcnt++;
 	bio->bi_phys_segments++;
 	bio->bi_hw_segments++;
+ done:
 	bio->bi_size += len;
 	return len;
 }
 
 /**
+ * bio_add_pc_page - attempt to add page to bio
+ * @bio: destination bio
+ * @page: page to add
+ * @len: vec entry length
+ * @offset: vec entry offset
+ *
+ * Attempt to add a page to the bio_vec maplist. This can fail for a
+ * number of reasons, such as the bio being full or target block
+ * device limitations. The target block device must allow bio's
+ * smaller than PAGE_SIZE, so it is always possible to add a single
+ * page to an empty bio. This should only be used by REQ_PC bios.
+ */
+int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+		    unsigned int len, unsigned int offset)
+{
+	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+}
+
+/**
  * bio_add_page - attempt to add page to bio
  * @bio: destination bio
  * @page: page to add
@@ -401,8 +444,8 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
 int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
-	return __bio_add_page(bdev_get_queue(bio->bi_bdev), bio, page,
-			      len, offset);
+	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
 }
 
 struct bio_map_data {
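
Taken together with the bio_add_page() change above, the new entry points split the size cap: bio_add_page() keeps enforcing the soft q->max_sectors limit for normal filesystem I/O, while bio_add_pc_page() lets REQ_PC (SCSI pass-through) callers fill a bio up to the hardware limit q->max_hw_sectors. A minimal sketch of how a caller might pick between them (add_data_page() is hypothetical, not part of this patch):

#include <linux/bio.h>
#include <linux/blkdev.h>

static int add_data_page(request_queue_t *q, struct bio *bio,
			 struct page *page, unsigned int len,
			 unsigned int offset, int is_pc)
{
	if (is_pc)
		/* pass-through bio: may grow up to q->max_hw_sectors */
		return bio_add_pc_page(q, bio, page, len, offset);

	/*
	 * Normal fs I/O: bio_add_page() looks up the queue from
	 * bio->bi_bdev and enforces the softer q->max_sectors limit.
	 */
	return bio_add_page(bio, page, len, offset);
}
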
@@ -514,7 +557,7 @@ struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
 			break;
 		}
 
-		if (__bio_add_page(q, bio, page, bytes, 0) < bytes) {
+		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
 			ret = -EINVAL;
 			break;
 		}
@@ -628,7 +671,8 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 		/*
 		 * sorry...
 		 */
-		if (__bio_add_page(q, bio, pages[j], bytes, offset) < bytes)
+		if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
+				    bytes)
 			break;
 
 		len -= bytes;
@@ -801,8 +845,8 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 		if (bytes > len)
 			bytes = len;
 
-		if (__bio_add_page(q, bio, virt_to_page(data), bytes,
-				   offset) < bytes)
+		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
+				    offset) < bytes)
 			break;
 
 		data += bytes;
@@ -1228,6 +1272,7 @@ EXPORT_SYMBOL(bio_clone);
 EXPORT_SYMBOL(bio_phys_segments);
 EXPORT_SYMBOL(bio_hw_segments);
 EXPORT_SYMBOL(bio_add_page);
+EXPORT_SYMBOL(bio_add_pc_page);
 EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);