author		Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit		ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree		644b88f8a71896307d71438e9b3af49126ffb22b /fs/bio.c
parent		43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent		3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master	(archived-private-master)
Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	43
1 file changed, 28 insertions(+), 15 deletions(-)
diff --git a/fs/bio.c b/fs/bio.c
index 12da5db8682c..e7bf6ca64dcf 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
 	i = 0;
 	while (i < bio_slab_nr) {
-		struct bio_slab *bslab = &bio_slabs[i];
+		bslab = &bio_slabs[i];
 
 		if (!bslab->slab && entry == -1)
 			entry = i;
@@ -264,15 +264,14 @@ EXPORT_SYMBOL(bio_init);
  * bio_alloc_bioset - allocate a bio for I/O
  * @gfp_mask:	the GFP_ mask given to the slab allocator
  * @nr_iovecs:	number of iovecs to pre-allocate
- * @bs:		the bio_set to allocate from. If %NULL, just use kmalloc
+ * @bs:		the bio_set to allocate from.
  *
  * Description:
- *   bio_alloc_bioset will first try its own mempool to satisfy the allocation.
+ *   bio_alloc_bioset will try its own mempool to satisfy the allocation.
  *   If %__GFP_WAIT is set then we will block on the internal pool waiting
- *   for a &struct bio to become free. If a %NULL @bs is passed in, we will
- *   fall back to just using @kmalloc to allocate the required memory.
+ *   for a &struct bio to become free.
  *
- *   Note that the caller must set ->bi_destructor on succesful return
+ *   Note that the caller must set ->bi_destructor on successful return
  *   of a bio, to do the appropriate freeing of the bio once the reference
  *   count drops to zero.
  **/
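
[note] The updated kerneldoc makes the caller's contract explicit: allocate from a real bio_set and install ->bi_destructor yourself. A minimal sketch of a conforming caller against this era's API (my_bio_set, the pool size, and my_alloc_bio are illustrative assumptions, not part of this commit):

	#include <linux/bio.h>

	static struct bio_set *my_bio_set;	/* assume bioset_create(64, 0) at init */

	/* Return the bio and its iovecs to our private pool once the
	 * last reference is dropped. */
	static void my_bio_destructor(struct bio *bio)
	{
		bio_free(bio, my_bio_set);
	}

	static struct bio *my_alloc_bio(void)
	{
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, my_bio_set);

		if (bio)
			bio->bi_destructor = my_bio_destructor;	/* required, per the comment above */
		return bio;
	}
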
@@ -507,10 +506,8 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	int nr_pages;
 
 	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > queue_max_phys_segments(q))
-		nr_pages = queue_max_phys_segments(q);
-	if (nr_pages > queue_max_hw_segments(q))
-		nr_pages = queue_max_hw_segments(q);
+	if (nr_pages > queue_max_segments(q))
+		nr_pages = queue_max_segments(q);
 
 	return nr_pages;
 }
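
[note] A worked example of the arithmetic above, with assumed numbers: for queue_max_sectors(q) == 255 and 4 KiB pages, ((255 << 9) + 4095) >> 12 == 32 pages; if queue_max_segments(q) reports only 16, the result is clamped to 16. The two separate clamps against queue_max_phys_segments() and queue_max_hw_segments() collapse into one because the queue now exposes a single segment limit.
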
@@ -542,17 +539,22 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
 		if (page == prev->bv_page &&
 		    offset == prev->bv_offset + prev->bv_len) {
+			unsigned int prev_bv_len = prev->bv_len;
 			prev->bv_len += len;
 
 			if (q->merge_bvec_fn) {
 				struct bvec_merge_data bvm = {
+					/* prev_bvec is already charged in
+					   bi_size, discharge it in order to
+					   simulate merging updated prev_bvec
+					   as new bvec. */
 					.bi_bdev = bio->bi_bdev,
 					.bi_sector = bio->bi_sector,
-					.bi_size = bio->bi_size,
+					.bi_size = bio->bi_size - prev_bv_len,
 					.bi_rw = bio->bi_rw,
 				};
 
-				if (q->merge_bvec_fn(q, &bvm, prev) < len) {
+				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
 					prev->bv_len -= len;
 					return 0;
 				}
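
[note] A numeric trace of the discharge logic (values assumed for illustration): let bio->bi_size be 8192 with prev->bv_len == 4096, and let a 512-byte fragment be merged into prev. prev->bv_len grows to 4608, and bvm.bi_size = 8192 - 4096 = 4096 presents the bio to merge_bvec_fn() as if the grown bvec were not yet part of it, so the driver is asked whether it can accept the full 4608-byte bvec rather than just the 512 new bytes. Any answer below prev->bv_len (4608) makes the code shrink prev back by len and return 0, adding nothing. Previously, only the new len was checked, so a merged bvec could grow past what the driver would accept as a single segment.
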
@@ -570,8 +572,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
-	       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
+	while (bio->bi_phys_segments >= queue_max_segments(q)) {
 
 		if (retried_segments)
 			return 0;
@@ -606,7 +607,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * merge_bvec_fn() returns number of bytes it can accept
 	 * at this offset
 	 */
-	if (q->merge_bvec_fn(q, &bvm, bvec) < len) {
+	if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
 		bvec->bv_page = NULL;
 		bvec->bv_len = 0;
 		bvec->bv_offset = 0;
@@ -1393,6 +1394,18 @@ void bio_check_pages_dirty(struct bio *bio)
 	}
 }
 
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+void bio_flush_dcache_pages(struct bio *bi)
+{
+	int i;
+	struct bio_vec *bvec;
+
+	bio_for_each_segment(bvec, bi, i)
+		flush_dcache_page(bvec->bv_page);
+}
+EXPORT_SYMBOL(bio_flush_dcache_pages);
+#endif
+
 /**
  * bio_endio - end I/O on a bio
  * @bio:	bio
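
[note] The fs/bio.c body shown here is compiled only when the architecture defines ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE; other architectures presumably get a no-op fallback outside this file. A plausible caller is a memory-backed block driver that fills a bio's pages on READ and must flush cache aliases afterwards; a sketch under that assumption (my_make_request and fill_from_ram are hypothetical, not from this commit):

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	static void fill_from_ram(struct bio *bio);	/* hypothetical copy routine */

	static int my_make_request(struct request_queue *q, struct bio *bio)
	{
		if (bio_data_dir(bio) == READ) {
			fill_from_ram(bio);
			/* make D-cache aliases see the freshly written data */
			bio_flush_dcache_pages(bio);
		}
		bio_endio(bio, 0);
		return 0;
	}
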