Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	29
1 files changed, 15 insertions, 14 deletions
diff --git a/fs/bio.c b/fs/bio.c
index 98711647ece4..59000215e59b 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -26,10 +26,9 @@
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
 #include <linux/blktrace_api.h>
-#include <trace/block.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
-DEFINE_TRACE(block_split);
+#include <trace/events/block.h>
 
 /*
  * Test patch to inline a certain number of bi_io_vec's inside the bio
@@ -499,11 +498,11 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
-	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages > q->max_phys_segments)
-		nr_pages = q->max_phys_segments;
-	if (nr_pages > q->max_hw_segments)
-		nr_pages = q->max_hw_segments;
+	nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	if (nr_pages > queue_max_phys_segments(q))
+		nr_pages = queue_max_phys_segments(q);
+	if (nr_pages > queue_max_hw_segments(q))
+		nr_pages = queue_max_hw_segments(q);
 
 	return nr_pages;
 }
@@ -562,8 +561,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	 * make this too complex.
 	 */
 
-	while (bio->bi_phys_segments >= q->max_phys_segments
-	       || bio->bi_phys_segments >= q->max_hw_segments) {
+	while (bio->bi_phys_segments >= queue_max_phys_segments(q)
+	       || bio->bi_phys_segments >= queue_max_hw_segments(q)) {
 
 		if (retried_segments)
 			return 0;
@@ -634,7 +633,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
-	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
+	return __bio_add_page(q, bio, page, len, offset,
+			      queue_max_hw_sectors(q));
 }
 
 /**
@@ -654,7 +654,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
 		 unsigned int offset)
 {
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	return __bio_add_page(q, bio, page, len, offset, q->max_sectors);
+	return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q));
 }
 
 struct bio_map_data {
@@ -721,7 +721,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
 
 		while (bv_len && iov_idx < iov_count) {
 			unsigned int bytes;
-			char *iov_addr;
+			char __user *iov_addr;
 
 			bytes = min_t(unsigned int,
 				      iov[iov_idx].iov_len - iov_off, bv_len);
@@ -1201,7 +1201,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
 		char *addr = page_address(bvec->bv_page);
 		int len = bmd->iovecs[i].bv_len;
 
-		if (read && !err)
+		if (read)
 			memcpy(p, addr, len);
 
 		__free_page(bvec->bv_page);
@@ -1490,11 +1490,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
 sector_t bio_sector_offset(struct bio *bio, unsigned short index,
 			   unsigned int offset)
 {
-	unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue);
+	unsigned int sector_sz;
 	struct bio_vec *bv;
 	sector_t sectors;
 	int i;
 
+	sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
 	sectors = 0;
 
 	if (index >= bio->bi_idx)
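
Taken together, the hunks above switch fs/bio.c from reading the request_queue limit fields directly (q->max_sectors, q->max_phys_segments, q->max_hw_segments, queue_hardsect_size()) to the accessor helpers (queue_max_sectors(), queue_max_hw_sectors(), queue_max_phys_segments(), queue_max_hw_segments(), queue_logical_block_size()). A minimal sketch of the same accessor-based style is below; it is illustrative only, and example_queue_limits() is a hypothetical helper, not part of this patch or of the kernel API.

/*
 * Illustration only: query block-queue limits through the accessor
 * helpers used in the patch above, never through q->max_* fields.
 * example_queue_limits() is a hypothetical helper, not kernel API.
 */
#include <linux/blkdev.h>
#include <linux/kernel.h>

static void example_queue_limits(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	printk(KERN_DEBUG
	       "lbs=%u max_sectors=%u max_hw_sectors=%u phys_segs=%u hw_segs=%u\n",
	       queue_logical_block_size(q),
	       queue_max_sectors(q),
	       queue_max_hw_sectors(q),
	       queue_max_phys_segments(q),
	       queue_max_hw_segments(q));
}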