Diffstat (limited to 'fs/bio.c')
-rw-r--r--	fs/bio.c	30
1 file changed, 15 insertions(+), 15 deletions(-)
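Every hunk below performs the same mechanical substitution: the legacy request_queue_t spelling is written out as struct request_queue. A minimal, hypothetical C sketch of the pattern follows (the typedef shown is assumed from the era's <linux/blkdev.h>; old_style and new_style are illustrative names, not kernel functions):

/* Hypothetical illustration of the substitution this diff performs.
 * Older kernels provided an alias roughly equivalent to:
 *     typedef struct request_queue request_queue_t;
 * so both spellings name the same type and the conversion cannot
 * change behaviour or ABI. */
struct request_queue;                          /* opaque forward declaration */
typedef struct request_queue request_queue_t;  /* legacy alias being phased out */

static int old_style(request_queue_t *q)      { return q != NULL; } /* old spelling */
static int new_style(struct request_queue *q) { return q != NULL; } /* new spelling */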
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -230,7 +230,7 @@ void bio_put(struct bio *bio)
 	}
 }
 
-inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
+inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -238,7 +238,7 @@ inline int bio_phys_segments(request_queue_t *q, struct bio *bio)
 	return bio->bi_phys_segments;
 }
 
-inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
+inline int bio_hw_segments(struct request_queue *q, struct bio *bio)
 {
 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
 		blk_recount_segments(q, bio);
@@ -257,7 +257,7 @@ inline int bio_hw_segments(request_queue_t *q, struct bio *bio)
  */
 void __bio_clone(struct bio *bio, struct bio *bio_src)
 {
-	request_queue_t *q = bdev_get_queue(bio_src->bi_bdev);
+	struct request_queue *q = bdev_get_queue(bio_src->bi_bdev);
 
 	memcpy(bio->bi_io_vec, bio_src->bi_io_vec,
 		bio_src->bi_max_vecs * sizeof(struct bio_vec));
@@ -303,7 +303,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
  */
 int bio_get_nr_vecs(struct block_device *bdev)
 {
-	request_queue_t *q = bdev_get_queue(bdev);
+	struct request_queue *q = bdev_get_queue(bdev);
 	int nr_pages;
 
 	nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -315,7 +315,7 @@ int bio_get_nr_vecs(struct block_device *bdev)
 	return nr_pages;
 }
 
-static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
+static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 			  *page, unsigned int len, unsigned int offset,
 			  unsigned short max_sectors)
 {
@@ -425,7 +425,7 @@ static int __bio_add_page(request_queue_t *q, struct bio *bio, struct page
  * smaller than PAGE_SIZE, so it is always possible to add a single
  * page to an empty bio. This should only be used by REQ_PC bios.
  */
-int bio_add_pc_page(request_queue_t *q, struct bio *bio, struct page *page,
+int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
 		    unsigned int len, unsigned int offset)
 {
 	return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors);
@@ -523,7 +523,7 @@ int bio_uncopy_user(struct bio *bio)
  * to/from kernel pages as necessary. Must be paired with
  * call bio_uncopy_user() on io completion.
  */
-struct bio *bio_copy_user(request_queue_t *q, unsigned long uaddr,
+struct bio *bio_copy_user(struct request_queue *q, unsigned long uaddr,
 			  unsigned int len, int write_to_vm)
 {
 	unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -600,7 +600,7 @@ out_bmd:
 	return ERR_PTR(ret);
 }
 
-static struct bio *__bio_map_user_iov(request_queue_t *q,
+static struct bio *__bio_map_user_iov(struct request_queue *q,
 				      struct block_device *bdev,
 				      struct sg_iovec *iov, int iov_count,
 				      int write_to_vm)
@@ -712,7 +712,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
 
 /**
  * bio_map_user - map user address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @bdev: destination block device
  * @uaddr: start of user address
  * @len: length in bytes
@@ -721,7 +721,7 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
 			 unsigned long uaddr, unsigned int len, int write_to_vm)
 {
 	struct sg_iovec iov;
@@ -734,7 +734,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
 
 /**
  * bio_map_user_iov - map user sg_iovec table into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @bdev: destination block device
  * @iov: the iovec.
  * @iov_count: number of elements in the iovec
@@ -743,7 +743,7 @@ struct bio *bio_map_user(request_queue_t *q, struct block_device *bdev,
  * Map the user space address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_user_iov(request_queue_t *q, struct block_device *bdev,
+struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
 			     struct sg_iovec *iov, int iov_count,
 			     int write_to_vm)
 {
@@ -808,7 +808,7 @@ static int bio_map_kern_endio(struct bio *bio, unsigned int bytes_done, int err)
 }
 
 
-static struct bio *__bio_map_kern(request_queue_t *q, void *data,
+static struct bio *__bio_map_kern(struct request_queue *q, void *data,
 				  unsigned int len, gfp_t gfp_mask)
 {
 	unsigned long kaddr = (unsigned long)data;
@@ -847,7 +847,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
 
 /**
  * bio_map_kern - map kernel address into bio
- * @q: the request_queue_t for the bio
+ * @q: the struct request_queue for the bio
  * @data: pointer to buffer to map
  * @len: length in bytes
 * @gfp_mask: allocation flags for bio allocation
@@ -855,7 +855,7 @@ static struct bio *__bio_map_kern(request_queue_t *q, void *data,
  * Map the kernel address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-struct bio *bio_map_kern(request_queue_t *q, void *data, unsigned int len,
+struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
 			 gfp_t gfp_mask)
 {
 	struct bio *bio;
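For callers the conversion is transparent: since request_queue_t was only ever an alias for struct request_queue, code that already uses the struct spelling compiles identically before and after this patch. A hypothetical caller sketch (not part of this patch; map_buffer is an illustrative name) using the converted bio_map_kern() from the last hunk:

/* Hypothetical caller, assuming kernel context where <linux/bio.h> and
 * <linux/blkdev.h> are available. */
#include <linux/bio.h>
#include <linux/blkdev.h>

static struct bio *map_buffer(struct request_queue *q, void *buf, unsigned int len)
{
	/* bio_map_kern() now takes struct request_queue * directly. */
	return bio_map_kern(q, buf, len, GFP_KERNEL);
}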
