| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2007-10-12 21:27:47 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2007-10-12 21:27:47 -0400 |
| commit | b981d8b3f5e008ff10d993be633ad00564fc22cd (patch) | |
| tree | e292dc07b22308912cf6a58354a608b9e5e8e1fd /include/linux/blkdev.h | |
| parent | b11d2127c4893a7315d1e16273bc8560049fa3ca (diff) | |
| parent | 2b9e0aae1d50e880c58d46788e5e3ebd89d75d62 (diff) | |
Merge master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
drivers/macintosh/adbhid.c
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- include/linux/blkdev.h | 183
1 file changed, 88 insertions, 95 deletions
```diff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f78965fc6426..5ed888b04b29 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_BLKDEV_H
 #define _LINUX_BLKDEV_H
 
+#ifdef CONFIG_BLOCK
+
 #include <linux/sched.h>
 #include <linux/major.h>
 #include <linux/genhd.h>
@@ -18,26 +20,10 @@
 
 #include <asm/scatterlist.h>
 
-#ifdef CONFIG_LBD
-# include <asm/div64.h>
-# define sector_div(a, b) do_div(a, b)
-#else
-# define sector_div(n, b)( \
-{ \
-	int _res; \
-	_res = (n) % (b); \
-	(n) /= (b); \
-	_res; \
-} \
-)
-#endif
-
-#ifdef CONFIG_BLOCK
-
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
```
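The last change in this hunk tags the `request_queue_t` typedef with `__deprecated`, the kernel's wrapper around gcc's `deprecated` attribute, so every remaining use of the typedef draws a compile-time warning while the rest of the diff converts declarations to the plain `struct request_queue *` spelling. A minimal sketch of the mechanism, outside the kernel tree (the function names here are illustrative, not from the patch):

```c
/* Stand-in for the kernel's __deprecated macro. */
#define __deprecated __attribute__((deprecated))

struct request_queue;
typedef struct request_queue request_queue_t __deprecated;

void old_spelling(request_queue_t *q);        /* gcc warns: 'request_queue_t' is deprecated */
void new_spelling(struct request_queue *q);   /* preferred spelling, no warning */
```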
```diff
@@ -233,7 +219,7 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	request_queue_t *q;
+	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
@@ -337,15 +323,15 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
@@ -471,7 +457,6 @@ struct request_queue
 	int orderr, ordcolor;
 	struct request pre_flush_rq, bar_rq, post_flush_rq;
 	struct request *orig_bar_rq;
-	unsigned int bi_size;
 
 	struct mutex sysfs_lock;
 
@@ -483,8 +468,8 @@ struct request_queue
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
-#define QUEUE_FLAG_READFULL	3	/* write queue has been filled */
-#define QUEUE_FLAG_WRITEFULL	4	/* read queue has been filled */
+#define QUEUE_FLAG_READFULL	3	/* read queue has been filled */
+#define QUEUE_FLAG_WRITEFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
 #define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
```
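The `QUEUE_FLAG_READFULL`/`QUEUE_FLAG_WRITEFULL` change above only swaps two comments that had been transposed; the flag values themselves are untouched. These flags are bit numbers tested against `q->queue_flags`, in the same pattern the existing `blk_queue_stopped()`-style helpers use; a hedged sketch (the helper name is invented for illustration):

```c
#include <linux/blkdev.h>

/* Illustrative only: mirrors what blk_queue_stopped() expands to,
 * i.e. testing a QUEUE_FLAG_* bit number against q->queue_flags. */
static inline int example_queue_is_stopped(struct request_queue *q)
{
	return test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
}
```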
```diff
@@ -626,34 +611,47 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
 static inline int init_emergency_isa_pool(void)
 {
 	return 0;
 }
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
 #endif /* CONFIG_MMU */
 
-#define rq_for_each_bio(_bio, rq)	\
+struct req_iterator {
+	int i;
+	struct bio *bio;
+};
+
+/* This should not be used directly - use rq_for_each_segment */
+#define __rq_for_each_bio(_bio, rq)	\
 	if ((rq->bio))			\
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
+#define rq_for_each_segment(bvl, _rq, _iter)			\
+	__rq_for_each_bio(_iter.bio, _rq)			\
+		bio_for_each_segment(bvl, _iter.bio, _iter.i)
+
+#define rq_iter_last(rq, _iter)					\
+		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
 			  struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
```
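The hunk above retires the exported `rq_for_each_bio()` in favour of `rq_for_each_segment()`, which carries its loop state in a `struct req_iterator` so drivers no longer open-code the nested per-bio/per-segment loops. A usage sketch under those definitions (the function itself is invented for illustration; only `req_iterator`, `rq_for_each_segment`, and `rq_iter_last` come from the patch):

```c
#include <linux/blkdev.h>
#include <linux/bio.h>

/* Total the data length of a request by walking every bio segment;
 * previously this took rq_for_each_bio() with an explicit
 * bio_for_each_segment() nested inside it. */
static unsigned int example_rq_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec *bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec->bv_len;

	return bytes;
}
```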
```diff
@@ -662,14 +660,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 /*
  * Temporary export, until SCSI gets fixed up.
  */
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
+			     struct bio *bio);
 
 /*
  * A queue has just exitted congestion. Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
 {
 	clear_bdi_congested(&q->backing_dev_info, rw);
 }
@@ -678,34 +677,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
  * A queue has just entered congestion. Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
 {
 	set_bdi_congested(&q->backing_dev_info, rw);
 }
 
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern int blk_fill_sghdr_rq(request_queue_t *, struct request *,
-			     struct sg_io_hdr *, int);
-extern int blk_unmap_sghdr_rq(struct request *, struct sg_io_hdr *);
-extern int blk_complete_sghdr_rq(struct request *, struct sg_io_hdr *,
-				 struct bio *);
 extern int blk_verify_command(unsigned char *, int);
 
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
 }
@@ -754,41 +748,41 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
 
 /*
  * tag stuff
@@ -796,13 +790,13 @@ extern void blk_put_queue(request_queue_t *);
 #define blk_queue_tag_depth(q)	((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)	((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
@@ -814,7 +808,6 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
@@ -826,7 +819,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
 {
 	int retval = 512;
 
@@ -841,7 +834,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
 {
 	int retval = 511;
 
```
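The final hunks convert the remaining inline helpers; `queue_dma_alignment()` still reports the queue's DMA alignment mask, defaulting to 511 (512-byte alignment) unless a driver overrides it through `blk_queue_dma_alignment()`. A sketch of the usual consumer-side check (the helper is invented for illustration):

```c
#include <linux/blkdev.h>

/* A buffer is suitable for direct I/O or DMA on this queue when none
 * of the alignment-mask bits are set in its address. */
static inline int example_dma_aligned(struct request_queue *q, void *buf)
{
	return ((unsigned long)buf & queue_dma_alignment(q)) == 0;
}
```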
