author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-24 15:26:44 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-24 15:26:44 -0400
commit     b2e961eb2e7a54ffaae82f8e0198b26b54ade98e
tree       33e5192a4c84e717d8c2f8235f268216b01053f7  /include/linux/blkdev.h
parent     b8c1c5da1520977cb55a358f20fc09567d40cad9
parent     71f65e6bd7651610d2d6aeb3c12aab63667ace30
Merge branch 'request-queue-t' of git://git.kernel.dk/linux-2.6-block
* 'request-queue-t' of git://git.kernel.dk/linux-2.6-block:
[BLOCK] Add request_queue_t and mark it deprecated
[BLOCK] Get rid of request_queue_t typedef
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  141
1 file changed, 71 insertions(+), 70 deletions(-)
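The net effect of the merge is mechanical: every request_queue_t in blkdev.h becomes struct request_queue, while the typedef itself is kept for now and tagged __deprecated so remaining users get a compile-time warning rather than an immediate build break. Below is a minimal, stand-alone sketch of that mechanism, not part of the patch: the struct body, the old_style/new_style names, and the local #define of __deprecated are assumptions made only so the example compiles outside the kernel, where __deprecated normally comes from the compiler headers and expands to GCC's deprecated attribute.

/* Stand-alone illustration of deprecating a typedef (hypothetical names). */
#define __deprecated __attribute__((deprecated))

struct request_queue {
	int dummy;	/* stand-in member; the real struct lives in blkdev.h */
};

/* Old spelling: kept for now, but every use emits a compile-time warning. */
typedef struct request_queue request_queue_t __deprecated;

/* Pre-patch style: parameter typed through the deprecated typedef. */
static int old_style(request_queue_t *q)
{
	return q->dummy;	/* gcc warns: 'request_queue_t' is deprecated */
}

/* Post-patch style: use the struct tag directly, no warning. */
static int new_style(struct request_queue *q)
{
	return q->dummy;
}

int main(void)
{
	struct request_queue q = { .dummy = 0 };
	return old_style(&q) + new_style(&q);
}

Building the sketch with gcc -Wall warns at the old_style() parameter, which is the nudge the deprecated typedef gives callers until they switch to the struct tag.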
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 695e34964cb7..a1c96d9ee720 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -37,7 +37,7 @@
 struct scsi_ioctl_command;
 
 struct request_queue;
-typedef struct request_queue request_queue_t;
+typedef struct request_queue request_queue_t __deprecated;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
@@ -233,7 +233,7 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	request_queue_t *q;
+	struct request_queue *q;
 
 	unsigned int cmd_flags;
 	enum rq_cmd_type_bits cmd_type;
@@ -337,15 +337,15 @@ struct request_pm_state
 
 #include <linux/elevator.h>
 
-typedef void (request_fn_proc) (request_queue_t *q);
-typedef int (make_request_fn) (request_queue_t *q, struct bio *bio);
-typedef int (prep_rq_fn) (request_queue_t *, struct request *);
-typedef void (unplug_fn) (request_queue_t *);
+typedef void (request_fn_proc) (struct request_queue *q);
+typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
+typedef int (prep_rq_fn) (struct request_queue *, struct request *);
+typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
-typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
-typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
-typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef int (merge_bvec_fn) (struct request_queue *, struct bio *, struct bio_vec *);
+typedef int (issue_flush_fn) (struct request_queue *, struct gendisk *, sector_t *);
+typedef void (prepare_flush_fn) (struct request_queue *, struct request *);
 typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
@@ -626,13 +626,13 @@ extern unsigned long blk_max_low_pfn, blk_max_pfn;
 
 #ifdef CONFIG_BOUNCE
 extern int init_emergency_isa_pool(void);
-extern void blk_queue_bounce(request_queue_t *q, struct bio **bio);
+extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
 #else
 static inline int init_emergency_isa_pool(void)
 {
 	return 0;
 }
-static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
+static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
 {
 }
 #endif /* CONFIG_MMU */
@@ -646,14 +646,14 @@ extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
-extern void __blk_put_request(request_queue_t *, struct request *);
+extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
-extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
-extern void blk_requeue_request(request_queue_t *, struct request *);
-extern void blk_plug_device(request_queue_t *);
-extern int blk_remove_plug(request_queue_t *);
-extern void blk_recount_segments(request_queue_t *, struct bio *);
+extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
+extern void blk_requeue_request(struct request_queue *, struct request *);
+extern void blk_plug_device(struct request_queue *);
+extern int blk_remove_plug(struct request_queue *);
+extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct file *, struct request_queue *,
 			struct gendisk *, unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct file *, struct request_queue *,
@@ -662,14 +662,15 @@ extern int sg_scsi_ioctl(struct file *, struct request_queue *,
 /*
  * Temporary export, until SCSI gets fixed up.
  */
-extern int ll_back_merge_fn(request_queue_t *, struct request *, struct bio *);
+extern int ll_back_merge_fn(struct request_queue *, struct request *,
+			struct bio *);
 
 /*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
 {
 	clear_bdi_congested(&q->backing_dev_info, rw);
 }
@@ -678,29 +679,29 @@ static inline void blk_clear_queue_congested(request_queue_t *q, int rw)
  * A queue has just entered congestion.  Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(request_queue_t *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int rw)
 {
 	set_bdi_congested(&q->backing_dev_info, rw);
 }
 
-extern void blk_start_queue(request_queue_t *q);
-extern void blk_stop_queue(request_queue_t *q);
+extern void blk_start_queue(struct request_queue *q);
+extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
-extern void __blk_stop_queue(request_queue_t *q);
-extern void blk_run_queue(request_queue_t *);
-extern void blk_start_queueing(request_queue_t *);
-extern int blk_rq_map_user(request_queue_t *, struct request *, void __user *, unsigned long);
+extern void __blk_stop_queue(struct request_queue *q);
+extern void blk_run_queue(struct request_queue *);
+extern void blk_start_queueing(struct request_queue *);
+extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
 extern int blk_rq_unmap_user(struct bio *);
-extern int blk_rq_map_kern(request_queue_t *, struct request *, void *, unsigned int, gfp_t);
-extern int blk_rq_map_user_iov(request_queue_t *, struct request *,
+extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
+extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
 			struct sg_iovec *, int, unsigned int);
-extern int blk_execute_rq(request_queue_t *, struct gendisk *,
+extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			struct request *, int);
-extern void blk_execute_rq_nowait(request_queue_t *, struct gendisk *,
+extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 			struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
 
-static inline request_queue_t *bdev_get_queue(struct block_device *bdev)
+static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
 	return bdev->bd_disk->queue;
 }
@@ -749,41 +750,41 @@ static inline void blkdev_dequeue_request(struct request *req)
 /*
  * Access functions for manipulating queue properties
  */
-extern request_queue_t *blk_init_queue_node(request_fn_proc *rfn,
+extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
 					spinlock_t *lock, int node_id);
-extern request_queue_t *blk_init_queue(request_fn_proc *, spinlock_t *);
-extern void blk_cleanup_queue(request_queue_t *);
-extern void blk_queue_make_request(request_queue_t *, make_request_fn *);
-extern void blk_queue_bounce_limit(request_queue_t *, u64);
-extern void blk_queue_max_sectors(request_queue_t *, unsigned int);
-extern void blk_queue_max_phys_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_hw_segments(request_queue_t *, unsigned short);
-extern void blk_queue_max_segment_size(request_queue_t *, unsigned int);
-extern void blk_queue_hardsect_size(request_queue_t *, unsigned short);
-extern void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b);
-extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
-extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
-extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
-extern void blk_queue_dma_alignment(request_queue_t *, int);
-extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
+extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
+extern void blk_cleanup_queue(struct request_queue *);
+extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
+extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
+extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
+extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
+extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
+extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
+extern void blk_queue_dma_alignment(struct request_queue *, int);
+extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
-extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
-extern int blk_do_ordered(request_queue_t *, struct request **);
-extern unsigned blk_ordered_cur_seq(request_queue_t *);
+extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
+extern void blk_queue_issue_flush_fn(struct request_queue *, issue_flush_fn *);
+extern int blk_do_ordered(struct request_queue *, struct request **);
+extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(request_queue_t *, unsigned, int);
+extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
-extern int blk_rq_map_sg(request_queue_t *, struct request *, struct scatterlist *);
+extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(request_queue_t *);
-extern void __generic_unplug_device(request_queue_t *);
+extern void generic_unplug_device(struct request_queue *);
+extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(request_queue_t *);
-request_queue_t *blk_alloc_queue(gfp_t);
-request_queue_t *blk_alloc_queue_node(gfp_t, int);
-extern void blk_put_queue(request_queue_t *);
+int blk_get_queue(struct request_queue *);
+struct request_queue *blk_alloc_queue(gfp_t);
+struct request_queue *blk_alloc_queue_node(gfp_t, int);
+extern void blk_put_queue(struct request_queue *);
 
 /*
  * tag stuff
@@ -791,13 +792,13 @@ extern void blk_put_queue(request_queue_t *);
 #define blk_queue_tag_depth(q) ((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q) ((q)->queue_tags->busy < (q)->queue_tags->max_depth)
 #define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
-extern int blk_queue_start_tag(request_queue_t *, struct request *);
-extern struct request *blk_queue_find_tag(request_queue_t *, int);
-extern void blk_queue_end_tag(request_queue_t *, struct request *);
-extern int blk_queue_init_tags(request_queue_t *, int, struct blk_queue_tag *);
-extern void blk_queue_free_tags(request_queue_t *);
-extern int blk_queue_resize_tags(request_queue_t *, int);
-extern void blk_queue_invalidate_tags(request_queue_t *);
+extern int blk_queue_start_tag(struct request_queue *, struct request *);
+extern struct request *blk_queue_find_tag(struct request_queue *, int);
+extern void blk_queue_end_tag(struct request_queue *, struct request *);
+extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
+extern void blk_queue_free_tags(struct request_queue *);
+extern int blk_queue_resize_tags(struct request_queue *, int);
+extern void blk_queue_invalidate_tags(struct request_queue *);
 extern struct blk_queue_tag *blk_init_tags(int);
 extern void blk_free_tags(struct blk_queue_tag *);
 
@@ -809,7 +810,7 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-extern void blk_rq_bio_prep(request_queue_t *, struct request *, struct bio *);
+extern void blk_rq_bio_prep(struct request_queue *, struct request *, struct bio *);
 extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define MAX_PHYS_SEGMENTS 128
@@ -821,7 +822,7 @@ extern int blkdev_issue_flush(struct block_device *, sector_t *);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(request_queue_t *q)
+static inline int queue_hardsect_size(struct request_queue *q)
 {
 	int retval = 512;
 
@@ -836,7 +837,7 @@ static inline int bdev_hardsect_size(struct block_device *bdev)
 	return queue_hardsect_size(bdev_get_queue(bdev));
 }
 
-static inline int queue_dma_alignment(request_queue_t *q)
+static inline int queue_dma_alignment(struct request_queue *q)
 {
 	int retval = 511;
 