Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	| 247
1 file changed, 183 insertions(+), 64 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..0b1a6cae9de1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	/* the following two fields are internal, NEVER access directly */
+	sector_t __sector;		/* sector cursor */
+	unsigned int __data_len;	/* total data len */
 
 	struct bio *bio;
 	struct bio *biotail;
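Driver-conversion note: the request's position and length now live in two opaque fields, so any code that read rq->sector or rq->current_nr_sectors directly has to go through the accessors introduced further down in this diff. A minimal sketch of the conversion (the function name is made up for illustration):

	/* mydev_next_transfer() is hypothetical; it shows the accessor
	 * calls that replace direct field access after this patch. */
	static void mydev_next_transfer(struct request *rq,
					sector_t *pos, unsigned int *sectors)
	{
		*pos = blk_rq_pos(rq);			/* was rq->sector */
		*sectors = blk_rq_cur_sectors(rq);	/* was rq->current_nr_sectors */
	}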
@@ -211,8 +201,8 @@ struct request {
 
 	unsigned short ioprio;
 
-	void *special;
-	char *buffer;
+	void *special;		/* opaque pointer available for LLD use */
+	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -226,10 +216,9 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
 
-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
-	void *data;
+	unsigned int resid_len;	/* residual count */
 	void *sense;
 
 	unsigned long deadline;
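With data_len and the unused data pointer gone, byte counts come from blk_rq_bytes() and the new resid_len field carries the residual count at completion time. A hedged sketch of a completion path (bytes_done stands in for whatever the hardware reported):

	/* Record how many bytes were NOT transferred; 0 means all of it
	 * made it.  mydev_complete() is an invented example. */
	static void mydev_complete(struct request *rq, unsigned int bytes_done)
	{
		rq->resid_len = blk_rq_bytes(rq) - bytes_done;
		blk_complete_request(rq);	/* hand off to the block layer */
	}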
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
 	struct kobject kobj;
 };
 
+struct queue_limits {
+	unsigned long		bounce_pfn;
+	unsigned long		seg_boundary_mask;
+
+	unsigned int		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
+
+	unsigned short		logical_block_size;
+	unsigned short		max_hw_segments;
+	unsigned short		max_phys_segments;
+
+	unsigned char		misaligned;
+	unsigned char		no_cluster;
+};
+
 struct request_queue
 {
 	/*
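The new queue_limits container gathers the per-queue restrictions that used to be loose request_queue fields, plus the new I/O topology fields (physical block size, alignment offset, minimum and optimal I/O sizes). Using the setters declared later in this diff, a probe routine might describe a 512-byte-logical/4KiB-physical disk like this (all device values are invented for illustration):

	blk_queue_logical_block_size(q, 512);	/* addressing unit */
	blk_queue_physical_block_size(q, 4096);	/* real sector size */
	blk_queue_alignment_offset(q, 0);	/* LBA 0 starts a physical sector */
	blk_queue_io_min(q, 4096);		/* smallest I/O without RMW penalty */
	blk_queue_io_opt(q, 64 * 1024);		/* preferred granularity, if any */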
@@ -369,7 +378,6 @@ struct request_queue
 	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		bounce_pfn;
 	gfp_t			bounce_gfp;
 
 	/*
@@ -398,14 +406,6 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;
 
-	unsigned int		max_sectors;
-	unsigned int		max_hw_sectors;
-	unsigned short		max_phys_segments;
-	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
-	unsigned int		max_segment_size;
-
-	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
 	unsigned int		dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct queue_limits	limits;
+
 	/*
 	 * sg stuff
 	 */
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
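queue_in_flight() exists because in_flight is now a two-element array (how the two slots are split is decided in the accounting code, not in this header), so callers that used to read q->in_flight directly should sum through the helper. A sketch of a busy heuristic of the kind an lld_busy_fn might use (the threshold is arbitrary):

	/* mydev_lld_busy() is invented; it shows the helper replacing a
	 * direct read of the old scalar q->in_flight. */
	static int mydev_lld_busy(struct request_queue *q)
	{
		return queue_in_flight(q) >= 4;	/* 4 is an example value */
	}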
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+			     struct bio_set *bs, gfp_t gfp_mask,
+			     int (*bio_ctr)(struct bio *, struct bio *, void *),
+			     void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
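blk_make_request() wraps an existing bio chain in a request, and the blk_rq_prep_clone()/blk_rq_unprep_clone() pair lets a stacking driver duplicate an incoming request's bios out of its own bio_set. A hedged sketch of the pairing (the function and variable names are invented; error handling is trimmed):

	/* Clone 'rq' into the caller-allocated 'clone' using the stacking
	 * driver's private bio_set 'bs'; NULL, NULL because no per-bio
	 * constructor is needed in this example. */
	static int mydev_clone_request(struct request *clone, struct request *rq,
				       struct bio_set *bs)
	{
		return blk_rq_prep_clone(clone, rq, bs, GFP_ATOMIC, NULL, NULL);
	}

	/* When the clone is done (or setup fails later), its bios must be
	 * released with blk_rq_unprep_clone(clone). */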
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
 /*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
-
-/*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()		: the current sector
+ * blk_rq_bytes()	: bytes left in the entire request
+ * blk_rq_cur_bytes()	: bytes left in the current segment
+ * blk_rq_sectors()	: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
 
 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
-			   unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
-			     unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-				unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-				    unsigned int nr_bytes,
-				    int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+			    unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+			      unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-			       unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 /*
  * Access functions for manipulating queue properties
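Taken together, blk_peek_request()/blk_start_request()/blk_fetch_request() plus the reworked completion helpers replace blkdev_dequeue_request(), end_request() and blk_end_request_callback(). A sketch of the shape a simple request_fn takes after this change (mydev_xfer() is a placeholder for the actual data transfer, returning 0 or a negative errno; request_fn runs with the queue lock held, hence the __-prefixed completions):

	static void mydev_request_fn(struct request_queue *q)
	{
		struct request *rq;

		/* blk_fetch_request() == blk_peek_request() + blk_start_request() */
		while ((rq = blk_fetch_request(q)) != NULL) {
			if (!blk_fs_request(rq)) {
				__blk_end_request_all(rq, -EIO);
				continue;
			}
			/* complete the whole request in one call; the
			 * bool-returning variants instead complete a
			 * given number of bytes at a time */
			__blk_end_request_all(rq, mydev_xfer(rq));
		}
	}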
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
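For stacking drivers, blk_stack_limits() and disk_stack_limits() fold a component device's queue_limits into the parent's, taking the component's starting offset into account so alignment can be propagated. A hedged usage sketch for an MD/DM-style driver (names are invented):

	/* Fold one member device's limits into the top-level disk;
	 * 'start' is the member's start offset within the virtual device. */
	static void mydev_add_member(struct gendisk *disk,
				     struct block_device *member,
				     sector_t start)
	{
		disk_stack_limits(disk, member, start);
	}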
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->limits.logical_block_size)
+		retval = q->limits.logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+	return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
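queue_sector_alignment_offset() reports how far a sector sits from the device's preferred I/O boundary once alignment_offset is accounted for; 0 means aligned. A worked example under assumed limits (io_min = 4096, alignment_offset = 0; the helper below is invented):

	/* sector 8 -> byte 4096: (4096 - 0) & 4095 = 0    -> aligned
	 * sector 7 -> byte 3584: (3584 - 0) & 4095 = 3584 -> misaligned */
	static int mydev_part_aligned(struct request_queue *q, sector_t start)
	{
		return queue_sector_alignment_offset(q, start) == 0;
	}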
@@ -1109,6 +1226,8 @@ struct block_device_operations {
 	int (*direct_access) (struct block_device *, sector_t,
 						void **, unsigned long *);
 	int (*media_changed) (struct gendisk *);
+	unsigned long long (*set_capacity) (struct gendisk *,
+						unsigned long long);
 	int (*revalidate_disk) (struct gendisk *);
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
 	struct module *owner;