Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 273 +++++++++++++++++++++++++++++++++-------------
 1 file changed, 188 insertions(+), 85 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1a4af7..e7cb5dbf6c26 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -166,19 +161,9 @@ struct request {
 	enum rq_cmd_type_bits cmd_type;
 	unsigned long atomic_flags;
 
-	/* Maintain bio traversal state for part by part I/O submission.
-	 * hard_* are block layer internals, no driver should touch them!
-	 */
-
-	sector_t sector;		/* next sector to submit */
-	sector_t hard_sector;		/* next sector to complete */
-	unsigned long nr_sectors;	/* no. of sectors left to submit */
-	unsigned long hard_nr_sectors;	/* no. of sectors left to complete */
-	/* no. of sectors left to submit in the current segment */
-	unsigned int current_nr_sectors;
-
-	/* no. of sectors left to complete in the current segment */
-	unsigned int hard_cur_sectors;
+	/* the following two fields are internal, NEVER access directly */
+	sector_t __sector;		/* sector cursor */
+	unsigned int __data_len;	/* total data len */
 
 	struct bio *bio;
 	struct bio *biotail;
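
For context: with sector/nr_sectors and the hard_* mirrors gone, a driver reads a request's position and size only through the accessors introduced later in this patch (blk_rq_pos() and friends), never through the internal __sector/__data_len fields. A minimal sketch, with a hypothetical helper name:

/* Hypothetical debug helper: request position/size via the new
 * accessors rather than the removed rq->sector/rq->nr_sectors.
 */
static void my_drv_show_rq(struct request *rq)
{
	pr_debug("rq: pos=%llu sectors=%u\n",
		 (unsigned long long)blk_rq_pos(rq),	/* was rq->sector */
		 blk_rq_sectors(rq));			/* was rq->nr_sectors */
}
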
@@ -211,8 +196,8 @@ struct request {
 
 	unsigned short ioprio;
 
-	void *special;
-	char *buffer;
+	void *special;		/* opaque pointer available for LLD use */
+	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -226,10 +211,9 @@ struct request {
 	unsigned char __cmd[BLK_MAX_CDB];
 	unsigned char *cmd;
 
-	unsigned int data_len;
 	unsigned int extra_len;	/* length of alignment and padding */
 	unsigned int sense_len;
-	void *data;
+	unsigned int resid_len;	/* residual count */
 	void *sense;
 
 	unsigned long deadline;
@@ -312,10 +296,24 @@ struct blk_queue_tag {
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
-struct blk_cmd_filter {
-	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-	struct kobject kobj;
+struct queue_limits {
+	unsigned long		bounce_pfn;
+	unsigned long		seg_boundary_mask;
+
+	unsigned int		max_hw_sectors;
+	unsigned int		max_sectors;
+	unsigned int		max_segment_size;
+	unsigned int		physical_block_size;
+	unsigned int		alignment_offset;
+	unsigned int		io_min;
+	unsigned int		io_opt;
+
+	unsigned short		logical_block_size;
+	unsigned short		max_hw_segments;
+	unsigned short		max_phys_segments;
+
+	unsigned char		misaligned;
+	unsigned char		no_cluster;
 };
 
 struct request_queue
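
For context: gathering the per-queue restrictions into struct queue_limits lets stacking drivers (DM/MD style) combine the limits of their component devices. A hedged sketch using blk_stack_limits(), which this patch declares further down; the my_* names and member_bdev are hypothetical:

/* Sketch: fold one member device's limits into a stacking driver's
 * top-level limits at the given start offset.  Per this patch's
 * convention, a negative return flags a misaligned member.
 */
static int my_stack_one(struct queue_limits *my_top_limits,
			struct block_device *member_bdev, sector_t start)
{
	struct request_queue *bq = bdev_get_queue(member_bdev);

	return blk_stack_limits(my_top_limits, &bq->limits, start);
}
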
@@ -369,7 +367,6 @@ struct request_queue
 	/*
	 * queue needs bounce pages for pages above this limit
	 */
-	unsigned long		bounce_pfn;
 	gfp_t			bounce_gfp;
 
 	/*
@@ -398,14 +395,6 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;
 
-	unsigned int		max_sectors;
-	unsigned int		max_hw_sectors;
-	unsigned short		max_phys_segments;
-	unsigned short		max_hw_segments;
-	unsigned short		hardsect_size;
-	unsigned int		max_segment_size;
-
-	unsigned long		seg_boundary_mask;
 	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
 	unsigned int		dma_pad_mask;
@@ -415,12 +404,14 @@ struct request_queue
 	struct list_head	tag_busy_list;
 
 	unsigned int		nr_sorted;
-	unsigned int		in_flight;
+	unsigned int		in_flight[2];
 
 	unsigned int		rq_timeout;
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct queue_limits	limits;
+
 	/*
	 * sg stuff
	 */
@@ -443,7 +434,6 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
-	struct blk_cmd_filter cmd_filter;
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -522,6 +512,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
 	__clear_bit(flag, &q->queue_flags);
 }
 
+static inline int queue_in_flight(struct request_queue *q)
+{
+	return q->in_flight[0] + q->in_flight[1];
+}
+
 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 {
 	WARN_ON_ONCE(!queue_is_locked(q));
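
For context: in_flight is now split into sync/async counters, and queue_in_flight() sums both for callers that only care about the total. A tiny sketch with a hypothetical helper:

/* Sketch: has the queue drained?  (sums in_flight[0] + in_flight[1]) */
static bool my_queue_idle(struct request_queue *q)
{
	return queue_in_flight(q) == 0;
}
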
@@ -723,6 +718,7 @@ struct rq_map_data {
 	int nr_entries;
 	unsigned long offset;
 	int null_mapped;
+	int from_user;
 };
 
 struct req_iterator {
@@ -752,10 +748,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+					gfp_t);
 extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
 extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+			     struct bio_set *bs, gfp_t gfp_mask,
+			     int (*bio_ctr)(struct bio *, struct bio *, void *),
+			     void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
 extern void blk_plug_device(struct request_queue *);
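
For context: blk_make_request() wraps an already-built bio chain in a request. A hedged sketch of issuing one synchronously through the existing blk_execute_rq() helper; my_issue_bio is hypothetical, and the ERR_PTR()-on-failure convention is assumed from the function's description:

/* Sketch: turn a prepared bio into a request and execute it. */
static int my_issue_bio(struct request_queue *q, struct gendisk *disk,
			struct bio *bio)
{
	struct request *rq = blk_make_request(q, bio, GFP_KERNEL);

	if (IS_ERR(rq))			/* assumed: ERR_PTR() on failure */
		return PTR_ERR(rq);

	return blk_execute_rq(q, disk, rq, 0);
}
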
@@ -768,28 +771,22 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  struct scsi_ioctl_command __user *);
 
 /*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
-			     struct bio *bio);
-
-/*
  * A queue has just exitted congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
 {
-	clear_bdi_congested(&q->backing_dev_info, rw);
+	clear_bdi_congested(&q->backing_dev_info, sync);
 }
 
 /*
  * A queue has just entered congestion.  Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
 {
-	set_bdi_congested(&q->backing_dev_info, rw);
+	set_bdi_congested(&q->backing_dev_info, sync);
 }
 
 extern void blk_start_queue(struct request_queue *q);
@@ -798,7 +795,6 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
@@ -831,41 +827,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
 		blk_run_backing_dev(mapping->backing_dev_info, NULL);
 }
 
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos()		: the current sector
+ * blk_rq_bytes()	: bytes left in the entire request
+ * blk_rq_cur_bytes()	: bytes left in the current segment
+ * blk_rq_sectors()	: sectors left in the entire request
+ * blk_rq_cur_sectors()	: sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+	return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+	return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+	return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+	return blk_rq_cur_bytes(rq) >> 9;
+}
 
 /*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
+
+/*
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends.  __blk_end_request() must be called
+ * with the request queue spinlock acquired.
  *
  * Several drivers define their own end_request and call
  * blk_end_request() for parts of the original function.
  * This prevents code duplication in drivers.
  */
-extern int blk_end_request(struct request *rq, int error,
-				unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
-				unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
-				unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
-				unsigned int nr_bytes,
-				int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+			       unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+			    unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+			      unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
 extern void blk_complete_request(struct request *);
 extern void __blk_complete_request(struct request *);
 extern void blk_abort_request(struct request *);
 extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
-			       unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
 
 /*
  * Access functions for manipulating queue properties
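
For context: this hunk carries the core driver-visible conversion. The old elv_next_request()/blkdev_dequeue_request() pairing becomes blk_fetch_request() (or blk_peek_request() followed by blk_start_request()), and per-segment completion goes through the __blk_end_request_* variants. A hedged sketch of a simple request_fn under the new API; struct my_dev and my_transfer() are hypothetical:

/* Sketch: minimal request_fn against the new issue/completion API.
 * Called with the queue lock held, hence the __blk_end_* variants.
 */
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {	/* peek + start */
		if (!blk_fs_request(rq)) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		do {
			/* position/size via the new accessors */
			my_transfer(q->queuedata, blk_rq_pos(rq),
				    blk_rq_cur_sectors(rq), rq->buffer,
				    rq_data_dir(rq));
			/* returns true while segments remain */
		} while (__blk_end_request_cur(rq, 0));
	}
}
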
@@ -877,10 +905,21 @@ extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+				       unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern void blk_set_default_limits(struct queue_limits *lim);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+			    sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
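
For context: the new setters let a driver describe its I/O topology (logical/physical block size, alignment, minimum and optimal I/O granularity) at probe time. A hedged sketch for a hypothetical 512e-style disk; all values are illustrative, not taken from this patch:

/* Sketch: probe-time topology setup for a disk with 4096-byte physical
 * sectors behind a 512-byte logical interface.
 */
static void my_setup_limits(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_alignment_offset(q, 0);	/* LBA 0 is aligned */
	blk_queue_io_min(q, 4096);		/* avoid read-modify-write */
	blk_queue_io_opt(q, 64 * 1024);		/* preferred I/O granularity */
	blk_queue_max_sectors(q, 1024);		/* 512 KiB per request */
}
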
@@ -948,13 +987,7 @@ static inline int sb_issue_discard(struct super_block *sb,
 	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
 }
 
-/*
- * command filter functions
- */
-extern int blk_verify_command(struct blk_cmd_filter *filter,
-			      unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
@@ -967,19 +1000,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+	return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+	return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+	return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+	return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+	return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+	return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+	return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
 {
 	int retval = 512;
 
-	if (q && q->hardsect_size)
-		retval = q->hardsect_size;
+	if (q && q->limits.logical_block_size)
+		retval = q->limits.logical_block_size;
 
 	return retval;
 }
 
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+	return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+	return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+	return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+	return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+	if (q && q->limits.misaligned)
+		return -1;
+
+	if (q && q->limits.alignment_offset)
+		return q->limits.alignment_offset;
+
+	return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+						sector_t sector)
 {
-	return queue_hardsect_size(bdev_get_queue(bdev));
+	return ((sector << 9) - q->limits.alignment_offset)
+		& (q->limits.io_min - 1);
 }
 
 static inline int queue_dma_alignment(struct request_queue *q)
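
For context: consumers are expected to read topology back through these helpers rather than poking q->limits directly. A hedged sketch of a caller choosing an I/O size from the exported hints; my_pick_io_size is hypothetical and a real caller would also bound the result by its own constraints:

/* Sketch: pick an I/O size from the queue's topology hints, falling
 * back from optimal to minimum to the logical block size.
 */
static unsigned int my_pick_io_size(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int size = queue_io_opt(q);

	if (!size)
		size = queue_io_min(q);
	if (!size)
		size = bdev_logical_block_size(bdev);

	return size;
}
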
@@ -1109,6 +1210,8 @@ struct block_device_operations {
 	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
 	int (*media_changed) (struct gendisk *);
+	unsigned long long (*set_capacity) (struct gendisk *,
+						unsigned long long);
 	int (*revalidate_disk) (struct gendisk *);
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
 	struct module *owner;
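
For context: the new set_capacity method asks a driver to grow the device toward a requested native capacity and report the size it actually reached. A hedged sketch of wiring it up; the my_* names are hypothetical and the body is a stub:

/* Sketch: a block_device_operations providing the new hook. */
static unsigned long long my_set_capacity(struct gendisk *disk,
					  unsigned long long capacity)
{
	/* e.g. ask the hardware to expose its full native capacity */
	return capacity;
}

static struct block_device_operations my_fops = {
	.owner		= THIS_MODULE,
	.set_capacity	= my_set_capacity,
};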