Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h  79
1 file changed, 47 insertions(+), 32 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 784a919aa0d..ebd22dbed86 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -316,8 +316,7 @@ struct queue_limits {
 	unsigned int		discard_alignment;
 
 	unsigned short		logical_block_size;
-	unsigned short		max_hw_segments;
-	unsigned short		max_phys_segments;
+	unsigned short		max_segments;
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
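struct queue_limits now carries a single max_segments where it used to keep max_hw_segments (scatter-gather entries the controller accepts) and max_phys_segments (segments before DMA-mapping merges) side by side. Judging by the compatibility shims later in this patch, hardly any driver actually set the two to different values, so one limit suffices.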
@@ -461,8 +460,8 @@ struct request_queue
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
-#define QUEUE_FLAG_CQ	       16	/* hardware does queuing */
-#define QUEUE_FLAG_DISCARD     17	/* supports DISCARD */
+#define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   17	/* No extended merges */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_CLUSTER) |		\
@@ -586,9 +585,10 @@ enum {
 
 #define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
-#define blk_queue_queuing(q)	test_bit(QUEUE_FLAG_CQ, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
+#define blk_queue_noxmerges(q)	\
+	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
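Two flag changes meet here: QUEUE_FLAG_CQ and its blk_queue_queuing() test are gone, letting QUEUE_FLAG_DISCARD slide down to bit 16, and bit 17 becomes QUEUE_FLAG_NOXMERGES with a matching blk_queue_noxmerges() test. Going by the name and the "No extended merges" comment, the new flag suppresses the costlier extended merge probes (looking up merge candidates beyond the most recently merged request) while leaving ordinary contiguous merging intact.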
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()		: bytes left till the next error boundary
  * blk_rq_sectors()		: sectors left in the entire request
  * blk_rq_cur_sectors()	: sectors left in the current segment
- * blk_rq_err_sectors()	: sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> 9;
 }
 
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-	return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
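blk_rq_err_sectors() is removed together with its line in the accessor comment block above; evidently the helper had no remaining callers.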
@@ -926,10 +920,27 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
-extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
-extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
-extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
+
+/* Temporary compatibility wrapper */
+static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max)
+{
+	blk_queue_max_hw_sectors(q, max);
+}
+
+extern void blk_queue_max_segments(struct request_queue *, unsigned short);
+
+static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
+{
+	blk_queue_max_segments(q, max);
+}
+
+
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 					  unsigned int max_discard_sectors);
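The old setters survive as inline wrappers so not-yet-converted drivers keep compiling, with everything funnelling into the single segment limit. A minimal user-space sketch of the pattern (the structs here are mocked stand-ins, not the kernel's real definitions):

#include <stdio.h>

/* Mocked subset of the kernel types -- illustration only. */
struct queue_limits {
	unsigned short max_segments;
};

struct request_queue {
	struct queue_limits limits;
};

static void blk_queue_max_segments(struct request_queue *q, unsigned short max)
{
	q->limits.max_segments = max;	/* the one remaining segment limit */
}

/* Compatibility wrappers, mirroring the hunk above: both legacy
 * setters now write the same field. */
static void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}

static void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max)
{
	blk_queue_max_segments(q, max);
}

int main(void)
{
	struct request_queue q = { { 0 } };

	/* A legacy driver that set both limits now stores to one field
	 * twice, so the later call wins. */
	blk_queue_max_phys_segments(&q, 128);
	blk_queue_max_hw_segments(&q, 64);
	printf("max_segments = %u\n", q.limits.max_segments);	/* prints 64 */
	return 0;
}

One behavioural wrinkle worth noting: a driver that deliberately set the two limits to different values used to get both enforced; through the wrappers, only the last value written sticks.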
@@ -944,6 +955,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			    sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
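bdev_stack_limits() joins blk_stack_limits() and disk_stack_limits() as a third way to stack limits, taking the bottom device as a struct block_device. A plausible reading is that it lets stacking drivers such as DM fold in a component device's limits while accounting for a partition's start offset, but only the prototype is visible in this header.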
@@ -1020,11 +1033,15 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 #define MAX_PHYS_SEGMENTS 128
 #define MAX_HW_SEGMENTS 128
 #define SAFE_MAX_SECTORS 255
-#define BLK_DEF_MAX_SECTORS 1024
-
 #define MAX_SEGMENT_SIZE	65536
 
-#define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL
+enum blk_default_limits {
+	BLK_MAX_SEGMENTS	= 128,
+	BLK_SAFE_MAX_SECTORS	= 255,
+	BLK_DEF_MAX_SECTORS	= 1024,
+	BLK_MAX_SEGMENT_SIZE	= 65536,
+	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
+};
 
 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 
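The default-limit constants are gathered into an enum blk_default_limits block with the values unchanged; the single BLK_MAX_SEGMENTS covers what used to be two identical 128-segment macros. Enumerators are scoped, typed, and visible to debuggers, which is presumably the motivation. Note that the old MAX_PHYS_SEGMENTS, MAX_HW_SEGMENTS, SAFE_MAX_SECTORS, and MAX_SEGMENT_SIZE macros stay behind for now, apparently for the same transition reasons as the wrapper functions above.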
@@ -1048,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
 	return q->limits.max_hw_sectors;
 }
 
-static inline unsigned short queue_max_hw_segments(struct request_queue *q)
-{
-	return q->limits.max_hw_segments;
-}
-
-static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+static inline unsigned short queue_max_segments(struct request_queue *q)
 {
-	return q->limits.max_phys_segments;
+	return q->limits.max_segments;
 }
 
 static inline unsigned int queue_max_segment_size(struct request_queue *q)
@@ -1116,11 +1128,13 @@ static inline int queue_alignment_offset(struct request_queue *q)
 	return q->limits.alignment_offset;
 }
 
-static inline int queue_sector_alignment_offset(struct request_queue *q,
-						sector_t sector)
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
-	return ((sector << 9) - q->limits.alignment_offset)
-		& (q->limits.io_min - 1);
+	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+	unsigned int alignment = (sector << 9) & (granularity - 1);
+
+	return (granularity + lim->alignment_offset - alignment)
+		& (granularity - 1);
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
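The rewritten helper takes a queue_limits pointer rather than a queue, so stacking drivers can evaluate alignment against an intermediate limits struct, and it derives the granularity from the larger of physical_block_size and io_min instead of assuming io_min alone. Folding granularity and offset in before masking also keeps the result in [0, granularity) where the old subtraction could report how far past a boundary the sector sat. A self-contained sketch of the arithmetic (mocked struct subset, illustrative values):

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mocked subset of struct queue_limits -- illustration only. */
struct queue_limits {
	unsigned int physical_block_size;
	unsigned int io_min;
	unsigned int alignment_offset;
};

/* Plain stand-in for the kernel's type-checked max() macro. */
#define max(a, b) ((a) > (b) ? (a) : (b))

static int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}

int main(void)
{
	/* A 4KiB-physical-sector disk whose LBA 0 sits 3584 bytes
	 * (7 * 512) before a physical-block boundary. */
	struct queue_limits lim = {
		.physical_block_size = 4096,
		.io_min = 4096,
		.alignment_offset = 3584,
	};

	printf("%d\n", queue_limit_alignment_offset(&lim, 0)); /* 3584: misaligned */
	printf("%d\n", queue_limit_alignment_offset(&lim, 7)); /* 0: aligned */
	return 0;
}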
@@ -1144,11 +1158,12 @@ static inline int queue_discard_alignment(struct request_queue *q)
 	return q->limits.discard_alignment;
 }
 
-static inline int queue_sector_discard_alignment(struct request_queue *q,
-						 sector_t sector)
+static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
 {
-	return ((sector << 9) - q->limits.discard_alignment)
-		& (q->limits.discard_granularity - 1);
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
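Same treatment for the discard variant. Taking the new arithmetic at face value with illustrative numbers: for discard_granularity = 1048576 (1 MiB) and discard_alignment = 0, a range starting at sector 2048 (byte 1048576) gives alignment = 0 and the helper returns 0, while sector 2049 gives alignment = 512 and the helper returns 1048064, the byte distance to the next 1 MiB boundary. As with the alignment-offset rewrite, the sign convention flips from "how far past a boundary" to "how far short of the next one", and the result always stays within [0, discard_granularity).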