Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 66
1 files changed, 58 insertions, 8 deletions

diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index accbd0e5c89..009b80e49f5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ struct request {
 	void *elevator_private3;
 
 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
@@ -124,6 +125,9 @@ struct request {
 	 * physical address coalescing is performed.
 	 */
 	unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+	unsigned short nr_integrity_segments;
+#endif
 
 	unsigned short ioprio;
 
@@ -243,6 +247,7 @@ struct queue_limits {
 
 	unsigned short logical_block_size;
 	unsigned short max_segments;
+	unsigned short max_integrity_segments;
 
 	unsigned char misaligned;
 	unsigned char discard_misaligned;
@@ -369,6 +374,11 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+	/* Throttle data */
+	struct throtl_data *td;
+#endif
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -803,7 +813,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -945,7 +955,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
 	return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
 	return queue_physical_block_size(bdev_get_queue(bdev));
 }
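As a rough illustration of the widened block-size setter (not part of the patch; the foo_* naming and the 512/4096 values are invented), a driver for a device with 4KiB physical sectors exporting 512-byte logical blocks might now do:

#include <linux/blkdev.h>

/* Illustrative sketch only: advertise logical and physical block sizes
 * at queue setup.  blk_queue_physical_block_size() now takes an
 * unsigned int, so values larger than 65535 become representable. */
static void foo_set_block_sizes(struct request_queue *q)
{
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
}
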
@@ -1034,11 +1044,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
 	return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
 				 unsigned int len)
 {
 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-	return !((unsigned long)addr & alignment) && !(len & alignment);
+	return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
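A hedged sketch of a caller adapting to the new blk_rq_aligned() signature (the foo_* helper and the __user buffer are illustrative): the address is now passed as an unsigned long, so any cast moves to the call site.

#include <linux/blkdev.h>

/* Illustrative only: decide whether a buffer satisfies the queue's DMA
 * alignment and pad mask, e.g. before attempting a direct mapping. */
static int foo_can_map_directly(struct request_queue *q,
				void __user *ubuf, unsigned int len)
{
	return blk_rq_aligned(q, (unsigned long)ubuf, len);
}
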
@@ -1068,6 +1078,7 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
+int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
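For the new kblockd helper, a non-authoritative usage sketch (the foo_* names and the 10 ms delay are invented, not from this patch):

#include <linux/blkdev.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void foo_requeue_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(foo_dwork, foo_requeue_fn);

/* Illustrative only: schedule foo_requeue_fn() on the kblockd
 * workqueue roughly 10 ms from now. */
static void foo_kick_queue_later(struct request_queue *q)
{
	kblockd_schedule_delayed_work(q, &foo_dwork, msecs_to_jiffies(10));
}
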
@@ -1111,6 +1122,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
+extern void throtl_shutdown_timer_wq(struct request_queue *q);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+	return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
+static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1154,8 +1183,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+				   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+				  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+				   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1176,16 +1210,32 @@ static inline int blk_integrity_rq(struct request *rq)
 	return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+	q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+	return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a)		(0)
-#define blk_rq_map_integrity_sg(a, b)		(0)
+#define blk_rq_count_integrity_sg(a, b)		(0)
+#define blk_rq_map_integrity_sg(a, b, c)	(0)
 #define bdev_get_integrity(a)			(0)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
 #define blk_integrity_unregister(a)		do { } while (0);
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define queue_max_integrity_segments(a)		(0)
+#define blk_integrity_merge_rq(a, b, c)		(0)
+#define blk_integrity_merge_bio(a, b, c)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
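Finally, a hedged sketch of how a driver with protection-information hardware might use the new integrity-segment limit (FOO_MAX_PROT_SGL and the foo_* naming are hypothetical). With CONFIG_BLK_DEV_INTEGRITY disabled, the stubs above compile blk_queue_max_integrity_segments() away and make queue_max_integrity_segments() return 0.

#include <linux/blkdev.h>

#define FOO_MAX_PROT_SGL	16	/* assumed hardware limit, illustrative */

/* Illustrative only: cap the number of integrity scatter-gather
 * segments the block layer may hand this queue, and read it back. */
static void foo_init_integrity_limit(struct request_queue *q)
{
	blk_queue_max_integrity_segments(q, FOO_MAX_PROT_SGL);
}

static unsigned short foo_prot_sgl_limit(struct request_queue *q)
{
	return queue_max_integrity_segments(q);
}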