diff options
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 66 |
1 file changed, 58 insertions, 8 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678..16f7f1be1ac 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -115,6 +115,7 @@ struct request { | |||
115 | void *elevator_private3; | 115 | void *elevator_private3; |
116 | 116 | ||
117 | struct gendisk *rq_disk; | 117 | struct gendisk *rq_disk; |
118 | struct hd_struct *part; | ||
118 | unsigned long start_time; | 119 | unsigned long start_time; |
119 | #ifdef CONFIG_BLK_CGROUP | 120 | #ifdef CONFIG_BLK_CGROUP |
120 | unsigned long long start_time_ns; | 121 | unsigned long long start_time_ns; |
@@ -124,6 +125,9 @@ struct request { | |||
124 | * physical address coalescing is performed. | 125 | * physical address coalescing is performed. |
125 | */ | 126 | */ |
126 | unsigned short nr_phys_segments; | 127 | unsigned short nr_phys_segments; |
128 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
129 | unsigned short nr_integrity_segments; | ||
130 | #endif | ||
127 | 131 | ||
128 | unsigned short ioprio; | 132 | unsigned short ioprio; |
129 | 133 | ||
@@ -243,6 +247,7 @@ struct queue_limits { | |||
243 | 247 | ||
244 | unsigned short logical_block_size; | 248 | unsigned short logical_block_size; |
245 | unsigned short max_segments; | 249 | unsigned short max_segments; |
250 | unsigned short max_integrity_segments; | ||
246 | 251 | ||
247 | unsigned char misaligned; | 252 | unsigned char misaligned; |
248 | unsigned char discard_misaligned; | 253 | unsigned char discard_misaligned; |
@@ -367,6 +372,11 @@ struct request_queue | |||
367 | #if defined(CONFIG_BLK_DEV_BSG) | 372 | #if defined(CONFIG_BLK_DEV_BSG) |
368 | struct bsg_class_device bsg_dev; | 373 | struct bsg_class_device bsg_dev; |
369 | #endif | 374 | #endif |
375 | |||
376 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
377 | /* Throttle data */ | ||
378 | struct throtl_data *td; | ||
379 | #endif | ||
370 | }; | 380 | }; |
371 | 381 | ||
372 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | 382 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ |
@@ -851,7 +861,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | |||
851 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 861 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
852 | unsigned int max_discard_sectors); | 862 | unsigned int max_discard_sectors); |
853 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 863 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
854 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 864 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
855 | extern void blk_queue_alignment_offset(struct request_queue *q, | 865 | extern void blk_queue_alignment_offset(struct request_queue *q, |
856 | unsigned int alignment); | 866 | unsigned int alignment); |
857 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 867 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
@@ -1004,7 +1014,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) | |||
1004 | return q->limits.physical_block_size; | 1014 | return q->limits.physical_block_size; |
1005 | } | 1015 | } |
1006 | 1016 | ||
1007 | static inline int bdev_physical_block_size(struct block_device *bdev) | 1017 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) |
1008 | { | 1018 | { |
1009 | return queue_physical_block_size(bdev_get_queue(bdev)); | 1019 | return queue_physical_block_size(bdev_get_queue(bdev)); |
1010 | } | 1020 | } |
@@ -1093,11 +1103,11 @@ static inline int queue_dma_alignment(struct request_queue *q) | |||
1093 | return q ? q->dma_alignment : 511; | 1103 | return q ? q->dma_alignment : 511; |
1094 | } | 1104 | } |
1095 | 1105 | ||
1096 | static inline int blk_rq_aligned(struct request_queue *q, void *addr, | 1106 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
1097 | unsigned int len) | 1107 | unsigned int len) |
1098 | { | 1108 | { |
1099 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 1109 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
1100 | return !((unsigned long)addr & alignment) && !(len & alignment); | 1110 | return !(addr & alignment) && !(len & alignment); |
1101 | } | 1111 | } |
1102 | 1112 | ||
1103 | /* assumes size > 256 */ | 1113 | /* assumes size > 256 */ |
@@ -1127,6 +1137,7 @@ static inline void put_dev_sector(Sector p) | |||
1127 | 1137 | ||
1128 | struct work_struct; | 1138 | struct work_struct; |
1129 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1139 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1140 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | ||
1130 | 1141 | ||
1131 | #ifdef CONFIG_BLK_CGROUP | 1142 | #ifdef CONFIG_BLK_CGROUP |
1132 | /* | 1143 | /* |
@@ -1170,6 +1181,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
1170 | } | 1181 | } |
1171 | #endif | 1182 | #endif |
1172 | 1183 | ||
1184 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
1185 | extern int blk_throtl_init(struct request_queue *q); | ||
1186 | extern void blk_throtl_exit(struct request_queue *q); | ||
1187 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | ||
1188 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | ||
1189 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | ||
1190 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
1191 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | ||
1192 | { | ||
1193 | return 0; | ||
1194 | } | ||
1195 | |||
1196 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
1197 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | ||
1198 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | ||
1199 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | ||
1200 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
1201 | |||
1173 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1202 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1174 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1203 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
1175 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1204 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
@@ -1213,8 +1242,13 @@ struct blk_integrity { | |||
1213 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 1242 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1214 | extern void blk_integrity_unregister(struct gendisk *); | 1243 | extern void blk_integrity_unregister(struct gendisk *); |
1215 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1244 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1216 | extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); | 1245 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
1217 | extern int blk_rq_count_integrity_sg(struct request *); | 1246 | struct scatterlist *); |
1247 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | ||
1248 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | ||
1249 | struct request *); | ||
1250 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | ||
1251 | struct bio *); | ||
1218 | 1252 | ||
1219 | static inline | 1253 | static inline |
1220 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 1254 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
@@ -1235,16 +1269,32 @@ static inline int blk_integrity_rq(struct request *rq) | |||
1235 | return bio_integrity(rq->bio); | 1269 | return bio_integrity(rq->bio); |
1236 | } | 1270 | } |
1237 | 1271 | ||
1272 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | ||
1273 | unsigned int segs) | ||
1274 | { | ||
1275 | q->limits.max_integrity_segments = segs; | ||
1276 | } | ||
1277 | |||
1278 | static inline unsigned short | ||
1279 | queue_max_integrity_segments(struct request_queue *q) | ||
1280 | { | ||
1281 | return q->limits.max_integrity_segments; | ||
1282 | } | ||
1283 | |||
1238 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1284 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
1239 | 1285 | ||
1240 | #define blk_integrity_rq(rq) (0) | 1286 | #define blk_integrity_rq(rq) (0) |
1241 | #define blk_rq_count_integrity_sg(a) (0) | 1287 | #define blk_rq_count_integrity_sg(a, b) (0) |
1242 | #define blk_rq_map_integrity_sg(a, b) (0) | 1288 | #define blk_rq_map_integrity_sg(a, b, c) (0) |
1243 | #define bdev_get_integrity(a) (0) | 1289 | #define bdev_get_integrity(a) (0) |
1244 | #define blk_get_integrity(a) (0) | 1290 | #define blk_get_integrity(a) (0) |
1245 | #define blk_integrity_compare(a, b) (0) | 1291 | #define blk_integrity_compare(a, b) (0) |
1246 | #define blk_integrity_register(a, b) (0) | 1292 | #define blk_integrity_register(a, b) (0) |
1247 | #define blk_integrity_unregister(a) do { } while (0); | 1293 | #define blk_integrity_unregister(a) do { } while (0); |
1294 | #define blk_queue_max_integrity_segments(a, b) do { } while (0); | ||
1295 | #define queue_max_integrity_segments(a) (0) | ||
1296 | #define blk_integrity_merge_rq(a, b, c) (0) | ||
1297 | #define blk_integrity_merge_bio(a, b, c) (0) | ||
1248 | 1298 | ||
1249 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1299 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1250 | 1300 | ||