Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h  |  104
 1 file changed, 87 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 94acd8172b5b..6c6a1f008065 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -111,10 +111,14 @@ struct request {
 	 * Three pointers are available for the IO schedulers, if they need
 	 * more they have to dynamically allocate it. Flush requests are
 	 * never put on the IO scheduler. So let the flush fields share
-	 * space with the three elevator_private pointers.
+	 * space with the elevator data.
 	 */
 	union {
-		void *elevator_private[3];
+		struct {
+			struct io_cq		*icq;
+			void			*priv[2];
+		} elv;
+
 		struct {
 			unsigned int		seq;
 			struct list_head	list;
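The hunk above retires the opaque elevator_private[] array in favor of a
named elv struct holding the per-request io_cq pointer plus two private
slots. A minimal sketch of how an I/O scheduler's set_request path might
use the new fields, assuming a hypothetical my_rq_data payload (the
struct and function names below are illustrative, not from this patch):

    #include <linux/blkdev.h>
    #include <linux/jiffies.h>
    #include <linux/slab.h>

    struct my_rq_data {                     /* hypothetical per-request state */
            unsigned long enqueue_time;
    };

    static int my_set_request(struct request_queue *q, struct request *rq,
                              gfp_t gfp_mask)
    {
            struct my_rq_data *d = kmalloc(sizeof(*d), gfp_mask);

            if (!d)
                    return -ENOMEM;
            d->enqueue_time = jiffies;
            rq->elv.priv[0] = d;            /* was rq->elevator_private[0] */
            return 0;
    }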
@@ -311,6 +315,12 @@ struct request_queue {
 	unsigned long		queue_flags;
 
 	/*
+	 * ida allocated id for this queue.  Used to index queues from
+	 * ioctx.
+	 */
+	int			id;
+
+	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
 	gfp_t			bounce_gfp;
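q->id gives every queue a small unique integer, handed out by an ida, so
per-process io_context data can be indexed by queue. A hedged sketch of
the allocation pattern (the blk_queue_ida name and both helpers are
illustrative; ida_simple_get()/ida_simple_remove() are the stock ida
API of this era):

    #include <linux/blkdev.h>
    #include <linux/idr.h>

    static DEFINE_IDA(blk_queue_ida);       /* assumed global, one per system */

    static int example_assign_queue_id(struct request_queue *q)
    {
            int id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);

            if (id < 0)
                    return id;              /* -ENOMEM and friends */
            q->id = id;
            return 0;
    }

    static void example_release_queue_id(struct request_queue *q)
    {
            ida_simple_remove(&blk_queue_ida, q->id);
    }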
@@ -351,6 +361,8 @@ struct request_queue {
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct list_head	icq_list;
+
 	struct queue_limits	limits;
 
 	/*
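icq_list strings together every io_cq associated with this queue, so the
queue can find and release them all at exit time. A hedged sketch of a
teardown walk, assuming io_cq links into this list through a member
named q_node (the member name and the helper are assumptions, not
verified against this exact tree):

    #include <linux/blkdev.h>
    #include <linux/iocontext.h>

    static void example_drop_all_icqs(struct request_queue *q)
    {
            struct io_cq *icq, *next;

            lockdep_assert_held(q->queue_lock);

            list_for_each_entry_safe(icq, next, &q->icq_list, q_node) {
                    /* the real exit path does more than unlink */
                    list_del_init(&icq->q_node);
            }
    }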
@@ -387,6 +399,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -481,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
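blk_queue_dead() gives callers a one-line test for a queue that has been
marked QUEUE_FLAG_DEAD during teardown. A minimal, illustrative use
(example_get_request() is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/fs.h>

    static struct request *example_get_request(struct request_queue *q,
                                               gfp_t gfp)
    {
            if (unlikely(blk_queue_dead(q)))
                    return NULL;    /* queue is being torn down; give up */

            return blk_get_request(q, READ, gfp);
    }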
@@ -660,7 +676,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
-extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 				    unsigned int len);
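blk_insert_request() is gone; blk_execute_rq_nowait() covers the same
"inject a special request at the head of the queue" case. A hedged
sketch of what a converted caller might look like (my_done() and
example_inject() are hypothetical):

    #include <linux/blkdev.h>

    static void my_done(struct request *rq, int error)
    {
            /* completion callback; runs when the request finishes */
            __blk_put_request(rq->q, rq);
    }

    static void example_inject(struct request_queue *q, struct gendisk *disk,
                               struct request *rq)
    {
            rq->cmd_type = REQ_TYPE_SPECIAL;
            blk_execute_rq_nowait(q, disk, rq, 1 /* at_head */, my_done);
    }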
@@ -675,6 +690,9 @@ extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
+extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
+extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
+			      unsigned int, void __user *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  unsigned int, void __user *);
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
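The two new helpers let a block driver validate a SCSI ioctl against the
device before dispatching it: scsi_verify_blk_ioctl() checks the
command, and scsi_cmd_blk_ioctl() verifies then forwards. A hedged
sketch of a driver ioctl method using them (example_ioctl() is
illustrative):

    #include <linux/blkdev.h>

    static int example_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long arg)
    {
            /* verify + dispatch in one call; errors out on disallowed cmds */
            return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)arg);
    }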
@@ -826,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
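blk_set_stacking_limits() seeds a stacking driver's queue_limits with
wide-open values, which are then narrowed as each underlying device is
folded in. A hedged sketch of the intended pattern (the helper below is
illustrative, not taken from dm or md):

    #include <linux/blkdev.h>

    static int example_build_limits(struct queue_limits *lim,
                                    struct block_device **parts, int n)
    {
            int i, ret = 0;

            blk_set_stacking_limits(lim);   /* start unrestricted */
            for (i = 0; i < n; i++)
                    if (bdev_stack_limits(lim, parts[i], 0))
                            ret = -1;       /* component limits conflict */
            return ret;
    }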
@@ -856,7 +875,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(struct request_queue *);
+bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
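blk_get_queue() now returns bool (true if a reference was taken) and is
__must_check, so callers can no longer silently ignore a failed grab on
a dying queue. Illustrative calling convention:

    #include <linux/blkdev.h>

    static struct request_queue *example_take_ref(struct request_queue *q)
    {
            if (!blk_get_queue(q))
                    return NULL;    /* queue already going away; no ref taken */
            return q;               /* balance later with blk_put_queue(q) */
    }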
@@ -1279,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q)
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-#define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a, b)		(0)
-#define blk_rq_map_integrity_sg(a, b, c)	(0)
-#define bdev_get_integrity(a)			(0)
-#define blk_get_integrity(a)			(0)
-#define blk_integrity_compare(a, b)		(0)
-#define blk_integrity_register(a, b)		(0)
-#define blk_integrity_unregister(a)		do { } while (0)
-#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
-#define queue_max_integrity_segments(a)		(0)
-#define blk_integrity_merge_rq(a, b, c)		(0)
-#define blk_integrity_merge_bio(a, b, c)	(0)
-#define blk_integrity_is_initialized(a)		(0)
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+
+static inline int blk_integrity_rq(struct request *rq)
+{
+	return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+					    struct bio *b)
+{
+	return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+					  struct bio *b,
+					  struct scatterlist *s)
+{
+	return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+	return NULL;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+	return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+	return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+					 struct blk_integrity *b)
+{
+	return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_rq(struct request_queue *rq,
+					 struct request *r1,
+					 struct request *r2)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_bio(struct request_queue *rq,
+					  struct request *r,
+					  struct bio *b)
+{
+	return 0;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+	return false;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
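Replacing the bare (0) macros with typed static inlines means callers of
the integrity API stay type-checked even on !CONFIG_BLK_DEV_INTEGRITY
builds, and their arguments are still referenced, avoiding
set-but-unused warnings in callers. A small illustrative caller that now
compiles identically either way (example_complete() is hypothetical):

    #include <linux/blkdev.h>
    #include <linux/kernel.h>

    static void example_complete(struct request *rq)
    {
            /*
             * With the old macro stubs, this call vanished on
             * !CONFIG_BLK_DEV_INTEGRITY builds and a bad argument went
             * unnoticed; the inline stubs keep the compiler checking it.
             */
            if (blk_integrity_rq(rq))
                    pr_debug("completing an integrity-protected request\n");
    }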
