diff options
Diffstat (limited to 'include/linux/blkdev.h')
| -rw-r--r-- | include/linux/blkdev.h | 38 |
1 file changed, 36 insertions, 2 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 8aba35f46f87..31e11051f1ba 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -280,6 +280,7 @@ struct queue_limits { | |||
| 280 | unsigned long seg_boundary_mask; | 280 | unsigned long seg_boundary_mask; |
| 281 | 281 | ||
| 282 | unsigned int max_hw_sectors; | 282 | unsigned int max_hw_sectors; |
| 283 | unsigned int chunk_sectors; | ||
| 283 | unsigned int max_sectors; | 284 | unsigned int max_sectors; |
| 284 | unsigned int max_segment_size; | 285 | unsigned int max_segment_size; |
| 285 | unsigned int physical_block_size; | 286 | unsigned int physical_block_size; |
| @@ -335,7 +336,7 @@ struct request_queue { | |||
| 335 | unsigned int *mq_map; | 336 | unsigned int *mq_map; |
| 336 | 337 | ||
| 337 | /* sw queues */ | 338 | /* sw queues */ |
| 338 | struct blk_mq_ctx *queue_ctx; | 339 | struct blk_mq_ctx __percpu *queue_ctx; |
| 339 | unsigned int nr_queues; | 340 | unsigned int nr_queues; |
| 340 | 341 | ||
| 341 | /* hw dispatch queues */ | 342 | /* hw dispatch queues */ |
| @@ -795,6 +796,7 @@ extern void __blk_put_request(struct request_queue *, struct request *); | |||
| 795 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); | 796 | extern struct request *blk_get_request(struct request_queue *, int, gfp_t); |
| 796 | extern struct request *blk_make_request(struct request_queue *, struct bio *, | 797 | extern struct request *blk_make_request(struct request_queue *, struct bio *, |
| 797 | gfp_t); | 798 | gfp_t); |
| 799 | extern void blk_rq_set_block_pc(struct request *); | ||
| 798 | extern void blk_requeue_request(struct request_queue *, struct request *); | 800 | extern void blk_requeue_request(struct request_queue *, struct request *); |
| 799 | extern void blk_add_request_payload(struct request *rq, struct page *page, | 801 | extern void blk_add_request_payload(struct request *rq, struct page *page, |
| 800 | unsigned int len); | 802 | unsigned int len); |
| @@ -910,6 +912,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, | |||
| 910 | return q->limits.max_sectors; | 912 | return q->limits.max_sectors; |
| 911 | } | 913 | } |
| 912 | 914 | ||
| 915 | /* | ||
| 916 | * Return maximum size of a request at given offset. Only valid for | ||
| 917 | * file system requests. | ||
| 918 | */ | ||
| 919 | static inline unsigned int blk_max_size_offset(struct request_queue *q, | ||
| 920 | sector_t offset) | ||
| 921 | { | ||
| 922 | if (!q->limits.chunk_sectors) | ||
| 923 | return q->limits.max_hw_sectors; | ||
| 924 | |||
| 925 | return q->limits.chunk_sectors - | ||
| 926 | (offset & (q->limits.chunk_sectors - 1)); | ||
| 927 | } | ||
| 928 | |||
| 913 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | 929 | static inline unsigned int blk_rq_get_max_sectors(struct request *rq) |
| 914 | { | 930 | { |
| 915 | struct request_queue *q = rq->q; | 931 | struct request_queue *q = rq->q; |
| @@ -917,7 +933,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq) | |||
| 917 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) | 933 | if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC)) |
| 918 | return q->limits.max_hw_sectors; | 934 | return q->limits.max_hw_sectors; |
| 919 | 935 | ||
| 920 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | 936 | if (!q->limits.chunk_sectors) |
| 937 | return blk_queue_get_max_sectors(q, rq->cmd_flags); | ||
| 938 | |||
| 939 | return min(blk_max_size_offset(q, blk_rq_pos(rq)), | ||
| 940 | blk_queue_get_max_sectors(q, rq->cmd_flags)); | ||
| 921 | } | 941 | } |
| 922 | 942 | ||
| 923 | static inline unsigned int blk_rq_count_bios(struct request *rq) | 943 | static inline unsigned int blk_rq_count_bios(struct request *rq) |
| @@ -983,6 +1003,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | |||
| 983 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 1003 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 984 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); | 1004 | extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int); |
| 985 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 1005 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
| 1006 | extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int); | ||
| 986 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | 1007 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); |
| 987 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 1008 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 988 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 1009 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
| @@ -1588,6 +1609,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g) | |||
| 1588 | struct block_device_operations { | 1609 | struct block_device_operations { |
| 1589 | int (*open) (struct block_device *, fmode_t); | 1610 | int (*open) (struct block_device *, fmode_t); |
| 1590 | void (*release) (struct gendisk *, fmode_t); | 1611 | void (*release) (struct gendisk *, fmode_t); |
| 1612 | int (*rw_page)(struct block_device *, sector_t, struct page *, int rw); | ||
| 1591 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1613 | int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1592 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); | 1614 | int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); |
| 1593 | int (*direct_access) (struct block_device *, sector_t, | 1615 | int (*direct_access) (struct block_device *, sector_t, |
| @@ -1606,7 +1628,13 @@ struct block_device_operations { | |||
| 1606 | 1628 | ||
| 1607 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 1629 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |
| 1608 | unsigned long); | 1630 | unsigned long); |
| 1631 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); | ||
| 1632 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, | ||
| 1633 | struct writeback_control *); | ||
| 1609 | #else /* CONFIG_BLOCK */ | 1634 | #else /* CONFIG_BLOCK */ |
| 1635 | |||
| 1636 | struct block_device; | ||
| 1637 | |||
| 1610 | /* | 1638 | /* |
| 1611 | * stubs for when the block layer is configured out | 1639 | * stubs for when the block layer is configured out |
| 1612 | */ | 1640 | */ |
| @@ -1642,6 +1670,12 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk) | |||
| 1642 | return false; | 1670 | return false; |
| 1643 | } | 1671 | } |
| 1644 | 1672 | ||
| 1673 | static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask, | ||
| 1674 | sector_t *error_sector) | ||
| 1675 | { | ||
| 1676 | return 0; | ||
| 1677 | } | ||
| 1678 | |||
| 1645 | #endif /* CONFIG_BLOCK */ | 1679 | #endif /* CONFIG_BLOCK */ |
| 1646 | 1680 | ||
| 1647 | #endif | 1681 | #endif |
