Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 66 +++++++++++++++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 43 insertions(+), 23 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 79226ca8f80f..d6869e0e2b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,8 +27,6 @@
 #include <linux/percpu-refcount.h>
 #include <linux/scatterlist.h>
 #include <linux/blkzoned.h>
-#include <linux/seqlock.h>
-#include <linux/u64_stats_sync.h>
 
 struct module;
 struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
 struct pr_ops;
-struct rq_wb;
+struct rq_qos;
 struct blk_queue_stats;
 struct blk_stat_callback;
 
@@ -442,10 +440,8 @@ struct request_queue {
 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
 
-	atomic_t		shared_hctx_restart;
-
 	struct blk_queue_stats	*stats;
-	struct rq_wb		*rq_wb;
+	struct rq_qos		*rq_qos;
 
 	/*
 	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
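The rq_wb pointer, which carried only the writeback-throttling state, is replaced by the head of the more general rq_qos policy chain. As a rough, self-contained sketch of the idea (the structure below mirrors the layout in block/blk-rq-qos.h from this series, but the type name and callback are illustrative, not this header's API): each QoS policy links itself onto the queue, and the block core walks the chain at every hook point.

/* Illustrative sketch only: per-queue QoS policies form a singly
 * linked chain hanging off q->rq_qos; each hook walks the chain. */
struct rq_qos_sketch {
	void (*throttle)(struct rq_qos_sketch *rqos);	/* stand-in for rq_qos_ops */
	struct rq_qos_sketch *next;			/* next policy on this queue */
};

static void rq_qos_throttle_chain(struct rq_qos_sketch *chain)
{
	struct rq_qos_sketch *rqos;

	for (rqos = chain; rqos; rqos = rqos->next)
		if (rqos->throttle)
			rqos->throttle(rqos);
}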
@@ -592,6 +588,7 @@ struct request_queue {
 
 	struct queue_limits	limits;
 
+#ifdef CONFIG_BLK_DEV_ZONED
 	/*
 	 * Zoned block device information for request dispatch control.
 	 * nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
 	unsigned int		nr_zones;
 	unsigned long		*seq_zones_bitmap;
 	unsigned long		*seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 	/*
 	 * sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
 	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
 }
 
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-{
-	return q->nr_zones;
-}
-
+#ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_queue_zone_no(struct request_queue *q,
 					     sector_t sector)
 {
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
 		return false;
 	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
 }
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 static inline bool rq_is_sync(struct request *rq)
 {
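With this guard, blk_queue_zone_no() and blk_queue_zone_is_seq() only exist when CONFIG_BLK_DEV_ZONED is set, so any caller built for both configurations has to compile the calls out as well. A minimal sketch of that caller-side pattern (the wrapper function is hypothetical; blk_queue_zone_is_seq() is the real helper from this header):

/* Hypothetical caller: writes to sequential zones need per-zone
 * serialization, but only when zoned support is built in. */
static bool sketch_sector_needs_zone_lock(struct request_queue *q,
					  sector_t sector)
{
#ifdef CONFIG_BLK_DEV_ZONED
	return blk_queue_zone_is_seq(q, sector);
#else
	return false;	/* zoned helpers compiled out */
#endif
}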
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
 }
 
+#ifdef CONFIG_BLK_DEV_ZONED
 static inline unsigned int blk_rq_zone_no(struct request *rq)
 {
 	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
 {
 	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
 }
+#endif /* CONFIG_BLK_DEV_ZONED */
 
 /*
  * Some commands like WRITE SAME have a payload or data transfer size which
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
 };
 
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
 static inline unsigned long queue_segment_boundary(struct request_queue *q)
 {
 	return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
 	return 0;
 }
 
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
-	struct request_queue *q = bdev_get_queue(bdev);
-
-	if (q)
-		return blk_queue_nr_zones(q);
-	return 0;
-}
-
 static inline int queue_dma_alignment(struct request_queue *q)
 {
 	return q ? q->dma_alignment : 511;
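The removed bodies show the direct replacement for these helpers: reading q->nr_zones, which after this change exists only under CONFIG_BLK_DEV_ZONED. A caller that still needs a zone count in both configurations would open-code something like the following hypothetical sketch (bdev_get_queue() and the nr_zones field are real; the function name is not):

/* Hypothetical open-coded stand-in for the removed bdev_nr_zones(). */
static unsigned int sketch_bdev_nr_zones(struct block_device *bdev)
{
#ifdef CONFIG_BLK_DEV_ZONED
	struct request_queue *q = bdev_get_queue(bdev);

	return q ? q->nr_zones : 0;
#else
	return 0;	/* not a zoned kernel: no zones to report */
#endif
}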
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 			    bip_next->bip_vec[0].bv_offset);
 }
 
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+						   unsigned int sectors)
+{
+	return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+					       unsigned int sectors)
+{
+	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
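The shift in bio_integrity_intervals() works because both sizes are powers of two: bi->interval_exp is log2 of the device's integrity interval in bytes, and 9 is log2 of the 512-byte block layer sector. A standalone arithmetic check under assumed values (a 4096-byte protection interval and 8-byte tuples, as with T10 PI; these numbers are examples, not taken from the header):

#include <assert.h>

int main(void)
{
	unsigned int interval_exp = 12;	/* log2(4096-byte integrity interval) */
	unsigned int tuple_size = 8;	/* bytes of metadata per interval */
	unsigned int sectors = 32;	/* a 16 KiB bio in 512-byte sectors */

	/* Mirrors bio_integrity_intervals(): 32 >> (12 - 9) = 4 intervals */
	unsigned int intervals = sectors >> (interval_exp - 9);
	/* Mirrors bio_integrity_bytes(): 4 * 8 = 32 bytes of metadata */
	unsigned int bytes = intervals * tuple_size;

	assert(intervals == 4 && bytes == 32);
	return 0;
}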
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
 	return false;
 }
 
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+						   unsigned int sectors)
+{
+	return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+					       unsigned int sectors)
+{
+	return 0;
+}
+
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
-	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	unsigned int (*check_events) (struct gendisk *disk,
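The last hunk widens rw_page()'s final parameter from bool (write or not) to unsigned int, so the driver now receives the full REQ_OP value rather than a pre-digested flag. A hypothetical driver callback under the new signature (op_is_write() is the real predicate from linux/blk_types.h; the sketch_* helpers are placeholders):

/* Placeholder I/O helpers, assumed to exist elsewhere in the driver. */
static int sketch_read_page(struct block_device *bdev, sector_t sector,
			    struct page *page);
static int sketch_write_page(struct block_device *bdev, sector_t sector,
			     struct page *page);

/* Hypothetical driver implementation of the new rw_page() hook. */
static int sketch_rw_page(struct block_device *bdev, sector_t sector,
			  struct page *page, unsigned int op)
{
	if (op_is_write(op))
		return sketch_write_page(bdev, sector, page);
	return sketch_read_page(bdev, sector, page);
}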