Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 80
 1 file changed, 43 insertions(+), 37 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d9cf326574f..c96db9c22d10 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,18 +90,17 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		unsigned long fifo_time;
+		u64 fifo_time;
 	};
 
 	struct request_queue *q;
 	struct blk_mq_ctx *mq_ctx;
 
-	u64 cmd_flags;
+	int cpu;
 	unsigned cmd_type;
+	u64 cmd_flags;
 	unsigned long atomic_flags;
 
-	int cpu;
-
 	/* the following two fields are internal, NEVER access directly */
 	unsigned int __data_len;	/* total data len */
 	sector_t __sector;		/* sector cursor */
@@ -200,6 +199,20 @@ struct request {
 	struct request *next_rq;
 };
 
+#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
+#define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
+
+#define req_set_op(req, op) do {				\
+	WARN_ON(op >= (1 << REQ_OP_BITS));			\
+	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
+	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
+} while (0)
+
+#define req_set_op_attrs(req, op, flags) do {	\
+	req_set_op(req, op);			\
+	(req)->cmd_flags |= flags;		\
+} while (0)
+
 static inline unsigned short req_get_ioprio(struct request *req)
 {
 	return req->ioprio;
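
The new req_op()/req_set_op() accessors keep the operation in the top REQ_OP_BITS of cmd_flags and the attribute flags in the remaining low bits. A minimal sketch of how a caller might use them; the helper below is hypothetical and only assumes REQ_OP_WRITE and REQ_SYNC from blk_types.h:

#include <linux/blkdev.h>

/*
 * Hypothetical helper, not part of this patch: mark a request as a
 * synchronous write, then read the op back out of cmd_flags.
 */
static void example_mark_sync_write(struct request *rq)
{
	/* the op lands in the top REQ_OP_BITS; attrs stay in the low bits */
	req_set_op_attrs(rq, REQ_OP_WRITE, REQ_SYNC);

	/* req_op() shifts the op back down; attribute bits are untouched */
	WARN_ON(req_op(rq) != REQ_OP_WRITE);
	WARN_ON(!(rq->cmd_flags & REQ_SYNC));
}
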
@@ -483,7 +496,7 @@ struct request_queue {
 #define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
@@ -492,6 +505,7 @@ struct request_queue {
 #define QUEUE_FLAG_WC          23	/* Write back caching */
 #define QUEUE_FLAG_FUA         24	/* device supports FUA writes */
 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueuable */
+#define QUEUE_FLAG_DAX         26	/* device supports DAX */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -579,8 +593,9 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_stackable(q)	\
 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
-#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
-	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
+#define blk_queue_secure_erase(q) \
+	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
+#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
 
 #define blk_noretry_request(rq) \
 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
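
With the rename, secure erase support is reported by blk_queue_secure_erase() (testing QUEUE_FLAG_SECERASE instead of QUEUE_FLAG_SECDISCARD), and DAX capability by the new blk_queue_dax(). A hedged sketch of a capability check; the helper name is hypothetical, only the blk_queue_*() macros come from this patch:

#include <linux/blkdev.h>

/* Hypothetical helper: gate optional behaviour on the new queue flags. */
static bool example_can_secure_erase(struct request_queue *q)
{
	/* QUEUE_FLAG_SECERASE replaces QUEUE_FLAG_SECDISCARD */
	if (!blk_queue_secure_erase(q))
		return false;

	/* QUEUE_FLAG_DAX is newly exposed through blk_queue_dax() */
	if (blk_queue_dax(q))
		pr_debug("queue also supports DAX\n");

	return true;
}
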
@@ -597,7 +612,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((int)((rq)->cmd_flags & 1))
+#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
 
 /*
  * Driver can handle struct request, if it either has an old style
@@ -616,14 +631,14 @@ static inline unsigned int blk_queue_cluster(struct request_queue *q)
 /*
  * We regard a request as sync, if either a read or a sync write
  */
-static inline bool rw_is_sync(unsigned int rw_flags)
+static inline bool rw_is_sync(int op, unsigned int rw_flags)
 {
-	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
+	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
 }
 
 static inline bool rq_is_sync(struct request *rq)
 {
-	return rw_is_sync(rq->cmd_flags);
+	return rw_is_sync(req_op(rq), rq->cmd_flags);
 }
 
 static inline bool blk_rl_full(struct request_list *rl, bool sync)
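
rw_is_sync() now takes the operation separately from the flags: a read is always sync, a write only when REQ_SYNC is set, and rq_is_sync() passes req_op(rq) into the same helper. A small illustrative check with a hypothetical function name:

#include <linux/blkdev.h>

/* Hypothetical helper demonstrating the (op, flags) based sync rules. */
static bool example_sync_rules(struct request *rq)
{
	/* a read is always treated as sync */
	WARN_ON(!rw_is_sync(REQ_OP_READ, 0));

	/* a write is sync only when REQ_SYNC is set */
	WARN_ON(rw_is_sync(REQ_OP_WRITE, 0));
	WARN_ON(!rw_is_sync(REQ_OP_WRITE, REQ_SYNC));

	/* rq_is_sync() feeds req_op(rq) and cmd_flags into the same helper */
	return rq_is_sync(rq);
}
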
@@ -652,22 +667,10 @@ static inline bool rq_mergeable(struct request *rq)
 	if (rq->cmd_type != REQ_TYPE_FS)
 		return false;
 
-	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+	if (req_op(rq) == REQ_OP_FLUSH)
 		return false;
 
-	return true;
-}
-
-static inline bool blk_check_merge_flags(unsigned int flags1,
-					 unsigned int flags2)
-{
-	if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
-		return false;
-
-	if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
-		return false;
-
-	if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
 		return false;
 
 	return true;
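
blk_check_merge_flags() is removed because the operation no longer lives in shared flag bits; callers can compare req_op() values directly, and rq_mergeable() itself now rejects REQ_OP_FLUSH requests. A hedged sketch of such a comparison (hypothetical helper, not the actual merge path):

#include <linux/blkdev.h>

/* Hypothetical helper: two requests can only merge if their ops match. */
static bool example_ops_can_merge(struct request *rq, struct request *next)
{
	if (!rq_mergeable(rq) || !rq_mergeable(next))
		return false;

	/* discard, write and write-same no longer share REQ_* flag bits */
	return req_op(rq) == req_op(next);
}
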
@@ -786,8 +789,6 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
-extern struct request *blk_make_request(struct request_queue *, struct bio *,
-					gfp_t);
 extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
@@ -800,6 +801,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
+extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_queue_split(struct request_queue *, struct bio **,
 			    struct bio_set *);
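
blk_make_request() is removed while blk_rq_append_bio() is now exported, so a caller that previously built a request around a bio allocates the request itself and appends the bio. This is only a sketch under that assumption; the helper name and error handling are illustrative, not taken from this patch:

#include <linux/blkdev.h>

/* Hypothetical replacement pattern for a former blk_make_request() caller. */
static struct request *example_request_for_bio(struct request_queue *q,
					       struct bio *bio)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, bio_data_dir(bio), GFP_KERNEL);
	if (IS_ERR(rq))
		return rq;

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		blk_put_request(rq);
		return ERR_PTR(ret);
	}
	return rq;
}
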
@@ -879,12 +881,12 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
 }
 
 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
-						     unsigned int cmd_flags)
+						     int op)
 {
-	if (unlikely(cmd_flags & REQ_DISCARD))
+	if (unlikely(op == REQ_OP_DISCARD))
 		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
 
-	if (unlikely(cmd_flags & REQ_WRITE_SAME))
+	if (unlikely(op == REQ_OP_WRITE_SAME))
 		return q->limits.max_write_same_sectors;
 
 	return q->limits.max_sectors;
@@ -904,18 +906,19 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
 			(offset & (q->limits.chunk_sectors - 1));
 }
 
-static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
+						  sector_t offset)
 {
 	struct request_queue *q = rq->q;
 
 	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
 		return q->limits.max_hw_sectors;
 
-	if (!q->limits.chunk_sectors || (rq->cmd_flags & REQ_DISCARD))
-		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors || (req_op(rq) == REQ_OP_DISCARD))
+		return blk_queue_get_max_sectors(q, req_op(rq));
 
-	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
-			blk_queue_get_max_sectors(q, rq->cmd_flags));
+	return min(blk_max_size_offset(q, offset),
+			blk_queue_get_max_sectors(q, req_op(rq)));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
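
blk_rq_get_max_sectors() now takes the sector offset to evaluate instead of always using blk_rq_pos(rq), so callers can ask about the position a merged request would occupy. A minimal sketch with a hypothetical helper:

#include <linux/blkdev.h>

/* Hypothetical size check: would nr_sectors more fit at this offset? */
static bool example_fits_at(struct request *rq, sector_t offset,
			    unsigned int nr_sectors)
{
	return blk_rq_sectors(rq) + nr_sectors <=
		blk_rq_get_max_sectors(rq, offset);
}
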
@@ -1135,13 +1138,16 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 	return bqt->tag_index[tag];
 }
 
-#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
+
+#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
+#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */
 
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
-		sector_t nr_sects, gfp_t gfp_mask, int type, struct bio **biop);
+		sector_t nr_sects, gfp_t gfp_mask, int flags,
+		struct bio **biop);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
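
BLKDEV_DISCARD_SECURE is now expressed as bit 0 of the discard flags and BLKDEV_DISCARD_ZERO is new; both are passed through the flags argument of blkdev_issue_discard()/__blkdev_issue_discard(). A hedged example of requesting a secure discard; the wrapper and its arguments are placeholders:

#include <linux/blkdev.h>

/* Hypothetical wrapper: discard a range with the secure-erase flag set. */
static int example_secure_discard(struct block_device *bdev, sector_t sector,
				  sector_t nr_sects)
{
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_DISCARD_SECURE);
}
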