aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2016-10-28 10:48:16 -0400
committerJens Axboe <axboe@fb.com>2016-10-28 10:48:16 -0400
commitef295ecf090d3e86e5b742fc6ab34f1122a43773 (patch)
treee52e8be789b5654dfc43f626c50261f80615c03f /include/linux
parente806402130c9c494e22c73ae9ead4e79d2a5811c (diff)
block: better op and flags encoding
Now that we don't need the common flags to overflow outside the range of a 32-bit type we can encode them the same way for both the bio and request fields. This in addition allows us to place the operation first (and make some room for more ops while we're at it) and to stop having to shift around the operation values. In addition this allows passing around only one value in the block layer instead of two (and eventually also in the file systems, but we can do that later) and thus clean up a lot of code. Last but not least this allows decreasing the size of the cmd_flags field in struct request to 32-bits. Various functions passing this value could also be updated, but I'd like to avoid the churn for now. Signed-off-by: Christoph Hellwig <hch@lst.de> Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/blk-cgroup.h11
-rw-r--r--include/linux/blk_types.h83
-rw-r--r--include/linux/blkdev.h26
-rw-r--r--include/linux/blktrace_api.h2
-rw-r--r--include/linux/dm-io.h2
-rw-r--r--include/linux/elevator.h4
6 files changed, 48 insertions, 80 deletions
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 3bf5d33800ab..ddaf28d0988f 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -581,15 +581,14 @@ static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
581/** 581/**
582 * blkg_rwstat_add - add a value to a blkg_rwstat 582 * blkg_rwstat_add - add a value to a blkg_rwstat
583 * @rwstat: target blkg_rwstat 583 * @rwstat: target blkg_rwstat
584 * @op: REQ_OP 584 * @op: REQ_OP and flags
585 * @op_flags: rq_flag_bits
586 * @val: value to add 585 * @val: value to add
587 * 586 *
588 * Add @val to @rwstat. The counters are chosen according to @rw. The 587 * Add @val to @rwstat. The counters are chosen according to @rw. The
589 * caller is responsible for synchronizing calls to this function. 588 * caller is responsible for synchronizing calls to this function.
590 */ 589 */
591static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat, 590static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
592 int op, int op_flags, uint64_t val) 591 unsigned int op, uint64_t val)
593{ 592{
594 struct percpu_counter *cnt; 593 struct percpu_counter *cnt;
595 594
@@ -600,7 +599,7 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
600 599
601 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH); 600 __percpu_counter_add(cnt, val, BLKG_STAT_CPU_BATCH);
602 601
603 if (op_flags & REQ_SYNC) 602 if (op & REQ_SYNC)
604 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC]; 603 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
605 else 604 else
606 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC]; 605 cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
@@ -705,9 +704,9 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
705 704
706 if (!throtl) { 705 if (!throtl) {
707 blkg = blkg ?: q->root_blkg; 706 blkg = blkg ?: q->root_blkg;
708 blkg_rwstat_add(&blkg->stat_bytes, bio_op(bio), bio->bi_opf, 707 blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
709 bio->bi_iter.bi_size); 708 bio->bi_iter.bi_size);
710 blkg_rwstat_add(&blkg->stat_ios, bio_op(bio), bio->bi_opf, 1); 709 blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
711 } 710 }
712 711
713 rcu_read_unlock(); 712 rcu_read_unlock();
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index ec69a8fe3b29..dca972d67548 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -88,24 +88,6 @@ struct bio {
88 struct bio_vec bi_inline_vecs[0]; 88 struct bio_vec bi_inline_vecs[0];
89}; 89};
90 90
91#define BIO_OP_SHIFT (8 * FIELD_SIZEOF(struct bio, bi_opf) - REQ_OP_BITS)
92#define bio_flags(bio) ((bio)->bi_opf & ((1 << BIO_OP_SHIFT) - 1))
93#define bio_op(bio) ((bio)->bi_opf >> BIO_OP_SHIFT)
94
95#define bio_set_op_attrs(bio, op, op_flags) do { \
96 if (__builtin_constant_p(op)) \
97 BUILD_BUG_ON((op) + 0U >= (1U << REQ_OP_BITS)); \
98 else \
99 WARN_ON_ONCE((op) + 0U >= (1U << REQ_OP_BITS)); \
100 if (__builtin_constant_p(op_flags)) \
101 BUILD_BUG_ON((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
102 else \
103 WARN_ON_ONCE((op_flags) + 0U >= (1U << BIO_OP_SHIFT)); \
104 (bio)->bi_opf = bio_flags(bio); \
105 (bio)->bi_opf |= (((op) + 0U) << BIO_OP_SHIFT); \
106 (bio)->bi_opf |= (op_flags); \
107} while (0)
108
109#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) 91#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
110 92
111/* 93/*
@@ -147,26 +129,40 @@ struct bio {
147#endif /* CONFIG_BLOCK */ 129#endif /* CONFIG_BLOCK */
148 130
149/* 131/*
150 * Request flags. For use in the cmd_flags field of struct request, and in 132 * Operations and flags common to the bio and request structures.
151 * bi_opf of struct bio. Note that some flags are only valid in either one. 133 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
152 */ 134 */
153enum rq_flag_bits { 135#define REQ_OP_BITS 8
154 /* common flags */ 136#define REQ_OP_MASK ((1 << REQ_OP_BITS) - 1)
155 __REQ_FAILFAST_DEV, /* no driver retries of device errors */ 137#define REQ_FLAG_BITS 24
138
139enum req_opf {
140 REQ_OP_READ,
141 REQ_OP_WRITE,
142 REQ_OP_DISCARD, /* request to discard sectors */
143 REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
144 REQ_OP_WRITE_SAME, /* write same block many times */
145 REQ_OP_FLUSH, /* request for cache flush */
146 REQ_OP_ZONE_REPORT, /* Get zone information */
147 REQ_OP_ZONE_RESET, /* Reset a zone write pointer */
148
149 REQ_OP_LAST,
150};
151
152enum req_flag_bits {
153 __REQ_FAILFAST_DEV = /* no driver retries of device errors */
154 REQ_OP_BITS,
156 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 155 __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
157 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 156 __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */
158
159 __REQ_SYNC, /* request is sync (sync write or read) */ 157 __REQ_SYNC, /* request is sync (sync write or read) */
160 __REQ_META, /* metadata io request */ 158 __REQ_META, /* metadata io request */
161 __REQ_PRIO, /* boost priority in cfq */ 159 __REQ_PRIO, /* boost priority in cfq */
162
163 __REQ_NOMERGE, /* don't touch this for merging */ 160 __REQ_NOMERGE, /* don't touch this for merging */
164 __REQ_NOIDLE, /* don't anticipate more IO after this one */ 161 __REQ_NOIDLE, /* don't anticipate more IO after this one */
165 __REQ_INTEGRITY, /* I/O includes block integrity payload */ 162 __REQ_INTEGRITY, /* I/O includes block integrity payload */
166 __REQ_FUA, /* forced unit access */ 163 __REQ_FUA, /* forced unit access */
167 __REQ_PREFLUSH, /* request for cache flush */ 164 __REQ_PREFLUSH, /* request for cache flush */
168 __REQ_RAHEAD, /* read ahead, can fail anytime */ 165 __REQ_RAHEAD, /* read ahead, can fail anytime */
169
170 __REQ_NR_BITS, /* stops here */ 166 __REQ_NR_BITS, /* stops here */
171}; 167};
172 168
@@ -176,37 +172,32 @@ enum rq_flag_bits {
176#define REQ_SYNC (1ULL << __REQ_SYNC) 172#define REQ_SYNC (1ULL << __REQ_SYNC)
177#define REQ_META (1ULL << __REQ_META) 173#define REQ_META (1ULL << __REQ_META)
178#define REQ_PRIO (1ULL << __REQ_PRIO) 174#define REQ_PRIO (1ULL << __REQ_PRIO)
175#define REQ_NOMERGE (1ULL << __REQ_NOMERGE)
179#define REQ_NOIDLE (1ULL << __REQ_NOIDLE) 176#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
180#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY) 177#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
178#define REQ_FUA (1ULL << __REQ_FUA)
179#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH)
180#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
181 181
182#define REQ_FAILFAST_MASK \ 182#define REQ_FAILFAST_MASK \
183 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 183 (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
184#define REQ_COMMON_MASK \
185 (REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | REQ_NOIDLE | \
186 REQ_PREFLUSH | REQ_FUA | REQ_INTEGRITY | REQ_NOMERGE | REQ_RAHEAD)
187#define REQ_CLONE_MASK REQ_COMMON_MASK
188 184
189/* This mask is used for both bio and request merge checking */
190#define REQ_NOMERGE_FLAGS \ 185#define REQ_NOMERGE_FLAGS \
191 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA) 186 (REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
192 187
193#define REQ_RAHEAD (1ULL << __REQ_RAHEAD) 188#define bio_op(bio) \
194#define REQ_FUA (1ULL << __REQ_FUA) 189 ((bio)->bi_opf & REQ_OP_MASK)
195#define REQ_NOMERGE (1ULL << __REQ_NOMERGE) 190#define req_op(req) \
196#define REQ_PREFLUSH (1ULL << __REQ_PREFLUSH) 191 ((req)->cmd_flags & REQ_OP_MASK)
197 192
198enum req_op { 193/* obsolete, don't use in new code */
199 REQ_OP_READ, 194#define bio_set_op_attrs(bio, op, op_flags) \
200 REQ_OP_WRITE, 195 ((bio)->bi_opf |= (op | op_flags))
201 REQ_OP_DISCARD, /* request to discard sectors */
202 REQ_OP_SECURE_ERASE, /* request to securely erase sectors */
203 REQ_OP_WRITE_SAME, /* write same block many times */
204 REQ_OP_FLUSH, /* request for cache flush */
205 REQ_OP_ZONE_REPORT, /* Get zone information */
206 REQ_OP_ZONE_RESET, /* Reset a zone write pointer */
207};
208 196
209#define REQ_OP_BITS 3 197static inline bool op_is_sync(unsigned int op)
198{
199 return (op & REQ_OP_MASK) == REQ_OP_READ || (op & REQ_SYNC);
200}
210 201
211typedef unsigned int blk_qc_t; 202typedef unsigned int blk_qc_t;
212#define BLK_QC_T_NONE -1U 203#define BLK_QC_T_NONE -1U
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4415feac679..8396da2bb698 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -142,7 +142,7 @@ struct request {
142 142
143 int cpu; 143 int cpu;
144 unsigned cmd_type; 144 unsigned cmd_type;
145 u64 cmd_flags; 145 unsigned int cmd_flags; /* op and common flags */
146 req_flags_t rq_flags; 146 req_flags_t rq_flags;
147 unsigned long atomic_flags; 147 unsigned long atomic_flags;
148 148
@@ -244,20 +244,6 @@ struct request {
244 struct request *next_rq; 244 struct request *next_rq;
245}; 245};
246 246
247#define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
248#define req_op(req) ((req)->cmd_flags >> REQ_OP_SHIFT)
249
250#define req_set_op(req, op) do { \
251 WARN_ON(op >= (1 << REQ_OP_BITS)); \
252 (req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1); \
253 (req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT); \
254} while (0)
255
256#define req_set_op_attrs(req, op, flags) do { \
257 req_set_op(req, op); \
258 (req)->cmd_flags |= flags; \
259} while (0)
260
261static inline unsigned short req_get_ioprio(struct request *req) 247static inline unsigned short req_get_ioprio(struct request *req)
262{ 248{
263 return req->ioprio; 249 return req->ioprio;
@@ -741,17 +727,9 @@ static inline unsigned int blk_queue_zone_size(struct request_queue *q)
741 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; 727 return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
742} 728}
743 729
744/*
745 * We regard a request as sync, if either a read or a sync write
746 */
747static inline bool rw_is_sync(int op, unsigned int rw_flags)
748{
749 return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
750}
751
752static inline bool rq_is_sync(struct request *rq) 730static inline bool rq_is_sync(struct request *rq)
753{ 731{
754 return rw_is_sync(req_op(rq), rq->cmd_flags); 732 return op_is_sync(rq->cmd_flags);
755} 733}
756 734
757static inline bool blk_rl_full(struct request_list *rl, bool sync) 735static inline bool blk_rl_full(struct request_list *rl, bool sync)
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index cceb72f9e29f..e417f080219a 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -118,7 +118,7 @@ static inline int blk_cmd_buf_len(struct request *rq)
118} 118}
119 119
120extern void blk_dump_cmd(char *buf, struct request *rq); 120extern void blk_dump_cmd(char *buf, struct request *rq);
121extern void blk_fill_rwbs(char *rwbs, int op, u32 rw, int bytes); 121extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes);
122 122
123#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 123#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
124 124
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index b91b023deffb..a52c6580cc9a 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -58,7 +58,7 @@ struct dm_io_notify {
58struct dm_io_client; 58struct dm_io_client;
59struct dm_io_request { 59struct dm_io_request {
60 int bi_op; /* REQ_OP */ 60 int bi_op; /* REQ_OP */
61 int bi_op_flags; /* rq_flag_bits */ 61 int bi_op_flags; /* req_flag_bits */
62 struct dm_io_memory mem; /* Memory to use for io */ 62 struct dm_io_memory mem; /* Memory to use for io */
63 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */ 63 struct dm_io_notify notify; /* Synchronous if notify.fn is NULL */
64 struct dm_io_client *client; /* Client memory handler */ 64 struct dm_io_client *client; /* Client memory handler */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e7f358d2e5fc..f219c9aed360 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -30,7 +30,7 @@ typedef int (elevator_dispatch_fn) (struct request_queue *, int);
30typedef void (elevator_add_req_fn) (struct request_queue *, struct request *); 30typedef void (elevator_add_req_fn) (struct request_queue *, struct request *);
31typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *); 31typedef struct request *(elevator_request_list_fn) (struct request_queue *, struct request *);
32typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *); 32typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
33typedef int (elevator_may_queue_fn) (struct request_queue *, int, int); 33typedef int (elevator_may_queue_fn) (struct request_queue *, unsigned int);
34 34
35typedef void (elevator_init_icq_fn) (struct io_cq *); 35typedef void (elevator_init_icq_fn) (struct io_cq *);
36typedef void (elevator_exit_icq_fn) (struct io_cq *); 36typedef void (elevator_exit_icq_fn) (struct io_cq *);
@@ -139,7 +139,7 @@ extern struct request *elv_former_request(struct request_queue *, struct request
139extern struct request *elv_latter_request(struct request_queue *, struct request *); 139extern struct request *elv_latter_request(struct request_queue *, struct request *);
140extern int elv_register_queue(struct request_queue *q); 140extern int elv_register_queue(struct request_queue *q);
141extern void elv_unregister_queue(struct request_queue *q); 141extern void elv_unregister_queue(struct request_queue *q);
142extern int elv_may_queue(struct request_queue *, int, int); 142extern int elv_may_queue(struct request_queue *, unsigned int);
143extern void elv_completed_request(struct request_queue *, struct request *); 143extern void elv_completed_request(struct request_queue *, struct request *);
144extern int elv_set_request(struct request_queue *q, struct request *rq, 144extern int elv_set_request(struct request_queue *q, struct request *rq,
145 struct bio *bio, gfp_t gfp_mask); 145 struct bio *bio, gfp_t gfp_mask);