author     Jens Axboe <axboe@suse.de>                2006-08-10 02:44:47 -0400
committer  Jens Axboe <axboe@nelson.home.kernel.dk>  2006-09-30 14:23:37 -0400
commit     4aff5e2333c9a1609662f2091f55c3f6fffdad36 (patch)
tree       b73d8c2b7c1bdc03d3313c108da7dfc95ee95525 /include
parent     77ed74da26f50fa28471571ee7a2251b77526d84 (diff)
[PATCH] Split struct request ->flags into two parts
Right now ->flags is a bit of a mess: some are request types, and
others are just modifiers. Clean this up by splitting it into
->cmd_type and ->cmd_flags. This allows introduction of generic
Linux block message types, useful for sending generic Linux commands
to block devices.
Signed-off-by: Jens Axboe <axboe@suse.de>
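[Editor's note] To make the split concrete, here is a minimal sketch contrasting the old and new idioms. The macro bodies mirror blk_fs_request() and the REQ_RW/REQ_RW_SYNC modifiers from the diff below; the helper names (is_fs_request_old() and friends) are hypothetical and not part of the patch:

/*
 * Illustrative only: how a request-type check reads before and after
 * the split. Helpers are hypothetical, not from the patch itself.
 */

/* Before: type bits and modifier bits share one ->flags field. */
static inline int is_fs_request_old(struct request *rq)
{
	return rq->flags & REQ_CMD;	/* type tested as a bit */
}

/* After: exactly one command type, tested by equality... */
static inline int is_fs_request_new(struct request *rq)
{
	return rq->cmd_type == REQ_TYPE_FS;
}

/* ...while modifiers remain a bitmask in ->cmd_flags. */
static inline int is_sync_write(struct request *rq)
{
	return (rq->cmd_flags & REQ_RW) && (rq->cmd_flags & REQ_RW_SYNC);
}

Testing the type by equality guarantees a request has exactly one type, which the old bitmask encoding could not express.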
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blkdev.h        | 180
-rw-r--r--  include/linux/blktrace_api.h  |   2
-rw-r--r--  include/scsi/scsi_tcq.h       |   2
3 files changed, 101 insertions, 83 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cfde8b3ee919..b2a412cf468f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -120,6 +120,86 @@ struct request_list {
 	wait_queue_head_t wait[2];
 };
 
+/*
+ * request command types
+ */
+enum rq_cmd_type_bits {
+	REQ_TYPE_FS		= 1,	/* fs request */
+	REQ_TYPE_BLOCK_PC,		/* scsi command */
+	REQ_TYPE_SENSE,			/* sense request */
+	REQ_TYPE_PM_SUSPEND,		/* suspend request */
+	REQ_TYPE_PM_RESUME,		/* resume request */
+	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
+	REQ_TYPE_FLUSH,			/* flush request */
+	REQ_TYPE_SPECIAL,		/* driver defined type */
+	REQ_TYPE_LINUX_BLOCK,		/* generic block layer message */
+	/*
+	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
+	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
+	 * private REQ_LB opcodes to differentiate what type of request this is
+	 */
+	REQ_TYPE_ATA_CMD,
+	REQ_TYPE_ATA_TASK,
+	REQ_TYPE_ATA_TASKFILE,
+};
+
+/*
+ * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
+ * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
+ * SCSI cdb.
+ *
+ * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need,
+ * typically to differentiate REQ_TYPE_SPECIAL requests.
+ *
+ */
+enum {
+	/*
+	 * just examples for now
+	 */
+	REQ_LB_OP_EJECT	= 0x40,		/* eject request */
+	REQ_LB_OP_FLUSH = 0x41,		/* flush device */
+};
+
+/*
+ * request type modified bits. first three bits match BIO_RW* bits, important
+ */
+enum rq_flag_bits {
+	__REQ_RW,		/* not set, read. set, write */
+	__REQ_FAILFAST,		/* no low level driver retries */
+	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
+	__REQ_HARDBARRIER,	/* may not be passed by drive either */
+	__REQ_FUA,		/* forced unit access */
+	__REQ_NOMERGE,		/* don't touch this for merging */
+	__REQ_STARTED,		/* drive already may have started this one */
+	__REQ_DONTPREP,		/* don't call prep for this one */
+	__REQ_QUEUED,		/* uses queueing */
+	__REQ_ELVPRIV,		/* elevator private data attached */
+	__REQ_FAILED,		/* set if the request failed */
+	__REQ_QUIET,		/* don't worry about errors */
+	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
+	__REQ_ORDERED_COLOR,	/* is before or after barrier */
+	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
+	__REQ_NR_BITS,		/* stops here */
+};
+
+#define REQ_RW		(1 << __REQ_RW)
+#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
+#define REQ_SORTED	(1 << __REQ_SORTED)
+#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
+#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
+#define REQ_FUA		(1 << __REQ_FUA)
+#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
+#define REQ_STARTED	(1 << __REQ_STARTED)
+#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
+#define REQ_QUEUED	(1 << __REQ_QUEUED)
+#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
+#define REQ_FAILED	(1 << __REQ_FAILED)
+#define REQ_QUIET	(1 << __REQ_QUIET)
+#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
+#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
+#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
+
 #define BLK_MAX_CDB	16
 
 /*
@@ -129,7 +209,8 @@ struct request {
 	struct list_head queuelist;
 	struct list_head donelist;
 
-	unsigned long flags;		/* see REQ_ bits below */
+	unsigned int cmd_flags;
+	enum rq_cmd_type_bits cmd_type;
 
 	/* Maintain bio traversal state for part by part I/O submission.
 	 * hard_* are block layer internals, no driver should touch them!
@@ -202,73 +283,7 @@ struct request {
 };
 
 /*
- * first three bits match BIO_RW* bits, important
- */
-enum rq_flag_bits {
-	__REQ_RW,		/* not set, read. set, write */
-	__REQ_FAILFAST,		/* no low level driver retries */
-	__REQ_SORTED,		/* elevator knows about this request */
-	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
-	__REQ_FUA,		/* forced unit access */
-	__REQ_CMD,		/* is a regular fs rw request */
-	__REQ_NOMERGE,		/* don't touch this for merging */
-	__REQ_STARTED,		/* drive already may have started this one */
-	__REQ_DONTPREP,		/* don't call prep for this one */
-	__REQ_QUEUED,		/* uses queueing */
-	__REQ_ELVPRIV,		/* elevator private data attached */
-	/*
-	 * for ATA/ATAPI devices
-	 */
-	__REQ_PC,		/* packet command (special) */
-	__REQ_BLOCK_PC,		/* queued down pc from block layer */
-	__REQ_SENSE,		/* sense retrival */
-
-	__REQ_FAILED,		/* set if the request failed */
-	__REQ_QUIET,		/* don't worry about errors */
-	__REQ_SPECIAL,		/* driver suplied command */
-	__REQ_DRIVE_CMD,
-	__REQ_DRIVE_TASK,
-	__REQ_DRIVE_TASKFILE,
-	__REQ_PREEMPT,		/* set for "ide_preempt" requests */
-	__REQ_PM_SUSPEND,	/* suspend request */
-	__REQ_PM_RESUME,	/* resume request */
-	__REQ_PM_SHUTDOWN,	/* shutdown request */
-	__REQ_ORDERED_COLOR,	/* is before or after barrier */
-	__REQ_RW_SYNC,		/* request is sync (O_DIRECT) */
-	__REQ_NR_BITS,		/* stops here */
-};
-
-#define REQ_RW		(1 << __REQ_RW)
-#define REQ_FAILFAST	(1 << __REQ_FAILFAST)
-#define REQ_SORTED	(1 << __REQ_SORTED)
-#define REQ_SOFTBARRIER	(1 << __REQ_SOFTBARRIER)
-#define REQ_HARDBARRIER	(1 << __REQ_HARDBARRIER)
-#define REQ_FUA		(1 << __REQ_FUA)
-#define REQ_CMD		(1 << __REQ_CMD)
-#define REQ_NOMERGE	(1 << __REQ_NOMERGE)
-#define REQ_STARTED	(1 << __REQ_STARTED)
-#define REQ_DONTPREP	(1 << __REQ_DONTPREP)
-#define REQ_QUEUED	(1 << __REQ_QUEUED)
-#define REQ_ELVPRIV	(1 << __REQ_ELVPRIV)
-#define REQ_PC		(1 << __REQ_PC)
-#define REQ_BLOCK_PC	(1 << __REQ_BLOCK_PC)
-#define REQ_SENSE	(1 << __REQ_SENSE)
-#define REQ_FAILED	(1 << __REQ_FAILED)
-#define REQ_QUIET	(1 << __REQ_QUIET)
-#define REQ_SPECIAL	(1 << __REQ_SPECIAL)
-#define REQ_DRIVE_CMD	(1 << __REQ_DRIVE_CMD)
-#define REQ_DRIVE_TASK	(1 << __REQ_DRIVE_TASK)
-#define REQ_DRIVE_TASKFILE	(1 << __REQ_DRIVE_TASKFILE)
-#define REQ_PREEMPT	(1 << __REQ_PREEMPT)
-#define REQ_PM_SUSPEND	(1 << __REQ_PM_SUSPEND)
-#define REQ_PM_RESUME	(1 << __REQ_PM_RESUME)
-#define REQ_PM_SHUTDOWN	(1 << __REQ_PM_SHUTDOWN)
-#define REQ_ORDERED_COLOR	(1 << __REQ_ORDERED_COLOR)
-#define REQ_RW_SYNC	(1 << __REQ_RW_SYNC)
-
-/*
- * State information carried for REQ_PM_SUSPEND and REQ_PM_RESUME
+ * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
  * requests. Some step values could eventually be made generic.
  */
 struct request_pm_state
@@ -490,25 +505,28 @@ enum {
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_flushing(q)	((q)->ordseq)
 
-#define blk_fs_request(rq)	((rq)->flags & REQ_CMD)
-#define blk_pc_request(rq)	((rq)->flags & REQ_BLOCK_PC)
-#define blk_noretry_request(rq)	((rq)->flags & REQ_FAILFAST)
-#define blk_rq_started(rq)	((rq)->flags & REQ_STARTED)
+#define blk_fs_request(rq)	((rq)->cmd_type == REQ_TYPE_FS)
+#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
+#define blk_special_request(rq)	((rq)->cmd_type == REQ_TYPE_SPECIAL)
+#define blk_sense_request(rq)	((rq)->cmd_type == REQ_TYPE_SENSE)
+
+#define blk_noretry_request(rq)	((rq)->cmd_flags & REQ_FAILFAST)
+#define blk_rq_started(rq)	((rq)->cmd_flags & REQ_STARTED)
 
 #define blk_account_rq(rq)	(blk_rq_started(rq) && blk_fs_request(rq))
 
-#define blk_pm_suspend_request(rq)	((rq)->flags & REQ_PM_SUSPEND)
-#define blk_pm_resume_request(rq)	((rq)->flags & REQ_PM_RESUME)
+#define blk_pm_suspend_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND)
+#define blk_pm_resume_request(rq)	((rq)->cmd_type == REQ_TYPE_PM_RESUME)
 #define blk_pm_request(rq)	\
-	((rq)->flags & (REQ_PM_SUSPEND | REQ_PM_RESUME))
+	(blk_pm_suspend_request(rq) || blk_pm_resume_request(rq))
 
-#define blk_sorted_rq(rq)	((rq)->flags & REQ_SORTED)
-#define blk_barrier_rq(rq)	((rq)->flags & REQ_HARDBARRIER)
-#define blk_fua_rq(rq)		((rq)->flags & REQ_FUA)
+#define blk_sorted_rq(rq)	((rq)->cmd_flags & REQ_SORTED)
+#define blk_barrier_rq(rq)	((rq)->cmd_flags & REQ_HARDBARRIER)
+#define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 
 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
 
-#define rq_data_dir(rq)		((rq)->flags & 1)
+#define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
 static inline int blk_queue_full(struct request_queue *q, int rw)
 {
@@ -541,7 +559,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define RQ_NOMERGE_FLAGS	\
 	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
 #define rq_mergeable(rq)	\
-	(!((rq)->flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
+	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
 
 /*
  * noop, requests are automagically marked as active/inactive by I/O
@@ -737,7 +755,7 @@ extern void blk_put_queue(request_queue_t *);
  */
 #define blk_queue_tag_depth(q)		((q)->queue_tags->busy)
 #define blk_queue_tag_queue(q)		((q)->queue_tags->busy < (q)->queue_tags->max_depth)
-#define blk_rq_tagged(rq)		((rq)->flags & REQ_QUEUED)
+#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(request_queue_t *, struct request *);
 extern struct request *blk_queue_find_tag(request_queue_t *, int);
 extern void blk_queue_end_tag(request_queue_t *, struct request *);
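[Editor's note] The REQ_TYPE_LINUX_BLOCK machinery added above carries its opcode in rq->cmd[0], with 0x00-0x3f reserved for driver-private use. A hedged sketch of how a block driver's request handler might dispatch on it; the my_* handlers are hypothetical and appear nowhere in this patch:

/*
 * Illustrative dispatch on the new cmd_type / cmd[0] scheme.
 * my_do_rw(), my_eject() and my_flush() are hypothetical driver hooks.
 */
static void my_handle_request(struct request *rq)
{
	switch (rq->cmd_type) {
	case REQ_TYPE_FS:
		my_do_rw(rq);			/* normal read/write I/O */
		break;
	case REQ_TYPE_LINUX_BLOCK:
		switch (rq->cmd[0]) {		/* generic block layer opcode */
		case REQ_LB_OP_EJECT:
			my_eject(rq);
			break;
		case REQ_LB_OP_FLUSH:
			my_flush(rq);
			break;
		}
		break;
	default:
		break;
	}
}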
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7520cc1ff9e2..ea48eb1b3fd3 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -148,7 +148,7 @@ static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 				    u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
-	int rw = rq->flags & 0x03;
+	int rw = rq->cmd_flags & 0x03;
 
 	if (likely(!bt))
 		return;
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index d04d05adfa9b..bbf66219b769 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -100,7 +100,7 @@ static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
 	struct scsi_device *sdev = cmd->device;
 
 	if (blk_rq_tagged(req)) {
-		if (sdev->ordered_tags && req->flags & REQ_HARDBARRIER)
+		if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER)
 			*msg++ = MSG_ORDERED_TAG;
 		else
 			*msg++ = MSG_SIMPLE_TAG;
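[Editor's note] One subtlety worth spelling out: rq_data_dir() masks cmd_flags with 1, which only works because __REQ_RW is declared first in rq_flag_bits ("first three bits match BIO_RW* bits, important"). A small sketch, assuming the usual READ == 0 / WRITE == 1 kernel constants:

/* Illustrative helper, equivalent to rq_data_dir(); not part of the patch. */
static inline int my_rq_data_dir(struct request *rq)
{
	/* __REQ_RW is bit 0 of cmd_flags, matching BIO_RW: 0 = READ, 1 = WRITE */
	return rq->cmd_flags & 1;
}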