author     Tejun Heo <tj@kernel.org>            2008-11-27 23:32:02 -0500
committer  Jens Axboe <jens.axboe@oracle.com>   2008-12-29 02:28:44 -0500
commit     313e42999dbc0f234ca5909a236f78f082cb43b1
tree       023ac251809e3926ebc6b6c2174d67f8c4ac535f
parent     ba744d5e290055d171c68067259fcc1e2721f542
block: reorganize QUEUE_ORDERED_* constants
Separate out the ordering types (drain, tag) and action masks (preflush,
postflush, fua) from the visible ordering mode selectors
(QUEUE_ORDERED_*).  Ordering types are now named QUEUE_ORDERED_BY_*
while action masks are named QUEUE_ORDERED_DO_*.

This change is necessary to add QUEUE_ORDERED_DO_BAR and make it
optional, improving the empty barrier implementation.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
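
[Editorial note] To make the split concrete: after this patch each visible mode selector is just the OR of one QUEUE_ORDERED_BY_* type bit and zero or more QUEUE_ORDERED_DO_* action bits, so the type and the actions can be tested independently. Below is a minimal userspace sketch of that decomposition, using the constant values exactly as introduced in the blkdev.h hunk; the describe_ordered() helper is invented for illustration and is not part of the patch.

/* Sketch only: constants mirror this patch, the helper is hypothetical. */
#include <stdio.h>

enum {
	QUEUE_ORDERED_BY_DRAIN     = 0x01,	/* ordering types */
	QUEUE_ORDERED_BY_TAG       = 0x02,
	QUEUE_ORDERED_DO_PREFLUSH  = 0x10,	/* action masks */
	QUEUE_ORDERED_DO_POSTFLUSH = 0x40,
	QUEUE_ORDERED_DO_FUA       = 0x80,

	QUEUE_ORDERED_DRAIN_FLUSH  = QUEUE_ORDERED_BY_DRAIN |
				     QUEUE_ORDERED_DO_PREFLUSH |
				     QUEUE_ORDERED_DO_POSTFLUSH,
};

/* hypothetical helper: report the type and actions a mode implies */
static void describe_ordered(unsigned ordered)
{
	printf("type: %s\n",
	       (ordered & QUEUE_ORDERED_BY_TAG) ? "tag" : "drain");
	if (ordered & QUEUE_ORDERED_DO_PREFLUSH)
		printf("action: pre flush\n");
	if (ordered & QUEUE_ORDERED_DO_POSTFLUSH)
		printf("action: post flush\n");
	if (ordered & QUEUE_ORDERED_DO_FUA)
		printf("action: FUA write\n");
}

int main(void)
{
	describe_ordered(QUEUE_ORDERED_DRAIN_FLUSH);
	return 0;
}

Running this prints the drain type plus the pre- and post-flush actions, which is exactly how DRAIN_FLUSH decomposes after the patch.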
-rw-r--r--   block/blk-barrier.c     | 20
-rw-r--r--   include/linux/blkdev.h  | 39
2 files changed, 33 insertions(+), 26 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6e72d661ae42..1d7adc72c95d 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -24,8 +24,8 @@
 int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn)
 {
-	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
-	    prepare_flush_fn == NULL) {
+	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+					     QUEUE_ORDERED_DO_POSTFLUSH))) {
 		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
@@ -134,7 +134,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	struct request *rq;
 	rq_end_io_fn *end_io;
 
-	if (which == QUEUE_ORDERED_PREFLUSH) {
+	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
 		rq = &q->pre_flush_rq;
 		end_io = pre_flush_end_io;
 	} else {
@@ -167,7 +167,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 	blk_rq_init(q, rq);
 	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
 		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_FUA)
+	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
@@ -181,20 +181,20 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * there will be no data written between the pre and post flush.
 	 * Hence a single flush will suffice.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
-		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
+	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
+		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
 	else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 
-	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
-		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
 		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
+	if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
 		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
 	else
 		rq = NULL;
@@ -237,7 +237,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return 1;
 
-	if (q->ordered & QUEUE_ORDERED_TAG) {
+	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
 		/* Ordered by tag.  Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
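
[Editorial note] An aside on the QUEUE_ORDERED_BY_TAG tests above: with tag ordering the device preserves request order itself, so the barrier sequence may start while other requests are still in flight and only a second barrier needs blocking; with drain ordering the sequence cannot start until q->in_flight reaches zero. A tiny standalone model of the gate at the end of start_ordered() (plain C; the function name is made up for this sketch):

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_ORDERED_BY_TAG	0x02	/* mirrors the patch */

/* models: if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0) */
static bool can_start_sequence(unsigned ordered, int in_flight)
{
	return (ordered & QUEUE_ORDERED_BY_TAG) || in_flight == 0;
}

int main(void)
{
	printf("%d\n", can_start_sequence(QUEUE_ORDERED_BY_TAG, 3));	/* 1 */
	printf("%d\n", can_start_sequence(0x01, 3));	/* 0: must drain first */
	return 0;
}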
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e9bb73ff1d64..5c92b4432399 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -523,22 +523,29 @@ enum {
 	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
 	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
 	 */
-	QUEUE_ORDERED_NONE	= 0x00,
-	QUEUE_ORDERED_DRAIN	= 0x01,
-	QUEUE_ORDERED_TAG	= 0x02,
-
-	QUEUE_ORDERED_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_POSTFLUSH	= 0x20,
-	QUEUE_ORDERED_FUA	= 0x40,
-
-	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_BY_DRAIN		= 0x01,
+	QUEUE_ORDERED_BY_TAG		= 0x02,
+	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
+	QUEUE_ORDERED_DO_FUA		= 0x80,
+
+	QUEUE_ORDERED_NONE		= 0x00,
+
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN,
+	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
+
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG,
+	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
 
 	/*
 	 * Ordered operation sequence
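
[Editorial note] For drivers nothing changes at the call site: the composite selectors keep their old names, so an ordering mode is still registered through blk_queue_ordered(). A hedged sketch of such a call against the signature visible in this patch; the example_* names are hypothetical, not from the kernel tree.

#include <linux/blkdev.h>

/* hypothetical: fill in rq as a device-specific cache-flush command */
static void example_prepare_flush(struct request_queue *q, struct request *rq)
{
}

static int example_init_queue(struct request_queue *q)
{
	/*
	 * QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_BY_DRAIN |
	 * QUEUE_ORDERED_DO_PREFLUSH | QUEUE_ORDERED_DO_POSTFLUSH; the
	 * DO_*FLUSH bits make prepare_flush_fn mandatory, as enforced
	 * by the -EINVAL check in blk_queue_ordered().
	 */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 example_prepare_flush);
}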