author    Tejun Heo <tj@kernel.org>  2008-11-27 23:32:04 -0500
committer Jens Axboe <jens.axboe@oracle.com>  2008-12-29 02:28:45 -0500
commit    f671620e7d895af221bdfeda751d54fa55ed9546
tree      beeb843a4a356d94b6b4faec97e078b2a4ad1f09
parent    a7384677b2f4cd40948fd7ce024ba5e1821444ba
block: make every barrier action optional
In all barrier sequences, the barrier write itself was always assumed
to be issued and thus didn't have a corresponding control flag.  This
patch adds QUEUE_ORDERED_DO_BAR and unifies action mask handling in
start_ordered() such that any barrier action can be skipped.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
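To illustrate the resulting scheme: after this patch an ordered mode is nothing but an OR of BY_* (draining method) and DO_* (action) bits, and the barrier write itself is just another DO_* action. The following standalone sketch (plain userspace C, not kernel code; flag values copied from the blkdev.h hunk below, everything else illustrative) decomposes one composite mode into the actions start_ordered() would perform:

#include <stdio.h>

/* Flag values copied from the patched include/linux/blkdev.h; the
 * composite is built the same way QUEUE_ORDERED_DRAIN_FLUSH is after
 * this patch (QUEUE_ORDERED_DRAIN now includes DO_BAR). */
enum {
	QUEUE_ORDERED_BY_DRAIN		= 0x01,
	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
	QUEUE_ORDERED_DO_BAR		= 0x20,	/* new: the barrier write itself */
	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
	QUEUE_ORDERED_DO_FUA		= 0x80,

	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_BY_DRAIN |
					  QUEUE_ORDERED_DO_BAR |
					  QUEUE_ORDERED_DO_PREFLUSH |
					  QUEUE_ORDERED_DO_POSTFLUSH,
};

int main(void)
{
	unsigned ordered = QUEUE_ORDERED_DRAIN_FLUSH;

	/* every action, including the barrier write, is individually gated */
	printf("pre-flush:  %s\n", ordered & QUEUE_ORDERED_DO_PREFLUSH ? "do" : "skip");
	printf("barrier:    %s\n", ordered & QUEUE_ORDERED_DO_BAR ? "do" : "skip");
	printf("post-flush: %s\n", ordered & QUEUE_ORDERED_DO_POSTFLUSH ? "do" : "skip");
	printf("FUA:        %s\n", ordered & QUEUE_ORDERED_DO_FUA ? "do" : "skip");
	return 0;
}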
-rw-r--r--  block/blk-barrier.c     | 41
-rw-r--r--  include/linux/blkdev.h  |  7
2 files changed, 29 insertions(+), 19 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 43d479a1e664..1efabf829c53 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -158,19 +158,10 @@ static inline struct request *start_ordered(struct request_queue *q,
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
-	/*
-	 * Prep proxy barrier request.
-	 */
+	/* stash away the original request */
 	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
-	rq = &q->bar_rq;
-	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_DO_FUA)
-		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
-	rq->end_io = bar_end_io;
+	rq = NULL;
 
 	/*
 	 * Queue ordered sequence.  As we stack them at the head, we
@@ -181,12 +172,28 @@ static inline struct request *start_ordered(struct request_queue *q,
 	 * there will be no data written between the pre and post flush.
 	 * Hence a single flush will suffice.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
+	if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
+	    !blk_empty_barrier(q->orig_bar_rq)) {
 		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
-	else
+		rq = &q->post_flush_rq;
+	} else
 		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+		rq = &q->bar_rq;
+
+		/* initialize proxy request and queue it */
+		blk_rq_init(q, rq);
+		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+			rq->cmd_flags |= REQ_RW;
+		if (q->ordered & QUEUE_ORDERED_DO_FUA)
+			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		rq->end_io = bar_end_io;
+
+		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	} else
+		q->ordseq |= QUEUE_ORDSEQ_BAR;
 
 	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
@@ -194,10 +201,10 @@ static inline struct request *start_ordered(struct request_queue *q,
 	} else
 		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-	else
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
+	else
+		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
 
 	return rq;
 }
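The structural change above is easiest to see in isolation: start_ordered() now treats every action uniformly, either queueing a request for it or immediately marking its QUEUE_ORDSEQ_* step complete, so a skipped action can never stall the sequence. Below is a minimal userspace model of that pattern (illustrative only; the action flags match the patch, the sequence-step bits are stand-ins for the kernel's QUEUE_ORDSEQ_* values, and the queueing side is reduced to a printf):

#include <stdio.h>

enum {	/* action flags, as in the patch */
	DO_PREFLUSH	= 0x10,
	DO_BAR		= 0x20,
	DO_POSTFLUSH	= 0x40,
};
enum {	/* illustrative stand-ins for QUEUE_ORDSEQ_* */
	ORDSEQ_POSTFLUSH	= 0x01,
	ORDSEQ_BAR		= 0x02,
	ORDSEQ_PREFLUSH		= 0x04,
};

static unsigned ordseq;

/* either queue the action's request, or mark its step done up front so
 * the state machine can advance past it without waiting for a completion */
static void start_action(unsigned ordered, unsigned do_flag,
			 unsigned seq_flag, const char *name)
{
	if (ordered & do_flag)
		printf("queue %s request at head\n", name);  /* elv_insert() in the kernel */
	else
		ordseq |= seq_flag;
}

int main(void)
{
	/* a mode with no post-flush, e.g. one relying on FUA writes */
	unsigned ordered = DO_PREFLUSH | DO_BAR;

	/* same reverse order as start_ordered(): requests are stacked at
	 * the queue head, so the last one queued is dispatched first */
	start_action(ordered, DO_POSTFLUSH, ORDSEQ_POSTFLUSH, "post-flush");
	start_action(ordered, DO_BAR, ORDSEQ_BAR, "barrier");
	start_action(ordered, DO_PREFLUSH, ORDSEQ_PREFLUSH, "pre-flush");

	printf("steps pre-completed: %#x\n", ordseq);
	return 0;
}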
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5c92b4432399..b044267009ed 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -526,12 +526,14 @@ enum {
 	QUEUE_ORDERED_BY_DRAIN		= 0x01,
 	QUEUE_ORDERED_BY_TAG		= 0x02,
 	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
 	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
 	QUEUE_ORDERED_DO_FUA		= 0x80,
 
 	QUEUE_ORDERED_NONE		= 0x00,
 
-	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN,
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
 	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_POSTFLUSH,
@@ -539,7 +541,8 @@ enum {
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_FUA,
 
-	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG,
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
 	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
 					  QUEUE_ORDERED_DO_PREFLUSH |
 					  QUEUE_ORDERED_DO_POSTFLUSH,
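One consequence of this header change worth noting: because QUEUE_ORDERED_DO_BAR is folded into QUEUE_ORDERED_DRAIN and QUEUE_ORDERED_TAG, every pre-existing composite mode still issues the barrier write, which is why the patch is behavior-neutral. A standalone compile-time check of that invariant (values copied from the hunk above; the check itself is illustrative, not part of the patch):

/* values copied from the patched include/linux/blkdev.h */
enum {
	QUEUE_ORDERED_BY_DRAIN	= 0x01,
	QUEUE_ORDERED_BY_TAG	= 0x02,
	QUEUE_ORDERED_DO_BAR	= 0x20,

	QUEUE_ORDERED_DRAIN	= QUEUE_ORDERED_BY_DRAIN | QUEUE_ORDERED_DO_BAR,
	QUEUE_ORDERED_TAG	= QUEUE_ORDERED_BY_TAG | QUEUE_ORDERED_DO_BAR,
};

/* C11 static assertions: both legacy modes still include the barrier write */
_Static_assert(QUEUE_ORDERED_DRAIN & QUEUE_ORDERED_DO_BAR,
	       "drain-ordered mode must issue the barrier write");
_Static_assert(QUEUE_ORDERED_TAG & QUEUE_ORDERED_DO_BAR,
	       "tag-ordered mode must issue the barrier write");

int main(void) { return 0; }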