| author | Tejun Heo <tj@kernel.org> | 2008-11-27 23:32:04 -0500 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2008-12-29 02:28:45 -0500 |
| commit | f671620e7d895af221bdfeda751d54fa55ed9546 (patch) | |
| tree | beeb843a4a356d94b6b4faec97e078b2a4ad1f09 /block/blk-barrier.c | |
| parent | a7384677b2f4cd40948fd7ce024ba5e1821444ba (diff) | |
block: make every barrier action optional
In all barrier sequences, the barrier write itself was always assumed
to be issued and thus didn't have a corresponding control flag. This
patch adds QUEUE_ORDERED_DO_BAR and unifies action mask handling in
start_ordered() such that any barrier action can be skipped.
This patch doesn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
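For readers skimming the patch, a minimal user-space sketch of the unified action-mask handling the message describes (not the kernel code itself): every step of the ordered sequence, pre-flush, the barrier write itself, and post-flush, is guarded by a bit in an action mask, and any step that is not requested is marked complete immediately instead of being queued. The flag and sequence names mirror the kernel's QUEUE_ORDERED_DO_* and QUEUE_ORDSEQ_* identifiers, but the values and printf stand-ins below are illustrative assumptions only.

```c
#include <stdio.h>

/* illustrative stand-ins for the kernel's QUEUE_ORDERED_DO_* action flags */
enum {
        DO_PREFLUSH  = 1 << 0,
        DO_BAR       = 1 << 1,  /* the flag this patch introduces */
        DO_POSTFLUSH = 1 << 2,
};

/* illustrative stand-ins for the QUEUE_ORDSEQ_* progress bits */
enum {
        SEQ_PREFLUSH  = 1 << 0,
        SEQ_BAR       = 1 << 1,
        SEQ_POSTFLUSH = 1 << 2,
};

/*
 * Unified action handling: each step is either queued (here: printed) or its
 * sequence bit is set right away, so any step can be skipped.  Steps run in
 * reverse order (post-flush, barrier, pre-flush) because the kernel stacks
 * them at the head of the queue.
 */
static unsigned start_ordered(unsigned ordered)
{
        unsigned ordseq = 0;

        if (ordered & DO_POSTFLUSH)
                printf("queue post-flush\n");
        else
                ordseq |= SEQ_POSTFLUSH;

        if (ordered & DO_BAR)
                printf("queue barrier write\n");
        else
                ordseq |= SEQ_BAR;      /* barrier write itself is now optional */

        if (ordered & DO_PREFLUSH)
                printf("queue pre-flush\n");
        else
                ordseq |= SEQ_PREFLUSH;

        return ordseq;
}

int main(void)
{
        /* e.g. a flush-only mode that skips the barrier write entirely */
        unsigned done = start_ordered(DO_PREFLUSH | DO_POSTFLUSH);
        printf("steps already complete: %#x\n", done);
        return 0;
}
```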
Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r-- | block/blk-barrier.c | 41 |
1 file changed, 24 insertions(+), 17 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 43d479a1e664..1efabf829c53 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -158,19 +158,10 @@ static inline struct request *start_ordered(struct request_queue *q,
         q->ordered = q->next_ordered;
         q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
-        /*
-         * Prep proxy barrier request.
-         */
+        /* stash away the original request */
         elv_dequeue_request(q, rq);
         q->orig_bar_rq = rq;
-        rq = &q->bar_rq;
-        blk_rq_init(q, rq);
-        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                rq->cmd_flags |= REQ_RW;
-        if (q->ordered & QUEUE_ORDERED_DO_FUA)
-                rq->cmd_flags |= REQ_FUA;
-        init_request_from_bio(rq, q->orig_bar_rq->bio);
-        rq->end_io = bar_end_io;
+        rq = NULL;
 
         /*
         * Queue ordered sequence. As we stack them at the head, we
@@ -181,12 +172,28 @@ static inline struct request *start_ordered(struct request_queue *q,
         * there will be no data written between the pre and post flush.
         * Hence a single flush will suffice.
         */
-        if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) && !blk_empty_barrier(rq))
+        if ((q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) &&
+            !blk_empty_barrier(q->orig_bar_rq)) {
                 queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
-        else
+                rq = &q->post_flush_rq;
+        } else
                 q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
 
-        elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+        if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+                rq = &q->bar_rq;
+
+                /* initialize proxy request and queue it */
+                blk_rq_init(q, rq);
+                if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+                        rq->cmd_flags |= REQ_RW;
+                if (q->ordered & QUEUE_ORDERED_DO_FUA)
+                        rq->cmd_flags |= REQ_FUA;
+                init_request_from_bio(rq, q->orig_bar_rq->bio);
+                rq->end_io = bar_end_io;
+
+                elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+        } else
+                q->ordseq |= QUEUE_ORDSEQ_BAR;
 
         if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
                 queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
@@ -194,10 +201,10 @@ static inline struct request *start_ordered(struct request_queue *q,
         } else
                 q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
 
-        if ((q->ordered & QUEUE_ORDERED_BY_TAG) || q->in_flight == 0)
-                q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-        else
+        if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
                 rq = NULL;
+        else
+                q->ordseq |= QUEUE_ORDSEQ_DRAIN;
 
         return rq;
 }
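The final hunk only inverts the drain check. Since the message claims no visible behavior change, it may help to see that the old and new conditions agree whenever an ordered mode selects exactly one of drain-based or tag-based ordering. A throwaway sketch, with made-up flag values that merely mirror the kernel's QUEUE_ORDERED_BY_DRAIN / QUEUE_ORDERED_BY_TAG names:

```c
#include <assert.h>
#include <stdbool.h>

/* illustrative stand-ins for QUEUE_ORDERED_BY_DRAIN / QUEUE_ORDERED_BY_TAG */
enum { BY_DRAIN = 1 << 0, BY_TAG = 1 << 1 };

/* old check: start the sequence now for tag ordering or an idle queue */
static bool old_wait_for_drain(unsigned ordered, int in_flight)
{
        return !((ordered & BY_TAG) || in_flight == 0);
}

/* new check: wait only when drain ordering is used and requests are in flight */
static bool new_wait_for_drain(unsigned ordered, int in_flight)
{
        return (ordered & BY_DRAIN) && in_flight;
}

int main(void)
{
        /* the two forms agree whenever exactly one ordering method is set */
        for (int in_flight = 0; in_flight <= 2; in_flight++) {
                assert(old_wait_for_drain(BY_DRAIN, in_flight) ==
                       new_wait_for_drain(BY_DRAIN, in_flight));
                assert(old_wait_for_drain(BY_TAG, in_flight) ==
                       new_wait_for_drain(BY_TAG, in_flight));
        }
        return 0;
}
```

Either way, the caller gets a request back immediately for tag-ordered or idle queues, and NULL while a drain-ordered queue still has requests in flight.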