author     Tejun Heo <tj@kernel.org>                2010-09-03 05:56:16 -0400
committer  Jens Axboe <jaxboe@fusionio.com>         2010-09-10 06:35:36 -0400
commit     dd831006d5be7f74c3fe7aef82380c51c3637960 (patch)
tree       30a86749f0c4f7980b2982dbde8bbcfff37b08f9 /block/blk-barrier.c
parent     9cbbdca44ae1a6f512ea1e2be11ced8bbb9d430a (diff)
block: misc cleanups in barrier code
Make the following cleanups in preparation for the barrier/flush update.
* blk_do_ordered() declaration is moved from include/linux/blkdev.h to
block/blk.h.
* blk_do_ordered() now returns a pointer to struct request, with %NULL
  meaning "try the next request" and ERR_PTR(-EAGAIN) meaning "try again
  later".  The third case will be dropped with further changes (a sketch
  of the resulting contract follows this message).
* In the initialization of the proxy barrier request, the data direction
  is already set by init_request_from_bio().  Drop the unnecessary
  explicit REQ_WRITE setting and move init_request_from_bio() above the
  REQ_FUA flag setting.
* add_request() is collapsed into __make_request().
These changes don't make any functional difference.
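For reference, a sketch of how the relocated declaration and its new return
contract would read in block/blk.h after this patch; the prototype is taken
from the diff below, while the comment is an editor's summary of the rules
above rather than text from the patch:

    /* block/blk.h -- declaration moved here from include/linux/blkdev.h */

    /*
     * blk_do_ordered - apply barrier ordering to @rq
     *
     * Returns:
     *   request pointer   - dispatch this request (it may be a proxy),
     *   %NULL             - @rq was consumed, try the next request,
     *   ERR_PTR(-EAGAIN)  - ordering still in progress, try again later
     *                       (this case is slated to be dropped later).
     */
    struct request *blk_do_ordered(struct request_queue *q, struct request *rq);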
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--   block/blk-barrier.c   32
1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index ed0aba5463ab..f1be85ba2bb5 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -110,9 +110,9 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline bool start_ordered(struct request_queue *q, struct request **rqp)
+static inline struct request *start_ordered(struct request_queue *q,
+					    struct request *rq)
 {
-	struct request *rq = *rqp;
 	unsigned skip = 0;
 
 	q->orderr = 0;
@@ -149,11 +149,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 
 	/* initialize proxy request and queue it */
 	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_WRITE;
+	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	if (q->ordered & QUEUE_ORDERED_DO_FUA)
 		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
 
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
@@ -171,27 +169,26 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
 
-	*rqp = rq;
-
 	/*
 	 * Complete skipped sequences.  If whole sequence is complete,
-	 * return false to tell elevator that this request is gone.
+	 * return %NULL to tell elevator that this request is gone.
 	 */
-	return !blk_ordered_complete_seq(q, skip, 0);
+	if (blk_ordered_complete_seq(q, skip, 0))
+		rq = NULL;
+	return rq;
 }
 
-bool blk_do_ordered(struct request_queue *q, struct request **rqp)
+struct request *blk_do_ordered(struct request_queue *q, struct request *rq)
 {
-	struct request *rq = *rqp;
 	const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
 		(rq->cmd_flags & REQ_HARDBARRIER);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return true;
+			return rq;
 
 		if (q->next_ordered != QUEUE_ORDERED_NONE)
-			return start_ordered(q, rqp);
+			return start_ordered(q, rq);
 		else {
 			/*
 			 * Queue ordering not supported.  Terminate
@@ -199,8 +196,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 			 */
 			blk_dequeue_request(rq);
 			__blk_end_request_all(rq, -EOPNOTSUPP);
-			*rqp = NULL;
-			return false;
+			return NULL;
 		}
 	}
 
@@ -211,14 +207,14 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (rq->cmd_type != REQ_TYPE_FS &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return true;
+		return rq;
 
 	/* Ordered by draining.  Wait for turn. */
 	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
 	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-		*rqp = NULL;
+		rq = ERR_PTR(-EAGAIN);
 
-	return true;
+	return rq;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
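To illustrate the new contract from the caller's side, here is a minimal
sketch of a dispatch loop.  The real caller is updated outside
block/blk-barrier.c and is not part of this diff, and first_queued_request()
is a hypothetical stand-in for peeking at the head of q->queue_head:

    #include <linux/blkdev.h>
    #include <linux/err.h>

    #include "blk.h"	/* blk_do_ordered() now lives here */

    /* Hypothetical helper: peek at the first request on q->queue_head. */
    static struct request *first_queued_request(struct request_queue *q);

    static struct request *fetch_next_ordered(struct request_queue *q)
    {
    	struct request *rq;

    	while ((rq = first_queued_request(q)) != NULL) {
    		rq = blk_do_ordered(q, rq);
    		if (!rq)
    			continue;	/* consumed; look at the next request */
    		if (IS_ERR(rq))
    			break;		/* ERR_PTR(-EAGAIN): retry later */
    		return rq;		/* dispatch this (possibly proxy) request */
    	}
    	return NULL;
    }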