Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--	block/blk-barrier.c	45
1 file changed, 27 insertions(+), 18 deletions(-)
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 1efabf829c53..b03d88013e1e 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
 
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	q->ordseq |= seq;
 
 	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-		return;
+		return false;
 
 	/*
 	 * Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 
 	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
 		BUG();
+
+	return true;
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -151,9 +153,11 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(struct request_queue *q,
-					    struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 {
+	struct request *rq = *rqp;
+	unsigned skip = 0;
+
 	q->orderr = 0;
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
@@ -177,7 +181,7 @@ static inline struct request *start_ordered(struct request_queue *q,
 		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
 		rq = &q->post_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+		skip |= QUEUE_ORDSEQ_POSTFLUSH;
 
 	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
 		rq = &q->bar_rq;
@@ -193,35 +197,40 @@ static inline struct request *start_ordered(struct request_queue *q,
 
 		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_BAR;
+		skip |= QUEUE_ORDSEQ_BAR;
 
 	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
 		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
 	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
 	else
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
+		skip |= QUEUE_ORDSEQ_DRAIN;
+
+	*rqp = rq;
 
-	return rq;
+	/*
+	 * Complete skipped sequences. If whole sequence is complete,
+	 * return false to tell elevator that this request is gone.
+	 */
+	return !blk_ordered_complete_seq(q, skip, 0);
 }
 
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
 	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return 1;
+			return true;
 
-		if (q->next_ordered != QUEUE_ORDERED_NONE) {
-			*rqp = start_ordered(q, rq);
-			return 1;
-		} else {
+		if (q->next_ordered != QUEUE_ORDERED_NONE)
+			return start_ordered(q, rqp);
+		else {
 			/*
 			 * Queue ordering not supported. Terminate
 			 * with prejudice.
@@ -231,7 +240,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 					      blk_rq_bytes(rq)))
 				BUG();
 			*rqp = NULL;
-			return 0;
+			return false;
 		}
 	}
 
@@ -242,7 +251,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (!blk_fs_request(rq) &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return 1;
+		return true;
 
 	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
 		/* Ordered by tag. Blocking the next barrier is enough. */
@@ -255,7 +264,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 			*rqp = NULL;
 	}
 
-	return 1;
+	return true;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
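For orientation, the core of the patch is that start_ordered() now collects the ordering steps it does not need to issue into a local `skip` mask, completes them with one blk_ordered_complete_seq() call, and reports through a bool whether the whole barrier sequence finished immediately. A minimal standalone sketch of that accumulate-then-complete pattern follows; the names (`ordseq_complete`, `start_sequence`, `SEQ_*`) are hypothetical stand-ins for illustration, not the kernel API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the QUEUE_ORDSEQ_* step flags. */
enum {
	SEQ_STARTED   = 1 << 0,
	SEQ_PREFLUSH  = 1 << 1,
	SEQ_BAR       = 1 << 2,
	SEQ_POSTFLUSH = 1 << 3,
	SEQ_DRAIN     = 1 << 4,
	SEQ_DONE      = SEQ_STARTED | SEQ_PREFLUSH | SEQ_BAR |
			SEQ_POSTFLUSH | SEQ_DRAIN,
};

struct queue {
	unsigned ordseq;	/* steps completed so far */
};

/* Mark @seq complete; return true once every step has finished. */
static bool ordseq_complete(struct queue *q, unsigned seq)
{
	q->ordseq |= seq;
	return (q->ordseq & SEQ_DONE) == SEQ_DONE;
}

/*
 * Mirrors the shape of the patched start_ordered(): steps that need no
 * real work are only accumulated in @skip and completed in one call at
 * the end.  Returning false means the whole sequence finished at once.
 */
static bool start_sequence(struct queue *q, bool need_preflush,
			   bool need_barrier, bool need_postflush)
{
	unsigned skip = 0;

	q->ordseq = SEQ_STARTED;
	if (!need_postflush)
		skip |= SEQ_POSTFLUSH;
	if (!need_barrier)
		skip |= SEQ_BAR;
	if (!need_preflush)
		skip |= SEQ_PREFLUSH;
	skip |= SEQ_DRAIN;	/* assume nothing in flight for the sketch */

	return !ordseq_complete(q, skip);
}

int main(void)
{
	struct queue q = { 0 };

	/* No flushes or barrier needed: the sequence completes on the spot. */
	printf("request still pending: %d\n",
	       start_sequence(&q, false, false, false));
	return 0;
}
```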