Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--	block/blk-barrier.c	120
1 file changed, 76 insertions, 44 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6e72d661ae42..8eba4e43bb0c 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -24,8 +24,8 @@
 int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn)
 {
-	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
-	    prepare_flush_fn == NULL) {
+	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+					     QUEUE_ORDERED_DO_POSTFLUSH))) {
 		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 	return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
 
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	q->ordseq |= seq;
 
 	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-		return;
+		return false;
 
 	/*
 	 * Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 
 	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
 		BUG();
+
+	return true;
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	struct request *rq;
 	rq_end_io_fn *end_io;
 
-	if (which == QUEUE_ORDERED_PREFLUSH) {
+	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
 		rq = &q->pre_flush_rq;
 		end_io = pre_flush_end_io;
 	} else {
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(struct request_queue *q,
-					    struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 {
+	struct request *rq = *rqp;
+	unsigned skip = 0;
+
 	q->orderr = 0;
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
 	/*
-	 * Prep proxy barrier request.
+	 * For an empty barrier, there's no actual BAR request, which
+	 * in turn makes POSTFLUSH unnecessary. Mask them off.
 	 */
+	if (!rq->hard_nr_sectors) {
+		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+				QUEUE_ORDERED_DO_POSTFLUSH);
+		/*
+		 * Empty barrier on a write-through device w/ ordered
+		 * tag has no command to issue and without any command
+		 * to issue, ordering by tag can't be used. Drain
+		 * instead.
+		 */
+		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+		}
+	}
+
+	/* stash away the original request */
 	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
-	rq = &q->bar_rq;
-	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_FUA)
-		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
-	rq->end_io = bar_end_io;
+	rq = NULL;
 
 	/*
 	 * Queue ordered sequence. As we stack them at the head, we
 	 * need to queue in reverse order. Note that we rely on that
 	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-	 * request gets inbetween ordered sequence. If this request is
-	 * an empty barrier, we don't need to do a postflush ever since
-	 * there will be no data written between the pre and post flush.
-	 * Hence a single flush will suffice.
+	 * request gets inbetween ordered sequence.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
-		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
-	else
-		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
+		rq = &q->post_flush_rq;
+	} else
+		skip |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+		rq = &q->bar_rq;
+
+		/* initialize proxy request and queue it */
+		blk_rq_init(q, rq);
+		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+			rq->cmd_flags |= REQ_RW;
+		if (q->ordered & QUEUE_ORDERED_DO_FUA)
+			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		rq->end_io = bar_end_io;
 
-	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
-		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	} else
+		skip |= QUEUE_ORDSEQ_BAR;
+
+	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-	else
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
+	else
+		skip |= QUEUE_ORDSEQ_DRAIN;
+
+	*rqp = rq;
 
-	return rq;
+	/*
+	 * Complete skipped sequences. If whole sequence is complete,
+	 * return false to tell elevator that this request is gone.
+	 */
+	return !blk_ordered_complete_seq(q, skip, 0);
 }
 
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
 	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return 1;
+			return true;
 
-		if (q->next_ordered != QUEUE_ORDERED_NONE) {
-			*rqp = start_ordered(q, rq);
-			return 1;
-		} else {
+		if (q->next_ordered != QUEUE_ORDERED_NONE)
+			return start_ordered(q, rqp);
+		else {
 			/*
-			 * This can happen when the queue switches to
-			 * ORDERED_NONE while this request is on it.
+			 * Queue ordering not supported. Terminate
+			 * with prejudice.
 			 */
 			elv_dequeue_request(q, rq);
 			if (__blk_end_request(rq, -EOPNOTSUPP,
 					      blk_rq_bytes(rq)))
 				BUG();
 			*rqp = NULL;
-			return 0;
+			return false;
 		}
 	}
 
@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (!blk_fs_request(rq) &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return 1;
+		return true;
 
-	if (q->ordered & QUEUE_ORDERED_TAG) {
+	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
 		/* Ordered by tag. Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
@@ -248,7 +280,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 			*rqp = NULL;
 	}
 
-	return 1;
+	return true;
 }
 
 static void bio_end_empty_barrier(struct bio *bio, int err)
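
For reference, a minimal sketch of how a block driver of this era might register an ordered mode against the interface checked in the first hunk above. Only blk_queue_ordered() and the rule that a mode containing QUEUE_ORDERED_DO_PREFLUSH or QUEUE_ORDERED_DO_POSTFLUSH must supply a prepare_flush_fn come from this patch; the composite mode QUEUE_ORDERED_DRAIN_FLUSH and the mydrv_* names are illustrative assumptions, not part of the change.

#include <linux/blkdev.h>

/* Hypothetical driver callback: turn @rq into a cache-flush command.
 * The name and (empty) body are illustrative only. */
static void mydrv_prepare_flush(struct request_queue *q, struct request *rq)
{
	/* driver-specific setup of the flush request goes here */
}

static int mydrv_setup_queue(struct request_queue *q)
{
	/*
	 * Because the assumed mode includes the DO_PREFLUSH/DO_POSTFLUSH
	 * bits, passing a NULL prepare_flush_fn would make
	 * blk_queue_ordered() fail with -EINVAL per the check above.
	 */
	return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				 mydrv_prepare_flush);
}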