Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--  block/blk-barrier.c  |  35
1 file changed, 7 insertions(+), 28 deletions(-)
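
This commit removes the tag-based barrier orderings (QUEUE_ORDERED_TAG, QUEUE_ORDERED_TAG_FLUSH, QUEUE_ORDERED_TAG_FUA) together with the code that chose between ordering by tag and ordering by drain; after it, barrier sequencing is always done by draining the queue.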
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index f0faefca032f..c807e9ca3a68 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -26,10 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 	if (ordered != QUEUE_ORDERED_NONE &&
 	    ordered != QUEUE_ORDERED_DRAIN &&
 	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
-	    ordered != QUEUE_ORDERED_DRAIN_FUA &&
-	    ordered != QUEUE_ORDERED_TAG &&
-	    ordered != QUEUE_ORDERED_TAG_FLUSH &&
-	    ordered != QUEUE_ORDERED_TAG_FUA) {
+	    ordered != QUEUE_ORDERED_DRAIN_FUA) {
 		printk(KERN_ERR "blk_queue_ordered: bad value %d\n", ordered);
 		return -EINVAL;
 	}
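
With the tag modes gone, blk_queue_ordered() accepts only the drain-based orderings. A minimal userspace model of the check that remains; the flag values below are placeholders, not the kernel's actual definitions:

#include <errno.h>
#include <stdio.h>

/* Placeholder values; the kernel composes these from step flags. */
#define QUEUE_ORDERED_NONE		0x00
#define QUEUE_ORDERED_DRAIN		0x01
#define QUEUE_ORDERED_DRAIN_FLUSH	0x02
#define QUEUE_ORDERED_DRAIN_FUA		0x03

/* Mirrors the validation left in blk_queue_ordered() after this patch. */
static int check_ordered(unsigned ordered)
{
	if (ordered != QUEUE_ORDERED_NONE &&
	    ordered != QUEUE_ORDERED_DRAIN &&
	    ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
	    ordered != QUEUE_ORDERED_DRAIN_FUA) {
		fprintf(stderr, "blk_queue_ordered: bad value %d\n", ordered);
		return -EINVAL;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_ordered(QUEUE_ORDERED_DRAIN_FLUSH));	/* 0 */
	printf("%d\n", check_ordered(0x7f));	/* -EINVAL: not a drain mode */
	return 0;
}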
@@ -155,21 +152,9 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	 * For an empty barrier, there's no actual BAR request, which
 	 * in turn makes POSTFLUSH unnecessary. Mask them off.
 	 */
-	if (!blk_rq_sectors(rq)) {
+	if (!blk_rq_sectors(rq))
 		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
 				QUEUE_ORDERED_DO_POSTFLUSH);
-		/*
-		 * Empty barrier on a write-through device w/ ordered
-		 * tag has no command to issue and without any command
-		 * to issue, ordering by tag can't be used. Drain
-		 * instead.
-		 */
-		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
-		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
-			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
-			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
-		}
-	}
 
 	/* stash away the original request */
 	blk_dequeue_request(rq);
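
The empty-barrier special case in start_ordered() now reduces to a single mask: a barrier carrying no data needs no BAR request, and hence no POSTFLUSH. A toy model of that bit manipulation, with made-up flag bits:

#include <stdio.h>

#define QUEUE_ORDERED_DO_PREFLUSH	0x10	/* placeholder bits */
#define QUEUE_ORDERED_DO_BAR		0x20
#define QUEUE_ORDERED_DO_POSTFLUSH	0x40

int main(void)
{
	unsigned ordered = QUEUE_ORDERED_DO_PREFLUSH |
			   QUEUE_ORDERED_DO_BAR |
			   QUEUE_ORDERED_DO_POSTFLUSH;
	unsigned sectors = 0;	/* an empty barrier carries no data */

	/* No data means no BAR step and therefore no POSTFLUSH step. */
	if (!sectors)
		ordered &= ~(QUEUE_ORDERED_DO_BAR | QUEUE_ORDERED_DO_POSTFLUSH);

	printf("remaining steps: %#x\n", ordered);	/* only PREFLUSH left */
	return 0;
}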
@@ -210,7 +195,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 	} else
 		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q))
+	if (queue_in_flight(q))
 		rq = NULL;
 	else
 		skip |= QUEUE_ORDSEQ_DRAIN;
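
With drain the only ordering mode left, the QUEUE_ORDERED_BY_DRAIN test is redundant: any in-flight I/O now unconditionally defers the drain step. A trivial sketch, with queue_in_flight() stubbed to a fixed count:

#include <stdio.h>

/* Stub standing in for the kernel's queue_in_flight(q). */
static unsigned queue_in_flight(void)
{
	return 2;	/* pretend two requests are still in flight */
}

int main(void)
{
	/* After this patch, the check no longer looks at QUEUE_ORDERED_BY_DRAIN. */
	if (queue_in_flight())
		printf("hold the barrier, wait for the queue to drain\n");
	else
		printf("skip QUEUE_ORDSEQ_DRAIN\n");
	return 0;
}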
@@ -257,16 +242,10 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
 		return true;
 
-	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
-		/* Ordered by tag. Blocking the next barrier is enough. */
-		if (is_barrier && rq != &q->bar_rq)
-			*rqp = NULL;
-	} else {
-		/* Ordered by draining. Wait for turn. */
-		WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
-		if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
-			*rqp = NULL;
-	}
+	/* Ordered by draining. Wait for turn. */
+	WARN_ON(blk_ordered_req_seq(rq) < blk_ordered_cur_seq(q));
+	if (blk_ordered_req_seq(rq) > blk_ordered_cur_seq(q))
+		*rqp = NULL;
 
 	return true;
 }
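
All that survives in blk_do_ordered() is the drain path: a request whose barrier-sequence stage lies ahead of the queue's current stage is parked by clearing *rqp. A self-contained sketch of that gate, using hypothetical stage values in issue order:

#include <assert.h>
#include <stdio.h>

/* Hypothetical ordered-sequence stages, lowest issued first. */
enum ordseq { ORDSEQ_DRAIN, ORDSEQ_BAR, ORDSEQ_POSTFLUSH };

/* Returns NULL when the request must wait its turn, mimicking the
 * *rqp = NULL in blk_do_ordered(). */
static const char *dispatch(enum ordseq req_seq, enum ordseq cur_seq,
			    const char *rq)
{
	assert(req_seq >= cur_seq);	/* stands in for the WARN_ON() */
	return req_seq > cur_seq ? NULL : rq;
}

int main(void)
{
	const char *r;

	/* A BAR-stage request while the queue is still draining: held. */
	r = dispatch(ORDSEQ_BAR, ORDSEQ_DRAIN, "rq");
	printf("%s\n", r ? r : "held");

	/* A request at the queue's current stage: dispatched. */
	r = dispatch(ORDSEQ_DRAIN, ORDSEQ_DRAIN, "rq");
	printf("%s\n", r ? r : "held");
	return 0;
}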