Diffstat (limited to 'block/blk-barrier.c')
-rw-r--r--  block/blk-barrier.c  35
1 file changed, 18 insertions, 17 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 0d710c9d403b..f0faefca032f 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -13,7 +13,6 @@
  * blk_queue_ordered - does this queue support ordered writes
  * @q:        the request queue
  * @ordered:  one of QUEUE_ORDERED_*
- * @prepare_flush_fn: rq setup helper for cache flush ordered writes
  *
  * Description:
  *   For journalled file systems, doing ordered writes on a commit
@@ -22,15 +21,8 @@
  *   feature should call this function and indicate so.
  *
  **/
-int blk_queue_ordered(struct request_queue *q, unsigned ordered,
-                      prepare_flush_fn *prepare_flush_fn)
+int blk_queue_ordered(struct request_queue *q, unsigned ordered)
 {
-        if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
-                                             QUEUE_ORDERED_DO_POSTFLUSH))) {
-                printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
-                return -EINVAL;
-        }
-
         if (ordered != QUEUE_ORDERED_NONE &&
             ordered != QUEUE_ORDERED_DRAIN &&
             ordered != QUEUE_ORDERED_DRAIN_FLUSH &&
@@ -44,7 +36,6 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 
         q->ordered = ordered;
         q->next_ordered = ordered;
-        q->prepare_flush_fn = prepare_flush_fn;
 
         return 0;
 }
@@ -79,7 +70,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
          *
          *   http://thread.gmane.org/gmane.linux.kernel/537473
          */
-        if (!blk_fs_request(rq))
+        if (rq->cmd_type != REQ_TYPE_FS)
                 return QUEUE_ORDSEQ_DRAIN;
 
         if ((rq->cmd_flags & REQ_ORDERED_COLOR) ==
@@ -143,10 +134,10 @@ static void queue_flush(struct request_queue *q, unsigned which)
         }
 
         blk_rq_init(q, rq);
-        rq->cmd_flags = REQ_HARDBARRIER;
-        rq->rq_disk = q->bar_rq.rq_disk;
+        rq->cmd_type = REQ_TYPE_FS;
+        rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH;
+        rq->rq_disk = q->orig_bar_rq->rq_disk;
         rq->end_io = end_io;
-        q->prepare_flush_fn(q, rq);
 
         elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
@@ -203,7 +194,7 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
         /* initialize proxy request and queue it */
         blk_rq_init(q, rq);
         if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-                rq->cmd_flags |= REQ_RW;
+                rq->cmd_flags |= REQ_WRITE;
         if (q->ordered & QUEUE_ORDERED_DO_FUA)
                 rq->cmd_flags |= REQ_FUA;
         init_request_from_bio(rq, q->orig_bar_rq->bio);
@@ -236,7 +227,8 @@ static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
         struct request *rq = *rqp;
-        const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
+        const int is_barrier = rq->cmd_type == REQ_TYPE_FS &&
+                (rq->cmd_flags & REQ_HARDBARRIER);
 
         if (!q->ordseq) {
                 if (!is_barrier)
@@ -261,7 +253,7 @@ bool blk_do_ordered(struct request_queue *q, struct request **rqp)
          */
 
         /* Special requests are not subject to ordering rules. */
-        if (!blk_fs_request(rq) &&
+        if (rq->cmd_type != REQ_TYPE_FS &&
             rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
                 return true;
 
@@ -319,6 +311,15 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
         if (!q)
                 return -ENXIO;
 
+        /*
+         * some block devices may not have their queue correctly set up here
+         * (e.g. loop device without a backing file) and so issuing a flush
+         * here will panic. Ensure there is a request function before issuing
+         * the barrier.
+         */
+        if (!q->make_request_fn)
+                return -ENXIO;
+
         bio = bio_alloc(gfp_mask, 0);
         bio->bi_end_io = bio_end_empty_barrier;
         bio->bi_bdev = bdev;
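
With prepare_flush_fn gone, the caller-side contract of blk_queue_ordered() shrinks to picking an ordered mode; queue_flush() above now marks the proxy request as REQ_TYPE_FS with REQ_HARDBARRIER | REQ_FLUSH itself. A minimal sketch of the driver-side change implied by the new signature follows; my_setup_queue_old(), my_setup_queue_new() and my_prepare_flush() are hypothetical names, not part of this patch, and only the blk_queue_ordered() calls and QUEUE_ORDERED_DRAIN_FLUSH come from the diff above.

/* Before the patch: a driver passed a callback that turned the
 * pre/post-flush proxy request into a device cache-flush command. */
static void my_prepare_flush(struct request_queue *q, struct request *rq)
{
        /* hypothetical driver-specific flush setup */
}

static int my_setup_queue_old(struct request_queue *q)
{
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
                                 my_prepare_flush);
}

/* After the patch: no callback; the driver only selects the ordered mode
 * and recognises REQ_FLUSH on incoming requests in its request handler. */
static int my_setup_queue_new(struct request_queue *q)
{
        return blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
}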