aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/block/xen-blkfront.c14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 31c8a643d109..76b874a79175 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -286,8 +286,18 @@ static int blkif_queue_request(struct request *req)
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_FLUSH)
+
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment. However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way. (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
@@ -1065,7 +1075,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	info->feature_flush = 0;
 
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {