about summary refs log tree commit diff stats
path: root/drivers/block/xen-blkback
diff options
context:
space:
mode:
authorMike Christie <mchristi@redhat.com>2016-06-05 15:32:09 -0400
committerJens Axboe <axboe@fb.com>2016-06-07 15:41:38 -0400
commita022606e53fa16ac788fcc9e9362f5fbe4ae83c2 (patch)
tree9855bb7d0586cb3a8e9ee1d193d6c044aa1ec846 /drivers/block/xen-blkback
parente742fc32fcb468b9bb8a6de4fb4093e5c5f6839a (diff)
xen: use bio op accessors
Separate the op from the rq_flag_bits and have xen set/get the bio using bio_set_op_attrs/bio_op.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block/xen-blkback')
-rw-r--r-- drivers/block/xen-blkback/blkback.c | 27
1 files changed, 15 insertions, 12 deletions
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 79fe4934f18b..4a80ee752597 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -501,7 +501,7 @@ static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
501 struct xen_vbd *vbd = &blkif->vbd; 501 struct xen_vbd *vbd = &blkif->vbd;
502 int rc = -EACCES; 502 int rc = -EACCES;
503 503
504 if ((operation != READ) && vbd->readonly) 504 if ((operation != REQ_OP_READ) && vbd->readonly)
505 goto out; 505 goto out;
506 506
507 if (likely(req->nr_sects)) { 507 if (likely(req->nr_sects)) {
@@ -1014,7 +1014,7 @@ static int dispatch_discard_io(struct xen_blkif_ring *ring,
1014 preq.sector_number = req->u.discard.sector_number; 1014 preq.sector_number = req->u.discard.sector_number;
1015 preq.nr_sects = req->u.discard.nr_sectors; 1015 preq.nr_sects = req->u.discard.nr_sectors;
1016 1016
1017 err = xen_vbd_translate(&preq, blkif, WRITE); 1017 err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
1018 if (err) { 1018 if (err) {
1019 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n", 1019 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1020 preq.sector_number, 1020 preq.sector_number,
@@ -1229,6 +1229,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1229 struct bio **biolist = pending_req->biolist; 1229 struct bio **biolist = pending_req->biolist;
1230 int i, nbio = 0; 1230 int i, nbio = 0;
1231 int operation; 1231 int operation;
1232 int operation_flags = 0;
1232 struct blk_plug plug; 1233 struct blk_plug plug;
1233 bool drain = false; 1234 bool drain = false;
1234 struct grant_page **pages = pending_req->segments; 1235 struct grant_page **pages = pending_req->segments;
@@ -1247,17 +1248,19 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1247 switch (req_operation) { 1248 switch (req_operation) {
1248 case BLKIF_OP_READ: 1249 case BLKIF_OP_READ:
1249 ring->st_rd_req++; 1250 ring->st_rd_req++;
1250 operation = READ; 1251 operation = REQ_OP_READ;
1251 break; 1252 break;
1252 case BLKIF_OP_WRITE: 1253 case BLKIF_OP_WRITE:
1253 ring->st_wr_req++; 1254 ring->st_wr_req++;
1254 operation = WRITE_ODIRECT; 1255 operation = REQ_OP_WRITE;
1256 operation_flags = WRITE_ODIRECT;
1255 break; 1257 break;
1256 case BLKIF_OP_WRITE_BARRIER: 1258 case BLKIF_OP_WRITE_BARRIER:
1257 drain = true; 1259 drain = true;
1258 case BLKIF_OP_FLUSH_DISKCACHE: 1260 case BLKIF_OP_FLUSH_DISKCACHE:
1259 ring->st_f_req++; 1261 ring->st_f_req++;
1260 operation = WRITE_FLUSH; 1262 operation = REQ_OP_WRITE;
1263 operation_flags = WRITE_FLUSH;
1261 break; 1264 break;
1262 default: 1265 default:
1263 operation = 0; /* make gcc happy */ 1266 operation = 0; /* make gcc happy */
@@ -1269,7 +1272,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1269 nseg = req->operation == BLKIF_OP_INDIRECT ? 1272 nseg = req->operation == BLKIF_OP_INDIRECT ?
1270 req->u.indirect.nr_segments : req->u.rw.nr_segments; 1273 req->u.indirect.nr_segments : req->u.rw.nr_segments;
1271 1274
1272 if (unlikely(nseg == 0 && operation != WRITE_FLUSH) || 1275 if (unlikely(nseg == 0 && operation_flags != WRITE_FLUSH) ||
1273 unlikely((req->operation != BLKIF_OP_INDIRECT) && 1276 unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1274 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) || 1277 (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1275 unlikely((req->operation == BLKIF_OP_INDIRECT) && 1278 unlikely((req->operation == BLKIF_OP_INDIRECT) &&
@@ -1310,7 +1313,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1310 1313
1311 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) { 1314 if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1312 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n", 1315 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1313 operation == READ ? "read" : "write", 1316 operation == REQ_OP_READ ? "read" : "write",
1314 preq.sector_number, 1317 preq.sector_number,
1315 preq.sector_number + preq.nr_sects, 1318 preq.sector_number + preq.nr_sects,
1316 ring->blkif->vbd.pdevice); 1319 ring->blkif->vbd.pdevice);
@@ -1369,7 +1372,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1369 bio->bi_private = pending_req; 1372 bio->bi_private = pending_req;
1370 bio->bi_end_io = end_block_io_op; 1373 bio->bi_end_io = end_block_io_op;
1371 bio->bi_iter.bi_sector = preq.sector_number; 1374 bio->bi_iter.bi_sector = preq.sector_number;
1372 bio->bi_rw = operation; 1375 bio_set_op_attrs(bio, operation, operation_flags);
1373 } 1376 }
1374 1377
1375 preq.sector_number += seg[i].nsec; 1378 preq.sector_number += seg[i].nsec;
@@ -1377,7 +1380,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1377 1380
1378 /* This will be hit if the operation was a flush or discard. */ 1381 /* This will be hit if the operation was a flush or discard. */
1379 if (!bio) { 1382 if (!bio) {
1380 BUG_ON(operation != WRITE_FLUSH); 1383 BUG_ON(operation_flags != WRITE_FLUSH);
1381 1384
1382 bio = bio_alloc(GFP_KERNEL, 0); 1385 bio = bio_alloc(GFP_KERNEL, 0);
1383 if (unlikely(bio == NULL)) 1386 if (unlikely(bio == NULL))
@@ -1387,7 +1390,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1387 bio->bi_bdev = preq.bdev; 1390 bio->bi_bdev = preq.bdev;
1388 bio->bi_private = pending_req; 1391 bio->bi_private = pending_req;
1389 bio->bi_end_io = end_block_io_op; 1392 bio->bi_end_io = end_block_io_op;
1390 bio->bi_rw = operation; 1393 bio_set_op_attrs(bio, operation, operation_flags);
1391 } 1394 }
1392 1395
1393 atomic_set(&pending_req->pendcnt, nbio); 1396 atomic_set(&pending_req->pendcnt, nbio);
@@ -1399,9 +1402,9 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1399 /* Let the I/Os go.. */ 1402 /* Let the I/Os go.. */
1400 blk_finish_plug(&plug); 1403 blk_finish_plug(&plug);
1401 1404
1402 if (operation == READ) 1405 if (operation == REQ_OP_READ)
1403 ring->st_rd_sect += preq.nr_sects; 1406 ring->st_rd_sect += preq.nr_sects;
1404 else if (operation & WRITE) 1407 else if (operation == REQ_OP_WRITE)
1405 ring->st_wr_sect += preq.nr_sects; 1408 ring->st_wr_sect += preq.nr_sects;
1406 1409
1407 return 0; 1410 return 0;