Diffstat (limited to 'drivers/block/xen-blkfront.c')
 -rw-r--r--  drivers/block/xen-blkfront.c | 55 +++++++++++++++++++++++++++++++++----------------------
 1 file changed, 33 insertions(+), 22 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 255035cfc88a..4f9e22f29138 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
 
 struct blk_shadow {
 	struct blkif_request req;
-	unsigned long request;
+	struct request *request;
 	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
 	info->shadow[id].req.id  = info->shadow_free;
-	info->shadow[id].request = 0;
+	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
 
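Note: the shadow array doubles as a free list: shadow_free holds the index of
the first free slot, each free slot chains to the next through req.id, and a
NULL request marks a slot as unused (hence the 0 to NULL change above, which
is what makes the pointer type safe to use). The matching allocator,
get_id_from_freelist(), is called later in this diff but not shown; a sketch
of how it plausibly looks in this driver (the poison value is illustrative):

static unsigned long get_id_from_freelist(struct blkfront_info *info)
{
	unsigned long free = info->shadow_free;
	BUG_ON(free >= BLK_RING_SIZE);
	/* Unlink the head of the free list... */
	info->shadow_free = info->shadow[free].req.id;
	/* ...and poison the link so reuse of a live id is easy to spot. */
	info->shadow[free].req.id = 0x0fffffee;
	return free;
}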
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request. Reads
+ * and writes are handled as expected. Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
-	info->shadow[id].request = (unsigned long)req;
+	info->shadow[id].request = req;
 
 	ring_req->id = id;
 	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
 
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment. However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way. (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
+		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
+
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
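Note: this hunk is the core of the change. The Xen block protocol of this
vintage has no stand-alone flush operation, so both REQ_FLUSH and REQ_FUA are
widened to BLKIF_OP_WRITE_BARRIER, which is ordered against all prior writes
and therefore satisfies both semantics. A self-contained sketch of just that
operation-selection logic; the flag and opcode values below are illustrative
stand-ins, not the kernel's definitions from <linux/blk_types.h> and
<xen/interface/io/blkif.h>:

/* Illustrative stand-in values. */
#define REQ_WRITE		(1u << 0)
#define REQ_FLUSH		(1u << 1)
#define REQ_FUA			(1u << 2)

#define BLKIF_OP_READ		 0
#define BLKIF_OP_WRITE		 1
#define BLKIF_OP_WRITE_BARRIER	 2

/* Reads and writes map 1:1; FLUSH and FUA are widened to a full
 * write barrier, which implies both orderings. */
static int blkif_pick_operation(unsigned int cmd_flags)
{
	int op = (cmd_flags & REQ_WRITE) ? BLKIF_OP_WRITE : BLKIF_OP_READ;

	if (cmd_flags & (REQ_FLUSH | REQ_FUA))
		op = BLKIF_OP_WRITE_BARRIER;
	return op;
}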
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
-		req  = (struct request *)info->shadow[id].request;
+		req  = info->shadow[id].request;
 
 		blkif_completion(&info->shadow[id]);
 
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
+			}
+			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+				     info->shadow[id].req.nr_segments == 0)) {
+				printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+				       info->gd->disk_name);
+				error = -EOPNOTSUPP;
+			}
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
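Note: the completion path now distinguishes two failure modes: a backend that
rejects barriers outright (BLKIF_RSP_EOPNOTSUPP), and one that fails only the
segmentless barriers the flush machinery emits (BLKIF_RSP_ERROR with
nr_segments == 0). Either way the request is completed as a success and flush
support is withdrawn, so the blk layer stops sending flushes instead of
surfacing spurious I/O errors. A compilable sketch of that policy; the
response codes are stand-ins for the xen/interface/io/blkif.h values, and the
initial -EIO mapping comes from the surrounding driver code, not this hunk:

#include <errno.h>	/* EIO, EOPNOTSUPP */
#include <stdbool.h>

/* Stand-ins for the Xen ring response codes. */
#define BLKIF_RSP_OKAY		 0
#define BLKIF_RSP_ERROR		(-1)
#define BLKIF_RSP_EOPNOTSUPP	(-2)

/* Returns the error to complete the request with; sets *drop_flush
 * when the frontend should stop advertising flush support. */
static int barrier_completion(int status, int nr_segments, bool *drop_flush)
{
	int error = (status == BLKIF_RSP_OKAY) ? 0 : -EIO;

	*drop_flush = false;
	if (status == BLKIF_RSP_EOPNOTSUPP)
		error = -EOPNOTSUPP;	/* backend has no barriers at all */
	if (status == BLKIF_RSP_ERROR && nr_segments == 0)
		error = -EOPNOTSUPP;	/* backend fails empty barriers */
	if (error) {
		if (error == -EOPNOTSUPP)
			error = 0;	/* complete as success */
		*drop_flush = true;	/* i.e. feature_flush = 0 */
	}
	return error;
}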
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/* Not in use? */
-		if (copy[i].request == 0)
+		if (!copy[i].request)
 			continue;
 
 		/* Grab a request slot and copy shadow state into it. */
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
 				req->seg[j].gref,
 				info->xbdev->otherend_id,
 				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(
-					(struct request *)
-					info->shadow[req->id].request));
+				rq_data_dir(info->shadow[req->id].request));
 		info->shadow[req->id].req = *req;
 
 		info->ring.req_prod_pvt++;
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
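Note: with empty barriers handled in the interrupt path, the #if 0 guard can
go, and a backend advertising "feature-barrier" now gets REQ_FLUSH | REQ_FUA;
claiming FUA is safe because a barrier write is a superset of it.
xlvbd_flush() itself lies outside this diff; assuming the 2.6.37-era
blk_queue_flush() interface, it plausibly amounts to:

/* Sketch only: a reconstruction of the helper named in the hunks above,
 * not the verbatim driver code. */
static void xlvbd_flush(struct blkfront_info *info)
{
	/* Tell the blk core which of REQ_FLUSH/REQ_FUA the driver honours. */
	blk_queue_flush(info->rq, info->feature_flush);
	printk(KERN_INFO "blkfront: %s: barriers %s\n",
	       info->gd->disk_name,
	       info->feature_flush ? "enabled" : "disabled");
}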
