Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--	drivers/block/xen-blkfront.c	57
1 file changed, 33 insertions(+), 24 deletions(-)
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812ba124..657873e4328d 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,14 +65,14 @@ enum blkif_state {
 
 struct blk_shadow {
 	struct blkif_request req;
-	unsigned long request;
+	struct request *request;
 	unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 
 static DEFINE_MUTEX(blkfront_mutex);
 static const struct block_device_operations xlvbd_block_fops;
 
-#define BLK_RING_SIZE __RING_SIZE((struct blkif_sring *)0, PAGE_SIZE)
+#define BLK_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
  * We have one of these per vbd, whether ide, scsi or 'other'.  They
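
The BLK_RING_SIZE change swaps the null-pointer-cast form of the size calculation for __CONST_RING_SIZE, which evaluates to a true compile-time constant and can therefore size arrays such as the driver's shadow[] table. The capacity works out to however many request slots fit in one page after the ring bookkeeping, rounded down to a power of two so producer/consumer indices can wrap with a simple mask. A minimal standalone model of that sizing rule (the header and slot sizes below are illustrative assumptions, not values from the Xen headers):

#include <stdio.h>

/*
 * Illustrative model (not the Xen headers): a shared ring page holds
 * a small header plus as many request/response slots as fit, rounded
 * down to a power of two so index arithmetic can use a simple mask.
 */
#define MODEL_PAGE_SIZE   4096
#define MODEL_HEADER_SIZE 64	/* assumed header footprint */
#define MODEL_ENTRY_SIZE  112	/* assumed slot size */

static unsigned int ring_slots(unsigned int bytes, unsigned int entry)
{
	unsigned int n = bytes / entry;
	unsigned int p = 1;

	while (p * 2 <= n)	/* round down to a power of two */
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int n = ring_slots(MODEL_PAGE_SIZE - MODEL_HEADER_SIZE,
				    MODEL_ENTRY_SIZE);

	/* 4032 / 112 = 36 raw slots -> 32 usable, mask 31 */
	printf("slots=%u mask=%u\n", n, n - 1);
	return 0;
}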
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
 	info->shadow[id].req.id  = info->shadow_free;
-	info->shadow[id].request = 0;
+	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
 
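
The request field switching from unsigned long to struct request * tightens up the shadow free list: free slots are threaded through req.id, and a NULL request pointer now marks a slot as unused, which is exactly what the recovery loop further down tests with !copy[i].request. A small self-contained model of the list discipline (the names and the poison value are illustrative, not quoted from the driver):

#include <stdio.h>

#define RING 8

struct request;			/* opaque, stands in for the blk layer's */

struct shadow {
	unsigned long next_free;	/* plays the role of req.id */
	struct request *request;	/* NULL while the slot is free */
};

static struct shadow shadow[RING];
static unsigned long shadow_free;	/* head of the free list */

static unsigned long get_id(void)
{
	unsigned long id = shadow_free;

	shadow_free = shadow[id].next_free;	/* pop */
	shadow[id].next_free = 0x0fffffee;	/* poison: catch use of a freed id */
	return id;				/* caller stores its request pointer */
}

static void put_id(unsigned long id)
{
	shadow[id].next_free = shadow_free;	/* push */
	shadow[id].request = NULL;		/* slot no longer owns a request */
	shadow_free = id;
}

int main(void)
{
	unsigned long i, a, b;

	for (i = 0; i < RING; i++)		/* chain 0 -> 1 -> ... */
		shadow[i].next_free = i + 1;

	a = get_id();
	b = get_id();
	put_id(a);
	printf("got %lu then %lu, freed %lu, next is %lu\n", a, b, a, get_id());
	return 0;
}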
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
 }
 
 /*
- * blkif_queue_request
+ * Generate a Xen blkfront IO request from a blk layer request.  Reads
+ * and writes are handled as expected.  Since we lack a loose flush
+ * request, we map flushes into a full ordered barrier.
  *
- * request block io
- *
- * id: for guest use only.
- * operation: BLKIF_OP_{READ,WRITE,PROBE}
- * buffer: buffer to read/write into. this should be a
- *   virtual address in the guest os.
+ * @req: a request struct
  */
 static int blkif_queue_request(struct request *req)
 {
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
 	id = get_id_from_freelist(info);
-	info->shadow[id].request = (unsigned long)req;
+	info->shadow[id].request = req;
 
 	ring_req->id = id;
 	ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -289,8 +286,18 @@ static int blkif_queue_request(struct request *req)
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_HARDBARRIER)
+
+	if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+		/*
+		 * Ideally we could just do an unordered
+		 * flush-to-disk, but all we have is a full write
+		 * barrier at the moment.  However, a barrier write is
+		 * a superset of FUA, so we can implement it the same
+		 * way.  (It's also a FLUSH+FUA, since it is
+		 * guaranteed ordered WRT previous writes.)
+		 */
 		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
+	}
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
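
This hunk is where REQ_FLUSH and REQ_FUA get funneled into the one cache-control primitive the blkif protocol offers. As the new comment argues, a backend write barrier is strictly stronger than either flag: it drains all earlier writes (covering REQ_FLUSH) and commits this write durably (covering REQ_FUA), at the price of more ordering than strictly required. A standalone sketch of that decision, with made-up flag values standing in for the kernel's:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's REQ_* flags. */
#define MODEL_REQ_WRITE	0x1
#define MODEL_REQ_FLUSH	0x2
#define MODEL_REQ_FUA	0x4

enum model_op { MODEL_OP_READ, MODEL_OP_WRITE, MODEL_OP_WRITE_BARRIER };

static enum model_op pick_op(unsigned int cmd_flags)
{
	enum model_op op = (cmd_flags & MODEL_REQ_WRITE) ? MODEL_OP_WRITE
							 : MODEL_OP_READ;

	/* No loose flush primitive: upgrade to a full ordered barrier. */
	if (cmd_flags & (MODEL_REQ_FLUSH | MODEL_REQ_FUA))
		op = MODEL_OP_WRITE_BARRIER;
	return op;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_op(MODEL_REQ_WRITE),
	       pick_op(MODEL_REQ_WRITE | MODEL_REQ_FUA),
	       pick_op(MODEL_REQ_FLUSH));	/* empty flush -> barrier too */
	return 0;
}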
@@ -636,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 
 		bret = RING_GET_RESPONSE(&info->ring, i);
 		id   = bret->id;
-		req  = (struct request *)info->shadow[id].request;
+		req  = info->shadow[id].request;
 
 		blkif_completion(&info->shadow[id]);
 
@@ -649,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
 				       info->gd->disk_name);
 				error = -EOPNOTSUPP;
+			}
+			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
+				     info->shadow[id].req.nr_segments == 0)) {
+				printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
+				       info->gd->disk_name);
+				error = -EOPNOTSUPP;
+			}
+			if (unlikely(error)) {
+				if (error == -EOPNOTSUPP)
+					error = 0;
 				info->feature_flush = 0;
 				xlvbd_flush(info);
 			}
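
The completion path now also copes with the case that used to keep barriers disabled: a flush with no data segments reaches the backend as an empty write barrier, which some backends reject with a plain BLKIF_RSP_ERROR rather than BLKIF_RSP_EOPNOTSUPP. In either failure the data already on disk is intact, so the right response is to complete the request successfully, stop advertising flush support, and let the block layer quit sending flushes. A sketch of that fallback under hypothetical names:

#include <stdio.h>
#include <errno.h>

/*
 * Illustrative model of the completion-path fallback: if the backend
 * rejects a barrier, including the zero-segment barrier that an empty
 * flush becomes, complete the request successfully and turn the
 * feature off rather than surfacing a spurious IO error.
 */
struct model_info {
	unsigned int feature_flush;	/* advertised cache-control support */
};

static int complete_barrier(struct model_info *info, int backend_status,
			    unsigned int nr_segments)
{
	int error = backend_status ? -EOPNOTSUPP : 0;

	if (error == -EOPNOTSUPP) {
		fprintf(stderr, "barrier op failed (%u segments)\n",
			nr_segments);
		error = 0;		 /* not an IO error: data is intact */
		info->feature_flush = 0; /* stop advertising flush support */
	}
	return error;
}

int main(void)
{
	struct model_info info = { .feature_flush = 1 };

	printf("error=%d feature=%u\n",
	       complete_barrier(&info, 1, 0), info.feature_flush);
	return 0;
}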
@@ -901,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
 		/* Not in use? */
-		if (copy[i].request == 0)
+		if (!copy[i].request)
 			continue;
 
 		/* Grab a request slot and copy shadow state into it. */
@@ -918,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
 				req->seg[j].gref,
 				info->xbdev->otherend_id,
 				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(
-					(struct request *)
-					info->shadow[req->id].request));
+				rq_data_dir(info->shadow[req->id].request));
 		info->shadow[req->id].req = *req;
 
 		info->ring.req_prod_pvt++;
@@ -1069,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	 */
 	info->feature_flush = 0;
 
-	/*
-	 * The driver doesn't properly handled empty flushes, so
-	 * lets disable barrier support for now.
-	 */
-#if 0
 	if (!err && barrier)
-		info->feature_flush = REQ_FLUSH;
-#endif
+		info->feature_flush = REQ_FLUSH | REQ_FUA;
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
 	if (err) {
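
With the empty-barrier failure handled in the interrupt path, the #if 0 can go and barrier support is advertised again, now as REQ_FLUSH plus REQ_FUA, since one blkif write barrier satisfies both. The barrier variable here comes from the backend's feature negotiation earlier in blkfront_connect(). A toy model of that translation from a single backend feature bit to two block layer flags (the flag values are made up):

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's REQ_* flags. */
#define MODEL_REQ_FLUSH	0x1
#define MODEL_REQ_FUA	0x2

/*
 * Illustrative model: because a blkif write barrier both drains
 * earlier writes and commits durably, one backend feature bit can
 * back two block layer capabilities.
 */
static unsigned int negotiate_flush(int gather_err, unsigned long barrier)
{
	if (gather_err || !barrier)
		return 0;		/* backend offers no ordering op */
	return MODEL_REQ_FLUSH | MODEL_REQ_FUA;
}

int main(void)
{
	printf("old backend: %#x, barrier backend: %#x\n",
	       negotiate_flush(0, 0), negotiate_flush(0, 1));
	return 0;
}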