about summary refs log tree commit diff stats
path: root/drivers/block/xen-blkfront.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--  drivers/block/xen-blkfront.c  44
1 file changed, 23 insertions, 21 deletions
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec5908413..2c2c4be7d9d6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -135,15 +135,15 @@ static int get_id_from_freelist(struct blkfront_info *info)
135{ 135{
136 unsigned long free = info->shadow_free; 136 unsigned long free = info->shadow_free;
137 BUG_ON(free >= BLK_RING_SIZE); 137 BUG_ON(free >= BLK_RING_SIZE);
138 info->shadow_free = info->shadow[free].req.id; 138 info->shadow_free = info->shadow[free].req.u.rw.id;
139 info->shadow[free].req.id = 0x0fffffee; /* debug */ 139 info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
140 return free; 140 return free;
141} 141}
142 142
143static void add_id_to_freelist(struct blkfront_info *info, 143static void add_id_to_freelist(struct blkfront_info *info,
144 unsigned long id) 144 unsigned long id)
145{ 145{
146 info->shadow[id].req.id = info->shadow_free; 146 info->shadow[id].req.u.rw.id = info->shadow_free;
147 info->shadow[id].request = NULL; 147 info->shadow[id].request = NULL;
148 info->shadow_free = id; 148 info->shadow_free = id;
149} 149}
@@ -287,9 +287,9 @@ static int blkif_queue_request(struct request *req)
287 id = get_id_from_freelist(info); 287 id = get_id_from_freelist(info);
288 info->shadow[id].request = req; 288 info->shadow[id].request = req;
289 289
290 ring_req->id = id; 290 ring_req->u.rw.id = id;
291 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); 291 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
292 ring_req->handle = info->handle; 292 ring_req->u.rw.handle = info->handle;
293 293
294 ring_req->operation = rq_data_dir(req) ? 294 ring_req->operation = rq_data_dir(req) ?
295 BLKIF_OP_WRITE : BLKIF_OP_READ; 295 BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -308,13 +308,15 @@ static int blkif_queue_request(struct request *req)
308 if (unlikely(req->cmd_flags & REQ_DISCARD)) { 308 if (unlikely(req->cmd_flags & REQ_DISCARD)) {
309 /* id, sector_number and handle are set above. */ 309 /* id, sector_number and handle are set above. */
310 ring_req->operation = BLKIF_OP_DISCARD; 310 ring_req->operation = BLKIF_OP_DISCARD;
311 ring_req->nr_segments = 0; 311 ring_req->u.discard.nr_segments = 0;
312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req); 312 ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
313 } else { 313 } else {
314 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 314 ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
315 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); 315 info->sg);
316 BUG_ON(ring_req->u.rw.nr_segments >
317 BLKIF_MAX_SEGMENTS_PER_REQUEST);
316 318
317 for_each_sg(info->sg, sg, ring_req->nr_segments, i) { 319 for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
318 buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg))); 320 buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
319 fsect = sg->offset >> 9; 321 fsect = sg->offset >> 9;
320 lsect = fsect + (sg->length >> 9) - 1; 322 lsect = fsect + (sg->length >> 9) - 1;
@@ -705,7 +707,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
705static void blkif_completion(struct blk_shadow *s) 707static void blkif_completion(struct blk_shadow *s)
706{ 708{
707 int i; 709 int i;
708 for (i = 0; i < s->req.nr_segments; i++) 710 for (i = 0; i < s->req.u.rw.nr_segments; i++)
709 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL); 711 gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
710} 712}
711 713
@@ -763,7 +765,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
763 error = -EOPNOTSUPP; 765 error = -EOPNOTSUPP;
764 } 766 }
765 if (unlikely(bret->status == BLKIF_RSP_ERROR && 767 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
766 info->shadow[id].req.nr_segments == 0)) { 768 info->shadow[id].req.u.rw.nr_segments == 0)) {
767 printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", 769 printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
768 info->flush_op == BLKIF_OP_WRITE_BARRIER ? 770 info->flush_op == BLKIF_OP_WRITE_BARRIER ?
769 "barrier" : "flush disk cache", 771 "barrier" : "flush disk cache",
@@ -984,8 +986,8 @@ static int blkfront_probe(struct xenbus_device *dev,
984 INIT_WORK(&info->work, blkif_restart_queue); 986 INIT_WORK(&info->work, blkif_restart_queue);
985 987
986 for (i = 0; i < BLK_RING_SIZE; i++) 988 for (i = 0; i < BLK_RING_SIZE; i++)
987 info->shadow[i].req.id = i+1; 989 info->shadow[i].req.u.rw.id = i+1;
988 info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; 990 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
989 991
990 /* Front end dir is a number, which is used as the id. */ 992 /* Front end dir is a number, which is used as the id. */
991 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 993 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -1019,9 +1021,9 @@ static int blkif_recover(struct blkfront_info *info)
1019 /* Stage 2: Set up free list. */ 1021 /* Stage 2: Set up free list. */
1020 memset(&info->shadow, 0, sizeof(info->shadow)); 1022 memset(&info->shadow, 0, sizeof(info->shadow));
1021 for (i = 0; i < BLK_RING_SIZE; i++) 1023 for (i = 0; i < BLK_RING_SIZE; i++)
1022 info->shadow[i].req.id = i+1; 1024 info->shadow[i].req.u.rw.id = i+1;
1023 info->shadow_free = info->ring.req_prod_pvt; 1025 info->shadow_free = info->ring.req_prod_pvt;
1024 info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff; 1026 info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
1025 1027
1026 /* Stage 3: Find pending requests and requeue them. */ 1028 /* Stage 3: Find pending requests and requeue them. */
1027 for (i = 0; i < BLK_RING_SIZE; i++) { 1029 for (i = 0; i < BLK_RING_SIZE; i++) {
@@ -1034,17 +1036,17 @@ static int blkif_recover(struct blkfront_info *info)
1034 *req = copy[i].req; 1036 *req = copy[i].req;
1035 1037
1036 /* We get a new request id, and must reset the shadow state. */ 1038 /* We get a new request id, and must reset the shadow state. */
1037 req->id = get_id_from_freelist(info); 1039 req->u.rw.id = get_id_from_freelist(info);
1038 memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i])); 1040 memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
1039 1041
1040 /* Rewrite any grant references invalidated by susp/resume. */ 1042 /* Rewrite any grant references invalidated by susp/resume. */
1041 for (j = 0; j < req->nr_segments; j++) 1043 for (j = 0; j < req->u.rw.nr_segments; j++)
1042 gnttab_grant_foreign_access_ref( 1044 gnttab_grant_foreign_access_ref(
1043 req->u.rw.seg[j].gref, 1045 req->u.rw.seg[j].gref,
1044 info->xbdev->otherend_id, 1046 info->xbdev->otherend_id,
1045 pfn_to_mfn(info->shadow[req->id].frame[j]), 1047 pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
1046 rq_data_dir(info->shadow[req->id].request)); 1048 rq_data_dir(info->shadow[req->u.rw.id].request));
1047 info->shadow[req->id].req = *req; 1049 info->shadow[req->u.rw.id].req = *req;
1048 1050
1049 info->ring.req_prod_pvt++; 1051 info->ring.req_prod_pvt++;
1050 } 1052 }