author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-10-12 12:12:36 -0400
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-11-18 13:27:59 -0500
commit    97e36834f5a106459ab1b290e663a4eb6264639e (patch)
tree      39e4843998cb7f75b94d80f27e4b11ffb3662f28
parent    cfcfc9eca2bcbd26a8e206baeb005b055dbf8e37 (diff)
xen/blk[front|back]: Squash blkif_request_rw and blkif_request_discard together
In a union type structure, so that the overlapping attributes can be
dealt with in an easier manner.

Suggested-by: Ian Campbell <Ian.Campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
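[For illustration only: a minimal, self-contained sketch of the idea, with hypothetical, simplified types that are not part of the patch. Each operation gets its own struct carrying the formerly shared header fields, and the request becomes an operation byte plus a union, so the overlapping members sit at matching offsets.]

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Hypothetical, simplified stand-ins for the patched structures. */
struct rw_part {
	uint8_t  nr_segments;
	uint16_t handle;         /* stand-in for blkif_vdev_t */
	uint64_t id;
	uint64_t sector_number;
} __attribute__((__packed__));

struct discard_part {
	uint8_t  nr_segments;
	uint16_t _pad1;          /* keeps "id" at the same offset as in rw */
	uint64_t id;
	uint64_t sector_number;
	uint64_t nr_sectors;
} __attribute__((__packed__));

struct request {
	uint8_t operation;
	union {
		struct rw_part rw;
		struct discard_part discard;
	} u;
} __attribute__((__packed__));

int main(void)
{
	/* The overlapping fields line up, so either view of the union is
	 * safe to read for the members both operations share. */
	printf("id offset (rw):      %zu\n", offsetof(struct request, u.rw.id));
	printf("id offset (discard): %zu\n", offsetof(struct request, u.discard.id));
	return 0;
}

[Both offsets print the same value, which is what lets common code read the shared fields through whichever union member matches the operation.]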
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 13
-rw-r--r--  drivers/block/xen-blkback/common.h  | 64
-rw-r--r--  drivers/block/xen-blkfront.c        | 44
-rw-r--r--  include/xen/interface/io/blkif.h    | 24
4 files changed, 83 insertions(+), 62 deletions(-)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 15ec4db194d1..d7104abc8b72 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -362,7 +362,7 @@ static int xen_blkbk_map(struct blkif_request *req,
 {
 	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 	int i;
-	int nseg = req->nr_segments;
+	int nseg = req->u.rw.nr_segments;
 	int ret = 0;
 
 	/*
@@ -449,7 +449,7 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
 	} else if (err)
 		status = BLKIF_RSP_ERROR;
 
-	make_response(blkif, req->id, req->operation, status);
+	make_response(blkif, req->u.discard.id, req->operation, status);
 }
 
 static void xen_blk_drain_io(struct xen_blkif *blkif)
@@ -644,7 +644,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	}
 
 	/* Check that the number of segments is sane. */
-	nseg = req->nr_segments;
+	nseg = req->u.rw.nr_segments;
+
 	if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
 		     operation != REQ_DISCARD) ||
 	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
@@ -654,12 +655,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 		goto fail_response;
 	}
 
-	preq.dev           = req->handle;
+	preq.dev           = req->u.rw.handle;
 	preq.sector_number = req->u.rw.sector_number;
 	preq.nr_sects      = 0;
 
 	pending_req->blkif     = blkif;
-	pending_req->id        = req->id;
+	pending_req->id        = req->u.rw.id;
 	pending_req->operation = req->operation;
 	pending_req->status    = BLKIF_RSP_OKAY;
 	pending_req->nr_pages  = nseg;
@@ -784,7 +785,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 	xen_blkbk_unmap(pending_req);
  fail_response:
 	/* Haven't submitted any bio's yet. */
-	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
 	free_req(pending_req);
 	msleep(1); /* back off a bit */
 	return -EIO;
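[With "id" moved into the union, the backend has to read it from the member that matches the operation, as the make_response() changes above do inline. A hypothetical helper, not part of the patch, expressing the same rule:]

/* Hypothetical helper (illustration only): pick the request id from the
 * union member matching the operation, now that "id" no longer lives in
 * a shared header.  Mirrors what the hunks above do inline. */
static uint64_t blkif_req_id(const struct blkif_request *req)
{
	return req->operation == BLKIF_OP_DISCARD ? req->u.discard.id
						  : req->u.rw.id;
}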
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dfb1b3a43a5d..dbfe7b3b0737 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -60,58 +60,66 @@ struct blkif_common_response {
 	char dummy;
 };
 
-/* i386 protocol version */
-#pragma pack(push, 4)
-
 struct blkif_x86_32_request_rw {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   handle;       /* only for read/write requests */
+	uint64_t       id;           /* private guest value, echoed in resp */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
 
 struct blkif_x86_32_request_discard {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
+	uint64_t       id;           /* private guest value, echoed in resp */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	uint64_t       nr_sectors;
-};
+} __attribute__((__packed__));
 
 struct blkif_x86_32_request {
 	uint8_t        operation;    /* BLKIF_OP_??? */
-	uint8_t        nr_segments;  /* number of segments */
-	blkif_vdev_t   handle;       /* only for read/write requests */
-	uint64_t       id;           /* private guest value, echoed in resp */
 	union {
 		struct blkif_x86_32_request_rw rw;
 		struct blkif_x86_32_request_discard discard;
 	} u;
-};
+} __attribute__((__packed__));
+
+/* i386 protocol version */
+#pragma pack(push, 4)
 struct blkif_x86_32_response {
 	uint64_t        id;        /* copied from request */
 	uint8_t         operation; /* copied from request */
 	int16_t         status;    /* BLKIF_RSP_??? */
 };
 #pragma pack(pop)
-
 /* x86_64 protocol version */
 
 struct blkif_x86_64_request_rw {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   handle;       /* only for read/write requests */
+	uint32_t       _pad1;        /* offsetof(blkif_reqest..,u.rw.id)==8 */
+	uint64_t       id;
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
 
 struct blkif_x86_64_request_discard {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   _pad1;        /* was "handle" for read/write requests */
+	uint32_t       _pad2;        /* offsetof(blkif_..,u.discard.id)==8 */
+	uint64_t       id;
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	uint64_t       nr_sectors;
-};
+} __attribute__((__packed__));
 
 struct blkif_x86_64_request {
 	uint8_t        operation;    /* BLKIF_OP_??? */
-	uint8_t        nr_segments;  /* number of segments */
-	blkif_vdev_t   handle;       /* only for read/write requests */
-	uint64_t       __attribute__((__aligned__(8))) id;
 	union {
 		struct blkif_x86_64_request_rw rw;
 		struct blkif_x86_64_request_discard discard;
 	} u;
-};
+} __attribute__((__packed__));
+
 struct blkif_x86_64_response {
 	uint64_t       __attribute__((__aligned__(8))) id;
 	uint8_t         operation;       /* copied from request */
@@ -237,18 +245,18 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	dst->operation = src->operation;
-	dst->nr_segments = src->nr_segments;
-	dst->handle = src->handle;
-	dst->id = src->id;
 	switch (src->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
 	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.nr_segments = src->u.rw.nr_segments;
+		dst->u.rw.handle = src->u.rw.handle;
+		dst->u.rw.id = src->u.rw.id;
 		dst->u.rw.sector_number = src->u.rw.sector_number;
 		barrier();
-		if (n > dst->nr_segments)
-			n = dst->nr_segments;
+		if (n > dst->u.rw.nr_segments)
+			n = dst->u.rw.nr_segments;
 		for (i = 0; i < n; i++)
 			dst->u.rw.seg[i] = src->u.rw.seg[i];
 		break;
@@ -266,18 +274,18 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
 {
 	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	dst->operation = src->operation;
-	dst->nr_segments = src->nr_segments;
-	dst->handle = src->handle;
-	dst->id = src->id;
 	switch (src->operation) {
 	case BLKIF_OP_READ:
 	case BLKIF_OP_WRITE:
 	case BLKIF_OP_WRITE_BARRIER:
 	case BLKIF_OP_FLUSH_DISKCACHE:
+		dst->u.rw.nr_segments = src->u.rw.nr_segments;
+		dst->u.rw.handle = src->u.rw.handle;
+		dst->u.rw.id = src->u.rw.id;
 		dst->u.rw.sector_number = src->u.rw.sector_number;
 		barrier();
-		if (n > dst->nr_segments)
-			n = dst->nr_segments;
+		if (n > dst->u.rw.nr_segments)
+			n = dst->u.rw.nr_segments;
 		for (i = 0; i < n; i++)
 			dst->u.rw.seg[i] = src->u.rw.seg[i];
 		break;
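[The _pad1/_pad2 members and __attribute__((__packed__)) annotations above exist to keep "id" at the offsets the offsetof() comments promise for each wire format. As a sketch, not part of the patch (the kernel idiom would be BUILD_BUG_ON), the offsets can be pinned down with C11 static assertions, assuming blkif_vdev_t is uint16_t as in the Xen headers:]

#include <stddef.h>

/* Compile-time layout checks (illustration only): the 64-bit "id" must
 * stay at offset 8 in the x86_64 wire format, per the offsetof()
 * comments in the structs above. */
_Static_assert(offsetof(struct blkif_x86_64_request, u.rw.id) == 8,
	       "x86_64 rw id must sit at offset 8");
_Static_assert(offsetof(struct blkif_x86_64_request, u.discard.id) == 8,
	       "x86_64 discard id must sit at offset 8");
/* The i386 format packs to 4-byte boundaries, so id lands at offset 4
 * (1-byte operation + 1-byte nr_segments + 2-byte blkif_vdev_t). */
_Static_assert(offsetof(struct blkif_x86_32_request, u.rw.id) == 4,
	       "x86_32 rw id must sit at offset 4");
_Static_assert(offsetof(struct blkif_x86_32_request, u.discard.id) == 4,
	       "x86_32 discard id must sit at offset 4");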
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec5908413..2c2c4be7d9d6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -135,15 +135,15 @@ static int get_id_from_freelist(struct blkfront_info *info)
 {
 	unsigned long free = info->shadow_free;
 	BUG_ON(free >= BLK_RING_SIZE);
-	info->shadow_free = info->shadow[free].req.id;
-	info->shadow[free].req.id = 0x0fffffee; /* debug */
+	info->shadow_free = info->shadow[free].req.u.rw.id;
+	info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
 	return free;
 }
 
 static void add_id_to_freelist(struct blkfront_info *info,
 			       unsigned long id)
 {
-	info->shadow[id].req.id = info->shadow_free;
+	info->shadow[id].req.u.rw.id = info->shadow_free;
 	info->shadow[id].request = NULL;
 	info->shadow_free = id;
 }
@@ -287,9 +287,9 @@ static int blkif_queue_request(struct request *req)
 	id = get_id_from_freelist(info);
 	info->shadow[id].request = req;
 
-	ring_req->id = id;
+	ring_req->u.rw.id = id;
 	ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
-	ring_req->handle = info->handle;
+	ring_req->u.rw.handle = info->handle;
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -308,13 +308,15 @@ static int blkif_queue_request(struct request *req)
 	if (unlikely(req->cmd_flags & REQ_DISCARD)) {
 		/* id, sector_number and handle are set above. */
 		ring_req->operation = BLKIF_OP_DISCARD;
-		ring_req->nr_segments = 0;
+		ring_req->u.discard.nr_segments = 0;
 		ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
 	} else {
-		ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
-		BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+		ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
+							   info->sg);
+		BUG_ON(ring_req->u.rw.nr_segments >
+		       BLKIF_MAX_SEGMENTS_PER_REQUEST);
 
-		for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+		for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
 			buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
 			fsect = sg->offset >> 9;
 			lsect = fsect + (sg->length >> 9) - 1;
@@ -705,7 +707,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
 static void blkif_completion(struct blk_shadow *s)
 {
 	int i;
-	for (i = 0; i < s->req.nr_segments; i++)
+	for (i = 0; i < s->req.u.rw.nr_segments; i++)
 		gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
 }
 
@@ -763,7 +765,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
 				error = -EOPNOTSUPP;
 			}
 			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
-				     info->shadow[id].req.nr_segments == 0)) {
+				     info->shadow[id].req.u.rw.nr_segments == 0)) {
 				printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
 				       info->flush_op == BLKIF_OP_WRITE_BARRIER ?
 				       "barrier" : "flush disk cache",
@@ -984,8 +986,8 @@ static int blkfront_probe(struct xenbus_device *dev,
 	INIT_WORK(&info->work, blkif_restart_queue);
 
 	for (i = 0; i < BLK_RING_SIZE; i++)
-		info->shadow[i].req.id = i+1;
-	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+		info->shadow[i].req.u.rw.id = i+1;
+	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 
 	/* Front end dir is a number, which is used as the id. */
 	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -1019,9 +1021,9 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Stage 2: Set up free list. */
 	memset(&info->shadow, 0, sizeof(info->shadow));
 	for (i = 0; i < BLK_RING_SIZE; i++)
-		info->shadow[i].req.id = i+1;
+		info->shadow[i].req.u.rw.id = i+1;
 	info->shadow_free = info->ring.req_prod_pvt;
-	info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+	info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
 
 	/* Stage 3: Find pending requests and requeue them. */
 	for (i = 0; i < BLK_RING_SIZE; i++) {
@@ -1034,17 +1036,17 @@ static int blkif_recover(struct blkfront_info *info)
 		*req = copy[i].req;
 
 		/* We get a new request id, and must reset the shadow state. */
-		req->id = get_id_from_freelist(info);
-		memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+		req->u.rw.id = get_id_from_freelist(info);
+		memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
 
 		/* Rewrite any grant references invalidated by susp/resume. */
-		for (j = 0; j < req->nr_segments; j++)
+		for (j = 0; j < req->u.rw.nr_segments; j++)
 			gnttab_grant_foreign_access_ref(
 				req->u.rw.seg[j].gref,
 				info->xbdev->otherend_id,
-				pfn_to_mfn(info->shadow[req->id].frame[j]),
-				rq_data_dir(info->shadow[req->id].request));
-		info->shadow[req->id].req = *req;
+				pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+				rq_data_dir(info->shadow[req->u.rw.id].request));
+		info->shadow[req->u.rw.id].req = *req;
 
 		info->ring.req_prod_pvt++;
 	}
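[Context for the id changes above: blkfront threads its free list through the request-id field itself. While a shadow slot is free, its req.u.rw.id holds the index of the next free slot, which is why probe/recover seed the ids with i+1 and why get_id_from_freelist() pops and then poisons the field. A standalone sketch of the technique, with hypothetical names:]

#include <assert.h>
#include <stdint.h>

#define RING_SIZE 32	/* stand-in for BLK_RING_SIZE */

/* Hypothetical shadow slot: next_free_id plays the role of req.u.rw.id. */
struct shadow_slot {
	uint64_t next_free_id;
};

static struct shadow_slot shadow[RING_SIZE];
static unsigned long shadow_free;

static void freelist_init(void)
{
	unsigned long i;
	for (i = 0; i < RING_SIZE; i++)
		shadow[i].next_free_id = i + 1;
	shadow[RING_SIZE - 1].next_free_id = 0x0fffffff;	/* end marker */
	shadow_free = 0;
}

static unsigned long get_id(void)
{
	unsigned long free = shadow_free;
	assert(free < RING_SIZE);
	shadow_free = shadow[free].next_free_id;	/* pop */
	shadow[free].next_free_id = 0x0fffffee;		/* debug poison */
	return free;
}

static void put_id(unsigned long id)
{
	shadow[id].next_free_id = shadow_free;		/* push */
	shadow_free = id;
}

[No separate list structure is needed: the id field is unused while a slot is free, so it doubles as the next-free pointer.]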
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 9324488f23f0..f88e28b6a27c 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -95,6 +95,12 @@ typedef uint64_t blkif_sector_t;
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
 struct blkif_request_rw {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   handle;       /* only for read/write requests */
+#ifdef CONFIG_X86_64
+	uint32_t       _pad1;        /* offsetof(blkif_request,u.rw.id) == 8 */
+#endif
+	uint64_t       id;           /* private guest value, echoed in resp */
 	blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
 	struct blkif_request_segment {
 		grant_ref_t gref;    /* reference to I/O buffer frame */
@@ -102,23 +108,27 @@ struct blkif_request_rw {
 		/* @last_sect: last sector in frame to transfer (inclusive). */
 		uint8_t     first_sect, last_sect;
 	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
 
 struct blkif_request_discard {
+	uint8_t        nr_segments;  /* number of segments */
+	blkif_vdev_t   _pad1;        /* only for read/write requests */
+#ifdef CONFIG_X86_64
+	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
+#endif
+	uint64_t       id;           /* private guest value, echoed in resp */
 	blkif_sector_t sector_number;
 	uint64_t       nr_sectors;
-};
+	uint8_t        _pad3;
+} __attribute__((__packed__));
 
 struct blkif_request {
 	uint8_t        operation;    /* BLKIF_OP_??? */
-	uint8_t        nr_segments;  /* number of segments */
-	blkif_vdev_t   handle;       /* only for read/write requests */
-	uint64_t       id;           /* private guest value, echoed in resp */
 	union {
 		struct blkif_request_rw rw;
 		struct blkif_request_discard discard;
 	} u;
-};
+} __attribute__((__packed__));
 
 struct blkif_response {
 	uint64_t        id;              /* copied from request */
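[Usage sketch (hypothetical helper, not part of the patch): with the shared header reduced to the operation byte, a producer sets that byte and then fills exactly one union member, mirroring what blkif_queue_request() does in the blkfront hunk above:]

/* Illustration only: build a discard request through the new union.
 * Assumes the struct blkif_request layout defined in this header. */
static void fill_discard(struct blkif_request *ring_req,
			 uint64_t id, blkif_sector_t start, uint64_t count)
{
	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = start;
	ring_req->u.discard.nr_segments = 0;	/* discards carry no segments */
	ring_req->u.discard.nr_sectors = count;
}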