author     Julien Grall <julien.grall@citrix.com>   2015-07-22 11:44:54 -0400
committer  David Vrabel <david.vrabel@citrix.com>   2015-10-23 09:20:39 -0400
commit     c004a6fe0c405e2aa91b2a88aa1428724e6d06f6 (patch)
tree       9a6332624ab915652e3de5081cbce04c07a57505
parent     5ed5451d997f7a86c62a5557efc00dc3836dc559 (diff)
block/xen-blkfront: Make it running on 64KB page granularity
The PV block protocol uses 4KB page granularity. The goal of this patch is to allow a Linux guest using 64KB page granularity to use block devices on a non-modified Xen.

The block API uses segments which should be at least the size of a Linux page. Therefore, the driver has to break each page into 4KB chunks before handing it to the backend. When breaking a 64KB segment into 4KB chunks, it is possible that some chunks are empty. As the PV protocol always requires data in each chunk, we have to count the number of Xen pages that will actually be in use and avoid sending empty chunks.

Note that a pre-defined number of grants is reserved before preparing the request. This pre-defined number is based on the number and the maximum size of the segments. If each segment contains a very small amount of data, the driver may reserve too many grants (16 grants are reserved per segment with 64KB page granularity).

Furthermore, in the case of persistent grants we allocate one Linux page per grant although only the first 4KB of the page will effectively be in use. This could be improved by sharing the page among multiple grants.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-rw-r--r--  drivers/block/xen-blkfront.c  324
1 file changed, 213 insertions, 111 deletions
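Before the diff itself, the arithmetic described in the commit message can be made concrete with a minimal userspace C sketch (this is not the kernel code: count_grants() is a hypothetical stand-in for gnttab_count_grant(), and the constants mirror the GRANTS_PER_PSEG definition added below). With 64KB Linux pages and 4KB grants, a segment needs one grant per 4KB chunk that actually holds data, which is why a flat reservation of 16 grants per segment can overshoot.

/*
 * Standalone sketch (not kernel code) of the grant-counting arithmetic the
 * patch relies on: with 64KB Linux pages and 4KB Xen grants, a physical
 * segment described by (offset, length) may span several 4KB chunks, and
 * only the chunks that actually contain data get a grant.  count_grants()
 * is a hypothetical stand-in for gnttab_count_grant().
 */
#include <stdio.h>

#define XEN_PAGE_SHIFT   12
#define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)          /* 4KB grant size */
#define PAGE_SIZE        (64UL * 1024)                    /* 64KB Linux page */
#define GRANTS_PER_PSEG  (PAGE_SIZE / XEN_PAGE_SIZE)      /* 16 */

/* Number of 4KB grants needed to cover [offset, offset + len) in a page. */
static unsigned long count_grants(unsigned long offset, unsigned long len)
{
	unsigned long first = offset >> XEN_PAGE_SHIFT;
	unsigned long last  = (offset + len - 1) >> XEN_PAGE_SHIFT;

	return len ? last - first + 1 : 0;
}

int main(void)
{
	/* A full 64KB segment needs all 16 grants... */
	printf("full segment : %lu grants\n", count_grants(0, PAGE_SIZE));
	/* ...but a 512-byte request in the middle of the page needs only 1, */
	/* so reserving GRANTS_PER_PSEG (16) grants per segment can be an    */
	/* over-estimate, as the commit message notes.                       */
	printf("512B segment : %lu grants (reserved: %lu)\n",
	       count_grants(8192, 512), GRANTS_PER_PSEG);
	return 0;
}

Compiled with any C compiler, this prints 16 grants for a full page and a single grant for a 512-byte request, matching the over-reservation caveat in the message above.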
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f5dfc1694679..057e05da83d1 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -78,6 +78,7 @@ struct blk_shadow {
 	struct grant **grants_used;
 	struct grant **indirect_grants;
 	struct scatterlist *sg;
+	unsigned int num_sg;
 };
 
 struct split_bio {
@@ -106,8 +107,12 @@ static unsigned int xen_blkif_max_ring_order;
 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
 
-#define BLK_RING_SIZE(info) __CONST_RING_SIZE(blkif, PAGE_SIZE * (info)->nr_ring_pages)
-#define BLK_MAX_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE * XENBUS_MAX_RING_PAGES)
+#define BLK_RING_SIZE(info)	\
+	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
+
+#define BLK_MAX_RING_SIZE	\
+	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_PAGES)
+
 /*
  * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
  * characters are enough. Define to 20 to keep consist with backend.
@@ -146,6 +151,7 @@ struct blkfront_info
 	unsigned int discard_granularity;
 	unsigned int discard_alignment;
 	unsigned int feature_persistent:1;
+	/* Number of 4KB segments handled */
 	unsigned int max_indirect_segments;
 	int is_ready;
 	struct blk_mq_tag_set tag_set;
@@ -174,10 +180,23 @@ static DEFINE_SPINLOCK(minor_lock);
 
 #define DEV_NAME	"xvd"	/* name in /dev */
 
-#define SEGS_PER_INDIRECT_FRAME \
-	(PAGE_SIZE/sizeof(struct blkif_request_segment))
-#define INDIRECT_GREFS(_segs) \
-	((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
+/*
+ * Grants are always the same size as a Xen page (i.e 4KB).
+ * A physical segment is always the same size as a Linux page.
+ * Number of grants per physical segment
+ */
+#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)
+
+#define GRANTS_PER_INDIRECT_FRAME \
+	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
+
+#define PSEGS_PER_INDIRECT_FRAME \
+	(GRANTS_INDIRECT_FRAME / GRANTS_PSEGS)
+
+#define INDIRECT_GREFS(_grants)		\
+	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
+
+#define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)
 
 static int blkfront_setup_indirect(struct blkfront_info *info);
 static int blkfront_gather_backend_features(struct blkfront_info *info);
@@ -465,14 +484,100 @@ static int blkif_queue_discard_req(struct request *req)
 	return 0;
 }
 
+struct setup_rw_req {
+	unsigned int grant_idx;
+	struct blkif_request_segment *segments;
+	struct blkfront_info *info;
+	struct blkif_request *ring_req;
+	grant_ref_t gref_head;
+	unsigned int id;
+	/* Only used when persistent grant is used and it's a read request */
+	bool need_copy;
+	unsigned int bvec_off;
+	char *bvec_data;
+};
+
+static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
+				     unsigned int len, void *data)
+{
+	struct setup_rw_req *setup = data;
+	int n, ref;
+	struct grant *gnt_list_entry;
+	unsigned int fsect, lsect;
+	/* Convenient aliases */
+	unsigned int grant_idx = setup->grant_idx;
+	struct blkif_request *ring_req = setup->ring_req;
+	struct blkfront_info *info = setup->info;
+	struct blk_shadow *shadow = &info->shadow[setup->id];
+
+	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
+	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
+		if (setup->segments)
+			kunmap_atomic(setup->segments);
+
+		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
+		gnt_list_entry = get_indirect_grant(&setup->gref_head, info);
+		shadow->indirect_grants[n] = gnt_list_entry;
+		setup->segments = kmap_atomic(gnt_list_entry->page);
+		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
+	}
+
+	gnt_list_entry = get_grant(&setup->gref_head, gfn, info);
+	ref = gnt_list_entry->gref;
+	shadow->grants_used[grant_idx] = gnt_list_entry;
+
+	if (setup->need_copy) {
+		void *shared_data;
+
+		shared_data = kmap_atomic(gnt_list_entry->page);
+		/*
+		 * this does not wipe data stored outside the
+		 * range sg->offset..sg->offset+sg->length.
+		 * Therefore, blkback *could* see data from
+		 * previous requests. This is OK as long as
+		 * persistent grants are shared with just one
+		 * domain. It may need refactoring if this
+		 * changes
+		 */
+		memcpy(shared_data + offset,
+		       setup->bvec_data + setup->bvec_off,
+		       len);
+
+		kunmap_atomic(shared_data);
+		setup->bvec_off += len;
+	}
+
+	fsect = offset >> 9;
+	lsect = fsect + (len >> 9) - 1;
+	if (ring_req->operation != BLKIF_OP_INDIRECT) {
+		ring_req->u.rw.seg[grant_idx] =
+			(struct blkif_request_segment) {
+				.gref       = ref,
+				.first_sect = fsect,
+				.last_sect  = lsect };
+	} else {
+		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
+			(struct blkif_request_segment) {
+				.gref       = ref,
+				.first_sect = fsect,
+				.last_sect  = lsect };
+	}
+
+	(setup->grant_idx)++;
+}
+
 static int blkif_queue_rw_req(struct request *req)
 {
 	struct blkfront_info *info = req->rq_disk->private_data;
 	struct blkif_request *ring_req;
 	unsigned long id;
-	unsigned int fsect, lsect;
-	int i, ref, n;
-	struct blkif_request_segment *segments = NULL;
+	int i;
+	struct setup_rw_req setup = {
+		.grant_idx = 0,
+		.segments = NULL,
+		.info = info,
+		.need_copy = rq_data_dir(req) && info->feature_persistent,
+	};
 
 	/*
 	 * Used to store if we are able to queue the request by just using
@@ -480,25 +585,23 @@ static int blkif_queue_rw_req(struct request *req)
 	 * as there are not sufficiently many free.
 	 */
 	bool new_persistent_gnts;
-	grant_ref_t gref_head;
-	struct grant *gnt_list_entry = NULL;
 	struct scatterlist *sg;
-	int nseg, max_grefs;
+	int num_sg, max_grefs, num_grant;
 
-	max_grefs = req->nr_phys_segments;
+	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
 	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		/*
 		 * If we are using indirect segments we need to account
 		 * for the indirect grefs used in the request.
 		 */
-		max_grefs += INDIRECT_GREFS(req->nr_phys_segments);
+		max_grefs += INDIRECT_GREFS(max_grefs);
 
 	/* Check if we have enough grants to allocate a requests */
 	if (info->persistent_gnts_c < max_grefs) {
 		new_persistent_gnts = 1;
 		if (gnttab_alloc_grant_references(
 		    max_grefs - info->persistent_gnts_c,
-		    &gref_head) < 0) {
+		    &setup.gref_head) < 0) {
 			gnttab_request_free_callback(
 				&info->callback,
 				blkif_restart_queue_callback,
@@ -515,12 +618,19 @@ static int blkif_queue_rw_req(struct request *req)
 	info->shadow[id].request = req;
 
 	BUG_ON(info->max_indirect_segments == 0 &&
-	       req->nr_phys_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+	       GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST);
 	BUG_ON(info->max_indirect_segments &&
-	       req->nr_phys_segments > info->max_indirect_segments);
-	nseg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+	       GREFS(req->nr_phys_segments) > info->max_indirect_segments);
+
+	num_sg = blk_rq_map_sg(req->q, req, info->shadow[id].sg);
+	num_grant = 0;
+	/* Calculate the number of grant used */
+	for_each_sg(info->shadow[id].sg, sg, num_sg, i)
+	       num_grant += gnttab_count_grant(sg->offset, sg->length);
+
 	ring_req->u.rw.id = id;
-	if (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
+	info->shadow[id].num_sg = num_sg;
+	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
 		/*
 		 * The indirect operation can only be a BLKIF_OP_READ or
 		 * BLKIF_OP_WRITE
@@ -531,7 +641,7 @@ static int blkif_queue_rw_req(struct request *req)
 			BLKIF_OP_WRITE : BLKIF_OP_READ;
 		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
 		ring_req->u.indirect.handle = info->handle;
-		ring_req->u.indirect.nr_segments = nseg;
+		ring_req->u.indirect.nr_segments = num_grant;
 	} else {
 		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
 		ring_req->u.rw.handle = info->handle;
@@ -559,73 +669,30 @@ static int blkif_queue_rw_req(struct request *req)
 				ring_req->operation = 0;
 			}
 		}
-		ring_req->u.rw.nr_segments = nseg;
+		ring_req->u.rw.nr_segments = num_grant;
 	}
-	for_each_sg(info->shadow[id].sg, sg, nseg, i) {
-		fsect = sg->offset >> 9;
-		lsect = fsect + (sg->length >> 9) - 1;
-
-		if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
-		    (i % SEGS_PER_INDIRECT_FRAME == 0)) {
-			if (segments)
-				kunmap_atomic(segments);
-
-			n = i / SEGS_PER_INDIRECT_FRAME;
-			gnt_list_entry = get_indirect_grant(&gref_head, info);
-			info->shadow[id].indirect_grants[n] = gnt_list_entry;
-			segments = kmap_atomic(gnt_list_entry->page);
-			ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
-		}
-
-		gnt_list_entry = get_grant(&gref_head,
-					   xen_page_to_gfn(sg_page(sg)),
-					   info);
-		ref = gnt_list_entry->gref;
-
-		info->shadow[id].grants_used[i] = gnt_list_entry;
-
-		if (rq_data_dir(req) && info->feature_persistent) {
-			char *bvec_data;
-			void *shared_data;
 
-			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
+	setup.ring_req = ring_req;
+	setup.id = id;
+	for_each_sg(info->shadow[id].sg, sg, num_sg, i) {
+		BUG_ON(sg->offset + sg->length > PAGE_SIZE);
 
-			shared_data = kmap_atomic(gnt_list_entry->page);
-			bvec_data = kmap_atomic(sg_page(sg));
+		if (setup.need_copy) {
+			setup.bvec_off = sg->offset;
+			setup.bvec_data = kmap_atomic(sg_page(sg));
+		}
 
-			/*
-			 * this does not wipe data stored outside the
-			 * range sg->offset..sg->offset+sg->length.
-			 * Therefore, blkback *could* see data from
-			 * previous requests. This is OK as long as
-			 * persistent grants are shared with just one
-			 * domain. It may need refactoring if this
-			 * changes
-			 */
-			memcpy(shared_data + sg->offset,
-			       bvec_data + sg->offset,
-			       sg->length);
+		gnttab_foreach_grant_in_range(sg_page(sg),
+					      sg->offset,
+					      sg->length,
+					      blkif_setup_rw_req_grant,
+					      &setup);
 
-			kunmap_atomic(bvec_data);
-			kunmap_atomic(shared_data);
-		}
-		if (ring_req->operation != BLKIF_OP_INDIRECT) {
-			ring_req->u.rw.seg[i] =
-					(struct blkif_request_segment) {
-						.gref       = ref,
-						.first_sect = fsect,
-						.last_sect  = lsect };
-		} else {
-			n = i % SEGS_PER_INDIRECT_FRAME;
-			segments[n] =
-					(struct blkif_request_segment) {
-						.gref       = ref,
-						.first_sect = fsect,
-						.last_sect  = lsect };
-		}
+		if (setup.need_copy)
+			kunmap_atomic(setup.bvec_data);
 	}
-	if (segments)
-		kunmap_atomic(segments);
+	if (setup.segments)
+		kunmap_atomic(setup.segments);
 
 	info->ring.req_prod_pvt++;
 
@@ -633,7 +700,7 @@ static int blkif_queue_rw_req(struct request *req)
 	info->shadow[id].req = *ring_req;
 
 	if (new_persistent_gnts)
-		gnttab_free_grant_references(gref_head);
+		gnttab_free_grant_references(setup.gref_head);
 
 	return 0;
 }
@@ -750,14 +817,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
 	/* Hard sector size and max sectors impersonate the equiv. hardware. */
 	blk_queue_logical_block_size(rq, sector_size);
 	blk_queue_physical_block_size(rq, physical_sector_size);
-	blk_queue_max_hw_sectors(rq, (segments * PAGE_SIZE) / 512);
+	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
 
 	/* Each segment in a request is up to an aligned page in size. */
 	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
 	blk_queue_max_segment_size(rq, PAGE_SIZE);
 
 	/* Ensure a merged request will fit in a single I/O ring slot. */
-	blk_queue_max_segments(rq, segments);
+	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
 
 	/* Make sure buffer addresses are sector-aligned. */
 	blk_queue_dma_alignment(rq, 511);
@@ -1116,32 +1183,65 @@ free_shadow:
 
 }
 
+struct copy_from_grant {
+	const struct blk_shadow *s;
+	unsigned int grant_idx;
+	unsigned int bvec_offset;
+	char *bvec_data;
+};
+
+static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
+				  unsigned int len, void *data)
+{
+	struct copy_from_grant *info = data;
+	char *shared_data;
+	/* Convenient aliases */
+	const struct blk_shadow *s = info->s;
+
+	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
+
+	memcpy(info->bvec_data + info->bvec_offset,
+	       shared_data + offset, len);
+
+	info->bvec_offset += len;
+	info->grant_idx++;
+
+	kunmap_atomic(shared_data);
+}
+
 static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
 			     struct blkif_response *bret)
 {
 	int i = 0;
 	struct scatterlist *sg;
-	char *bvec_data;
-	void *shared_data;
-	int nseg;
+	int num_sg, num_grant;
+	struct copy_from_grant data = {
+		.s = s,
+		.grant_idx = 0,
+	};
 
-	nseg = s->req.operation == BLKIF_OP_INDIRECT ?
+	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
 	       s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
+	num_sg = s->num_sg;
 
 	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
-		for_each_sg(s->sg, sg, nseg, i) {
+		for_each_sg(s->sg, sg, num_sg, i) {
 			BUG_ON(sg->offset + sg->length > PAGE_SIZE);
-			shared_data = kmap_atomic(s->grants_used[i]->page);
-			bvec_data = kmap_atomic(sg_page(sg));
-			memcpy(bvec_data + sg->offset,
-			       shared_data + sg->offset,
-			       sg->length);
-			kunmap_atomic(bvec_data);
-			kunmap_atomic(shared_data);
+
+			data.bvec_offset = sg->offset;
+			data.bvec_data = kmap_atomic(sg_page(sg));
+
+			gnttab_foreach_grant_in_range(sg_page(sg),
+						      sg->offset,
+						      sg->length,
+						      blkif_copy_from_grant,
+						      &data);
+
+			kunmap_atomic(data.bvec_data);
 		}
 	}
 	/* Add the persistent grant into the list of free grants */
-	for (i = 0; i < nseg; i++) {
+	for (i = 0; i < num_grant; i++) {
 		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
 			/*
 			 * If the grant is still mapped by the backend (the
@@ -1312,7 +1412,7 @@ static int setup_blkring(struct xenbus_device *dev,
 {
 	struct blkif_sring *sring;
 	int err, i;
-	unsigned long ring_size = info->nr_ring_pages * PAGE_SIZE;
+	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
 	grant_ref_t gref[XENBUS_MAX_RING_PAGES];
 
 	for (i = 0; i < info->nr_ring_pages; i++)
@@ -1641,8 +1741,8 @@ static int blkif_recover(struct blkfront_info *info)
 			atomic_set(&split_bio->pending, pending);
 			split_bio->bio = bio;
 			for (i = 0; i < pending; i++) {
-				offset = (i * segs * PAGE_SIZE) >> 9;
-				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
+				offset = (i * segs * XEN_PAGE_SIZE) >> 9;
+				size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
 					   (unsigned int)bio_sectors(bio) - offset);
 				cloned_bio = bio_clone(bio, GFP_NOIO);
 				BUG_ON(cloned_bio == NULL);
@@ -1753,15 +1853,17 @@ static void blkfront_setup_discard(struct blkfront_info *info)
 
 static int blkfront_setup_indirect(struct blkfront_info *info)
 {
-	unsigned int segs;
+	unsigned int psegs, grants;
 	int err, i;
 
 	if (info->max_indirect_segments == 0)
-		segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+		grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	else
-		segs = info->max_indirect_segments;
+		grants = info->max_indirect_segments;
+	psegs = grants / GRANTS_PER_PSEG;
 
-	err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
+	err = fill_grant_buffer(info,
+				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
 	if (err)
 		goto out_of_memory;
 
@@ -1771,7 +1873,7 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 		 * grants, we need to allocate a set of pages that can be
 		 * used for mapping indirect grefs
 		 */
-		int num = INDIRECT_GREFS(segs) * BLK_RING_SIZE(info);
+		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
 
 		BUG_ON(!list_empty(&info->indirect_pages));
 		for (i = 0; i < num; i++) {
@@ -1784,20 +1886,20 @@ static int blkfront_setup_indirect(struct blkfront_info *info)
 
 	for (i = 0; i < BLK_RING_SIZE(info); i++) {
 		info->shadow[i].grants_used = kzalloc(
-			sizeof(info->shadow[i].grants_used[0]) * segs,
+			sizeof(info->shadow[i].grants_used[0]) * grants,
 			GFP_NOIO);
-		info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * segs, GFP_NOIO);
+		info->shadow[i].sg = kzalloc(sizeof(info->shadow[i].sg[0]) * psegs, GFP_NOIO);
 		if (info->max_indirect_segments)
 			info->shadow[i].indirect_grants = kzalloc(
 				sizeof(info->shadow[i].indirect_grants[0]) *
-				INDIRECT_GREFS(segs),
+				INDIRECT_GREFS(grants),
 				GFP_NOIO);
 		if ((info->shadow[i].grants_used == NULL) ||
 		    (info->shadow[i].sg == NULL) ||
 		    (info->max_indirect_segments &&
 		     (info->shadow[i].indirect_grants == NULL)))
 			goto out_of_memory;
 		sg_init_table(info->shadow[i].sg, psegs);
 	}
 
 
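As a closing note on the pattern the patch introduces (each scatterlist segment split into 4KB chunks, with one grant filled in per chunk through a callback), here is a rough, self-contained C sketch. It only models the offset/length bookkeeping; for_each_grant_in_range() and setup_grant() are made-up stand-ins for gnttab_foreach_grant_in_range() and blkif_setup_rw_req_grant(), not the real Xen grant-table API.

/*
 * Userspace sketch (assumptions, not the kernel API) of the per-grant
 * iteration used above: walk every 4KB chunk of a segment and let a
 * callback fill in one grant per chunk.
 */
#include <stdio.h>

#define XEN_PAGE_SHIFT 12
#define XEN_PAGE_SIZE  (1UL << XEN_PAGE_SHIFT)

typedef void (*grant_fn_t)(unsigned long chunk_idx, unsigned long offset,
			   unsigned long len, void *data);

/* Split [offset, offset + len) into 4KB-aligned chunks and call fn on each. */
static void for_each_grant_in_range(unsigned long offset, unsigned long len,
				    grant_fn_t fn, void *data)
{
	while (len) {
		unsigned long chunk_off = offset & (XEN_PAGE_SIZE - 1);
		unsigned long chunk_len = XEN_PAGE_SIZE - chunk_off;

		if (chunk_len > len)
			chunk_len = len;

		fn(offset >> XEN_PAGE_SHIFT, chunk_off, chunk_len, data);

		offset += chunk_len;
		len -= chunk_len;
	}
}

struct setup { unsigned long grant_idx; };

/* Plays the role of blkif_setup_rw_req_grant(): one grant per 4KB chunk,   */
/* with first/last sector computed from the offset/length inside the chunk. */
static void setup_grant(unsigned long chunk_idx, unsigned long offset,
			unsigned long len, void *data)
{
	struct setup *s = data;
	unsigned long fsect = offset >> 9;
	unsigned long lsect = fsect + (len >> 9) - 1;

	printf("grant %lu: xen page %lu, sectors %lu..%lu\n",
	       s->grant_idx++, chunk_idx, fsect, lsect);
}

int main(void)
{
	struct setup s = { 0 };

	/* A 9KB segment starting 1KB into the page uses three grants:   */
	/* a partial 3KB chunk, a full 4KB chunk and a partial 2KB chunk. */
	for_each_grant_in_range(1024, 9 * 1024, setup_grant, &s);
	return 0;
}

Running it on that 9KB segment prints the three grants (3KB, 4KB and 2KB) the driver would hand to the backend in place of a single 64KB-granularity segment.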