author		Bob Liu <bob.liu@oracle.com>	2015-11-13 22:12:12 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2016-01-04 12:20:57 -0500
commit		3df0e5059908b8fdba351c4b5dd77caadd95a949 (patch)
tree		0c1ef08f4f61c7e885afcfd4e2339ee224df4078 /drivers/block/xen-blkfront.c
parent		81f351615772365d46ceeac3e50c9dd4e8f9dc89 (diff)
xen/blkfront: pseudo support for multi hardware queues/rings
Preparatory patch for multiple hardware queues (rings). The number of
rings is unconditionally set to 1; a larger number will be enabled in
the follow-up patch "xen/blkfront: negotiate number of queues/rings to
be used with backend", so as to make review easier.

Note that blkfront_gather_backend_features no longer calls
blkfront_setup_indirect (as that needs to be done per ring). That means
that in blkif_recover/blkif_connect we have to do it in a loop (bounded
by nr_rings).

Signed-off-by: Bob Liu <bob.liu@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'drivers/block/xen-blkfront.c')
-rw-r--r--	drivers/block/xen-blkfront.c | 343
1 file changed, 198 insertions(+), 145 deletions(-)
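The shape of the change is easier to see in isolation before reading the diff: the single blkfront_ring_info embedded in blkfront_info becomes a kzalloc'd array of nr_rings entries, and every per-ring step (shared-ring setup, blkfront_setup_indirect, queue kicking, teardown) moves into a loop bounded by nr_rings. The stand-alone C sketch below illustrates only that allocate/initialize/tear-down pattern; the names used here (device_info, ring_info, setup_ring, probe, release) are simplified placeholders for illustration, not the driver's real types or API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-ins for the driver's per-device and per-ring state. */
struct ring_info {
	unsigned int id;          /* which hardware queue this ring backs */
	int ring_ref;             /* placeholder for the shared-ring grant reference */
	void *dev_info;           /* back-pointer to the owning device */
};

struct device_info {
	unsigned int nr_rings;    /* fixed at 1 by this patch, negotiated later */
	struct ring_info *rinfo;  /* array of nr_rings entries */
};

/* Per-ring setup, standing in for the per-ring work (setup_blkring /
 * blkfront_setup_indirect) that is now done once per ring. */
static int setup_ring(struct ring_info *rinfo, unsigned int index)
{
	rinfo->id = index;
	rinfo->ring_ref = -1;     /* not yet granted to the backend */
	return 0;
}

static int probe(struct device_info *info)
{
	unsigned int i;

	info->nr_rings = 1;       /* hard-coded here, as in this patch */
	info->rinfo = calloc(info->nr_rings, sizeof(*info->rinfo));
	if (!info->rinfo)
		return -1;

	for (i = 0; i < info->nr_rings; i++) {
		info->rinfo[i].dev_info = info;
		if (setup_ring(&info->rinfo[i], i) < 0) {
			free(info->rinfo);
			info->rinfo = NULL;
			info->nr_rings = 0;
			return -1;
		}
	}
	return 0;
}

static void release(struct device_info *info)
{
	/* Per-ring teardown would loop over nr_rings before freeing the array. */
	free(info->rinfo);
	info->rinfo = NULL;
	info->nr_rings = 0;
}

int main(void)
{
	struct device_info info;

	memset(&info, 0, sizeof(info));
	if (probe(&info) == 0)
		printf("initialised %u ring(s)\n", info.nr_rings);
	release(&info);
	return 0;
}

In the driver itself the same split shows up as blkif_free() keeping the per-device work (persistent grants, freeing the rinfo array) while the new blkif_free_ring() handles one ring at a time.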
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0c3ad214a792..0638b1722a40 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -150,6 +150,7 @@ struct blkfront_info
 	int vdevice;
 	blkif_vdev_t handle;
 	enum blkif_state connected;
+	/* Number of pages per ring buffer. */
 	unsigned int nr_ring_pages;
 	struct request_queue *rq;
 	struct list_head grants;
@@ -164,7 +165,8 @@ struct blkfront_info
 	unsigned int max_indirect_segments;
 	int is_ready;
 	struct blk_mq_tag_set tag_set;
-	struct blkfront_ring_info rinfo;
+	struct blkfront_ring_info *rinfo;
+	unsigned int nr_rings;
 };
 
 static unsigned int nr_minors;
@@ -209,7 +211,7 @@ static DEFINE_SPINLOCK(minor_lock);
 #define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)
 
 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
-static int blkfront_gather_backend_features(struct blkfront_info *info);
+static void blkfront_gather_backend_features(struct blkfront_info *info);
 
 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
 {
@@ -338,8 +340,8 @@ static struct grant *get_indirect_grant(grant_ref_t *gref_head,
 		struct page *indirect_page;
 
 		/* Fetch a pre-allocated page to use for indirect grefs */
-		BUG_ON(list_empty(&info->rinfo.indirect_pages));
-		indirect_page = list_first_entry(&info->rinfo.indirect_pages,
+		BUG_ON(list_empty(&info->rinfo->indirect_pages));
+		indirect_page = list_first_entry(&info->rinfo->indirect_pages,
 						 struct page, lru);
 		list_del(&indirect_page->lru);
 		gnt_list_entry->page = indirect_page;
@@ -597,7 +599,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	 * existing persistent grants, or if we have to get new grants,
 	 * as there are not sufficiently many free.
 	 */
-	bool new_persistent_gnts;
 	struct scatterlist *sg;
 	int num_sg, max_grefs, num_grant;
 
@@ -609,12 +610,12 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	 */
 	max_grefs += INDIRECT_GREFS(max_grefs);
 
-	/* Check if we have enough grants to allocate a requests */
-	if (info->persistent_gnts_c < max_grefs) {
-		new_persistent_gnts = 1;
-		if (gnttab_alloc_grant_references(
-		    max_grefs - info->persistent_gnts_c,
-		    &setup.gref_head) < 0) {
+	/*
+	 * We have to reserve 'max_grefs' grants because persistent
+	 * grants are shared by all rings.
+	 */
+	if (max_grefs > 0)
+		if (gnttab_alloc_grant_references(max_grefs, &setup.gref_head) < 0) {
 			gnttab_request_free_callback(
 				&rinfo->callback,
 				blkif_restart_queue_callback,
@@ -622,8 +623,6 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 				max_grefs);
 			return 1;
 		}
-	} else
-		new_persistent_gnts = 0;
 
 	/* Fill out a communications ring structure. */
 	ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
@@ -712,7 +711,7 @@ static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *ri
 	/* Keep a private copy so we can reissue requests when recovering. */
 	rinfo->shadow[id].req = *ring_req;
 
-	if (new_persistent_gnts)
+	if (max_grefs > 0)
 		gnttab_free_grant_references(setup.gref_head);
 
 	return 0;
@@ -791,7 +790,8 @@ static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 {
 	struct blkfront_info *info = (struct blkfront_info *)data;
 
-	hctx->driver_data = &info->rinfo;
+	BUG_ON(info->nr_rings <= index);
+	hctx->driver_data = &info->rinfo[index];
 	return 0;
 }
 
@@ -1050,8 +1050,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
 
 static void xlvbd_release_gendisk(struct blkfront_info *info)
 {
-	unsigned int minor, nr_minors;
-	struct blkfront_ring_info *rinfo = &info->rinfo;
+	unsigned int minor, nr_minors, i;
 
 	if (info->rq == NULL)
 		return;
@@ -1059,11 +1058,15 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
 	/* No more blkif_request(). */
 	blk_mq_stop_hw_queues(info->rq);
 
-	/* No more gnttab callback work. */
-	gnttab_cancel_free_callback(&rinfo->callback);
+	for (i = 0; i < info->nr_rings; i++) {
+		struct blkfront_ring_info *rinfo = &info->rinfo[i];
 
-	/* Flush gnttab callback work. Must be done with no locks held. */
-	flush_work(&rinfo->work);
+		/* No more gnttab callback work. */
+		gnttab_cancel_free_callback(&rinfo->callback);
+
+		/* Flush gnttab callback work. Must be done with no locks held. */
+		flush_work(&rinfo->work);
+	}
 
 	del_gendisk(info->gd);
 
@@ -1096,37 +1099,11 @@ static void blkif_restart_queue(struct work_struct *work)
 	spin_unlock_irq(&rinfo->dev_info->io_lock);
 }
 
-static void blkif_free(struct blkfront_info *info, int suspend)
+static void blkif_free_ring(struct blkfront_ring_info *rinfo)
 {
 	struct grant *persistent_gnt;
-	struct grant *n;
+	struct blkfront_info *info = rinfo->dev_info;
 	int i, j, segs;
-	struct blkfront_ring_info *rinfo = &info->rinfo;
-
-	/* Prevent new requests being issued until we fix things up. */
-	spin_lock_irq(&info->io_lock);
-	info->connected = suspend ?
-		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
-	/* No more blkif_request(). */
-	if (info->rq)
-		blk_mq_stop_hw_queues(info->rq);
-
-	/* Remove all persistent grants */
-	if (!list_empty(&info->grants)) {
-		list_for_each_entry_safe(persistent_gnt, n,
-					 &info->grants, node) {
-			list_del(&persistent_gnt->node);
-			if (persistent_gnt->gref != GRANT_INVALID_REF) {
-				gnttab_end_foreign_access(persistent_gnt->gref,
-							  0, 0UL);
-				info->persistent_gnts_c--;
-			}
-			if (info->feature_persistent)
-				__free_page(persistent_gnt->page);
-			kfree(persistent_gnt);
-		}
-	}
-	BUG_ON(info->persistent_gnts_c != 0);
 
 	/*
 	 * Remove indirect pages, this only happens when using indirect
@@ -1186,7 +1163,6 @@ free_shadow:
 
 	/* No more gnttab callback work. */
 	gnttab_cancel_free_callback(&rinfo->callback);
-	spin_unlock_irq(&info->io_lock);
 
 	/* Flush gnttab callback work. Must be done with no locks held. */
 	flush_work(&rinfo->work);
@@ -1204,7 +1180,45 @@ free_shadow:
 	if (rinfo->irq)
 		unbind_from_irqhandler(rinfo->irq, rinfo);
 	rinfo->evtchn = rinfo->irq = 0;
+}
 
+static void blkif_free(struct blkfront_info *info, int suspend)
+{
+	struct grant *persistent_gnt, *n;
+	unsigned int i;
+
+	/* Prevent new requests being issued until we fix things up. */
+	spin_lock_irq(&info->io_lock);
+	info->connected = suspend ?
+		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
+	/* No more blkif_request(). */
+	if (info->rq)
+		blk_mq_stop_hw_queues(info->rq);
+
+	/* Remove all persistent grants */
+	if (!list_empty(&info->grants)) {
+		list_for_each_entry_safe(persistent_gnt, n,
+					 &info->grants, node) {
+			list_del(&persistent_gnt->node);
+			if (persistent_gnt->gref != GRANT_INVALID_REF) {
+				gnttab_end_foreign_access(persistent_gnt->gref,
+							  0, 0UL);
+				info->persistent_gnts_c--;
+			}
+			if (info->feature_persistent)
+				__free_page(persistent_gnt->page);
+			kfree(persistent_gnt);
+		}
+	}
+	BUG_ON(info->persistent_gnts_c != 0);
+
+	for (i = 0; i < info->nr_rings; i++)
+		blkif_free_ring(&info->rinfo[i]);
+
+	kfree(info->rinfo);
+	info->rinfo = NULL;
+	info->nr_rings = 0;
+	spin_unlock_irq(&info->io_lock);
 }
 
 struct copy_from_grant {
@@ -1492,7 +1506,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
 	int err, i;
 	unsigned int max_page_order = 0;
 	unsigned int ring_page_order = 0;
-	struct blkfront_ring_info *rinfo = &info->rinfo;
+	struct blkfront_ring_info *rinfo;
 
 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
 			   "max-ring-page-order", "%u", &max_page_order);
@@ -1503,10 +1517,13 @@ static int talk_to_blkback(struct xenbus_device *dev,
 		info->nr_ring_pages = 1 << ring_page_order;
 	}
 
-	/* Create shared ring, alloc event channel. */
-	err = setup_blkring(dev, rinfo);
-	if (err)
-		goto out;
+	for (i = 0; i < info->nr_rings; i++) {
+		rinfo = &info->rinfo[i];
+		/* Create shared ring, alloc event channel. */
+		err = setup_blkring(dev, rinfo);
+		if (err)
+			goto destroy_blkring;
+	}
 
 again:
 	err = xenbus_transaction_start(&xbt);
@@ -1515,37 +1532,43 @@ again:
 		goto destroy_blkring;
 	}
 
-	if (info->nr_ring_pages == 1) {
-		err = xenbus_printf(xbt, dev->nodename,
-				    "ring-ref", "%u", rinfo->ring_ref[0]);
-		if (err) {
-			message = "writing ring-ref";
-			goto abort_transaction;
-		}
-	} else {
-		err = xenbus_printf(xbt, dev->nodename,
-				    "ring-page-order", "%u", ring_page_order);
-		if (err) {
-			message = "writing ring-page-order";
-			goto abort_transaction;
-		}
-
-		for (i = 0; i < info->nr_ring_pages; i++) {
-			char ring_ref_name[RINGREF_NAME_LEN];
-
-			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
-			err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
-					    "%u", rinfo->ring_ref[i]);
+	if (info->nr_rings == 1) {
+		rinfo = &info->rinfo[0];
+		if (info->nr_ring_pages == 1) {
+			err = xenbus_printf(xbt, dev->nodename,
+					    "ring-ref", "%u", rinfo->ring_ref[0]);
 			if (err) {
 				message = "writing ring-ref";
 				goto abort_transaction;
 			}
+		} else {
+			err = xenbus_printf(xbt, dev->nodename,
+					    "ring-page-order", "%u", ring_page_order);
+			if (err) {
+				message = "writing ring-page-order";
+				goto abort_transaction;
+			}
+
+			for (i = 0; i < info->nr_ring_pages; i++) {
+				char ring_ref_name[RINGREF_NAME_LEN];
+
+				snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
+				err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
+						    "%u", rinfo->ring_ref[i]);
+				if (err) {
+					message = "writing ring-ref";
+					goto abort_transaction;
+				}
+			}
 		}
-	}
-	err = xenbus_printf(xbt, dev->nodename,
-			    "event-channel", "%u", rinfo->evtchn);
-	if (err) {
-		message = "writing event-channel";
+		err = xenbus_printf(xbt, dev->nodename,
+				    "event-channel", "%u", rinfo->evtchn);
+		if (err) {
+			message = "writing event-channel";
+			goto abort_transaction;
+		}
+	} else {
+		/* Not supported at this stage. */
 		goto abort_transaction;
 	}
 	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
@@ -1568,9 +1591,15 @@ again:
 		goto destroy_blkring;
 	}
 
-	for (i = 0; i < BLK_RING_SIZE(info); i++)
-		rinfo->shadow[i].req.u.rw.id = i+1;
-	rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+	for (i = 0; i < info->nr_rings; i++) {
+		unsigned int j;
+
+		rinfo = &info->rinfo[i];
+
+		for (j = 0; j < BLK_RING_SIZE(info); j++)
+			rinfo->shadow[j].req.u.rw.id = j + 1;
+		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+	}
 	xenbus_switch_state(dev, XenbusStateInitialised);
 
 	return 0;
@@ -1581,7 +1610,7 @@ again:
 	xenbus_dev_fatal(dev, err, "%s", message);
  destroy_blkring:
 	blkif_free(info, 0);
- out:
+
 	return err;
 }
 
@@ -1595,8 +1624,8 @@ static int blkfront_probe(struct xenbus_device *dev,
 			  const struct xenbus_device_id *id)
 {
 	int err, vdevice;
+	unsigned int r_index;
 	struct blkfront_info *info;
-	struct blkfront_ring_info *rinfo;
 
 	/* FIXME: Use dynamic device id if this is not set. */
 	err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1646,10 +1675,22 @@ static int blkfront_probe(struct xenbus_device *dev,
 		return -ENOMEM;
 	}
 
-	rinfo = &info->rinfo;
-	INIT_LIST_HEAD(&rinfo->indirect_pages);
-	rinfo->dev_info = info;
-	INIT_WORK(&rinfo->work, blkif_restart_queue);
+	info->nr_rings = 1;
+	info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
+	if (!info->rinfo) {
+		xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
+		kfree(info);
+		return -ENOMEM;
+	}
+
+	for (r_index = 0; r_index < info->nr_rings; r_index++) {
+		struct blkfront_ring_info *rinfo;
+
+		rinfo = &info->rinfo[r_index];
+		INIT_LIST_HEAD(&rinfo->indirect_pages);
+		rinfo->dev_info = info;
+		INIT_WORK(&rinfo->work, blkif_restart_queue);
+	}
 
 	mutex_init(&info->mutex);
 	spin_lock_init(&info->io_lock);
@@ -1681,7 +1722,7 @@ static void split_bio_end(struct bio *bio)
 
 static int blkif_recover(struct blkfront_info *info)
 {
-	int i;
+	unsigned int i, r_index;
 	struct request *req, *n;
 	struct blk_shadow *copy;
 	int rc;
@@ -1691,57 +1732,62 @@ static int blkif_recover(struct blkfront_info *info)
 	int pending, size;
 	struct split_bio *split_bio;
 	struct list_head requests;
-	struct blkfront_ring_info *rinfo = &info->rinfo;
-
-	/* Stage 1: Make a safe copy of the shadow state. */
-	copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
-		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
-	if (!copy)
-		return -ENOMEM;
-
-	/* Stage 2: Set up free list. */
-	memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
-	for (i = 0; i < BLK_RING_SIZE(info); i++)
-		rinfo->shadow[i].req.u.rw.id = i+1;
-	rinfo->shadow_free = rinfo->ring.req_prod_pvt;
-	rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
-
-	rc = blkfront_gather_backend_features(info);
-	if (rc) {
-		kfree(copy);
-		return rc;
-	}
 
+	blkfront_gather_backend_features(info);
 	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	blk_queue_max_segments(info->rq, segs);
 	bio_list_init(&bio_list);
 	INIT_LIST_HEAD(&requests);
-	for (i = 0; i < BLK_RING_SIZE(info); i++) {
-		/* Not in use? */
-		if (!copy[i].request)
-			continue;
 
-		/*
-		 * Get the bios in the request so we can re-queue them.
-		 */
-		if (copy[i].request->cmd_flags &
-		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
-			/*
-			 * Flush operations don't contain bios, so
-			 * we need to requeue the whole request
-			 */
-			list_add(&copy[i].request->queuelist, &requests);
-			continue;
-		}
-		merge_bio.head = copy[i].request->bio;
-		merge_bio.tail = copy[i].request->biotail;
-		bio_list_merge(&bio_list, &merge_bio);
-		copy[i].request->bio = NULL;
-		blk_end_request_all(copy[i].request, 0);
-	}
-
-	kfree(copy);
-
+	for (r_index = 0; r_index < info->nr_rings; r_index++) {
+		struct blkfront_ring_info *rinfo;
+
+		rinfo = &info->rinfo[r_index];
+		/* Stage 1: Make a safe copy of the shadow state. */
+		copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
+			       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
+		if (!copy)
+			return -ENOMEM;
+
+		/* Stage 2: Set up free list. */
+		memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
+		for (i = 0; i < BLK_RING_SIZE(info); i++)
+			rinfo->shadow[i].req.u.rw.id = i+1;
+		rinfo->shadow_free = rinfo->ring.req_prod_pvt;
+		rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
+
+		rc = blkfront_setup_indirect(rinfo);
+		if (rc) {
+			kfree(copy);
+			return rc;
+		}
+
+		for (i = 0; i < BLK_RING_SIZE(info); i++) {
+			/* Not in use? */
+			if (!copy[i].request)
+				continue;
+
+			/*
+			 * Get the bios in the request so we can re-queue them.
+			 */
+			if (copy[i].request->cmd_flags &
+			    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
+				/*
+				 * Flush operations don't contain bios, so
+				 * we need to requeue the whole request
+				 */
+				list_add(&copy[i].request->queuelist, &requests);
+				continue;
+			}
+			merge_bio.head = copy[i].request->bio;
+			merge_bio.tail = copy[i].request->biotail;
+			bio_list_merge(&bio_list, &merge_bio);
+			copy[i].request->bio = NULL;
+			blk_end_request_all(copy[i].request, 0);
+		}
+
+		kfree(copy);
+	}
 	xenbus_switch_state(info->xbdev, XenbusStateConnected);
 
 	spin_lock_irq(&info->io_lock);
@@ -1749,8 +1795,13 @@ static int blkif_recover(struct blkfront_info *info)
 	/* Now safe for us to use the shared ring */
 	info->connected = BLKIF_STATE_CONNECTED;
 
-	/* Kick any other new requests queued since we resumed */
-	kick_pending_request_queues(rinfo);
+	for (r_index = 0; r_index < info->nr_rings; r_index++) {
+		struct blkfront_ring_info *rinfo;
+
+		rinfo = &info->rinfo[r_index];
+		/* Kick any other new requests queued since we resumed */
+		kick_pending_request_queues(rinfo);
+	}
 
 	list_for_each_entry_safe(req, n, &requests, queuelist) {
 		/* Requeue pending requests (flush or discard) */
@@ -1961,7 +2012,7 @@ out_of_memory:
 /*
  * Gather all backend feature-*
  */
-static int blkfront_gather_backend_features(struct blkfront_info *info)
+static void blkfront_gather_backend_features(struct blkfront_info *info)
 {
 	int err;
 	int barrier, flush, discard, persistent;
@@ -2016,8 +2067,6 @@ static int blkfront_gather_backend_features(struct blkfront_info *info)
 	else
 		info->max_indirect_segments = min(indirect_segments,
 						  xen_blkif_max_segments);
-
-	return blkfront_setup_indirect(&info->rinfo);
 }
 
 /*
@@ -2030,8 +2079,7 @@ static void blkfront_connect(struct blkfront_info *info)
 	unsigned long sector_size;
 	unsigned int physical_sector_size;
 	unsigned int binfo;
-	int err;
-	struct blkfront_ring_info *rinfo = &info->rinfo;
+	int err, i;
 
 	switch (info->connected) {
 	case BLKIF_STATE_CONNECTED:
@@ -2088,11 +2136,15 @@ static void blkfront_connect(struct blkfront_info *info)
 	if (err != 1)
 		physical_sector_size = sector_size;
 
-	err = blkfront_gather_backend_features(info);
-	if (err) {
-		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
-				 info->xbdev->otherend);
-		return;
+	blkfront_gather_backend_features(info);
+	for (i = 0; i < info->nr_rings; i++) {
+		err = blkfront_setup_indirect(&info->rinfo[i]);
+		if (err) {
+			xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
+					 info->xbdev->otherend);
+			blkif_free(info, 0);
+			break;
+		}
 	}
 
 	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
@@ -2108,7 +2160,8 @@ static void blkfront_connect(struct blkfront_info *info)
 	/* Kick pending requests. */
 	spin_lock_irq(&info->io_lock);
 	info->connected = BLKIF_STATE_CONNECTED;
-	kick_pending_request_queues(rinfo);
+	for (i = 0; i < info->nr_rings; i++)
+		kick_pending_request_queues(&info->rinfo[i]);
 	spin_unlock_irq(&info->io_lock);
 
 	add_disk(info->gd);