author      Wei Liu <wei.liu2@citrix.com>            2015-04-03 02:44:59 -0400
committer   David Vrabel <david.vrabel@citrix.com>   2015-04-15 05:56:47 -0400
commit      ccc9d90a9a8b5c4ad7e9708ec41f75ff9e98d61d
tree        2a147c10a289a3f4283d6008708297f07df052a6
parent      278edfc07875779a69277f6c5773ec9318a994ee
xenbus_client: Extend interface to support multi-page ring
Originally, Xen PV drivers used only a single-page ring to pass
information along, which might limit the throughput between frontend
and backend.

This patch extends the xenbus driver to support multi-page rings,
which in general should improve throughput when the ring is the
bottleneck. Changes to the various frontends and backends to adapt
them to the new interface are also included.
Affected Xen drivers:
* blkfront/back
* netfront/back
* pcifront/back
* scsifront/back
* vtpmfront
The interface is documented, as before, in xenbus_client.c.
Signed-off-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Bob Liu <bob.liu@oracle.com>
Cc: Konrad Wilk <konrad.wilk@oracle.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
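[Editor's note: for readers unfamiliar with the interface, the frontend
half works as follows: the driver allocates physically contiguous,
page-aligned ring memory, then passes the starting virtual address and
a grant-reference array to xenbus_grant_ring(), which grants each page
to the peer domain and unwinds any grants already issued if one fails.
The sketch below is illustrative only and is not part of this series;
the my_* names and the two-page (order-1) ring size are assumptions.]

#include <linux/gfp.h>
#include <xen/xenbus.h>

#define MY_RING_ORDER 1                      /* assumed: 2-page ring */
#define MY_RING_PAGES (1U << MY_RING_ORDER)  /* <= XENBUS_MAX_RING_PAGES */

static int my_setup_ring(struct xenbus_device *dev)
{
        grant_ref_t grefs[MY_RING_PAGES];    /* one grant reference per page */
        void *sring;
        int err, i;

        /* The ring must be physically contiguous, page-aligned memory. */
        sring = (void *)__get_free_pages(GFP_NOIO | __GFP_ZERO, MY_RING_ORDER);
        if (!sring)
                return -ENOMEM;

        /* Grant all pages in one call.  On failure, xenbus_grant_ring()
         * has already ended the grants it issued, so the caller only
         * needs to free the pages. */
        err = xenbus_grant_ring(dev, sring, MY_RING_PAGES, grefs);
        if (err < 0) {
                free_pages((unsigned long)sring, MY_RING_ORDER);
                return err;
        }

        /* A real driver would now publish grefs[0..MY_RING_PAGES-1] in
         * xenstore (e.g. hypothetical "ring-ref%u" nodes) for the
         * backend to read. */
        for (i = 0; i < MY_RING_PAGES; i++)
                pr_info("ring page %d -> gref %u\n", i, grefs[i]);

        return 0;
}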
-rw-r--r--   drivers/block/xen-blkback/xenbus.c   |   5
-rw-r--r--   drivers/block/xen-blkfront.c         |   5
-rw-r--r--   drivers/char/tpm/xen-tpmfront.c      |   5
-rw-r--r--   drivers/net/xen-netback/netback.c    |   4
-rw-r--r--   drivers/net/xen-netfront.c           |   9
-rw-r--r--   drivers/pci/xen-pcifront.c           |   5
-rw-r--r--   drivers/scsi/xen-scsifront.c         |   5
-rw-r--r--   drivers/xen/xen-pciback/xenbus.c     |   2
-rw-r--r--   drivers/xen/xen-scsiback.c           |   2
-rw-r--r--   drivers/xen/xenbus/xenbus_client.c   | 387
-rw-r--r--   include/xen/xenbus.h                 |  20
11 files changed, 324 insertions, 125 deletions
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index e3afe97280b1..ff3025922c14 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -193,7 +193,7 @@ fail:
         return ERR_PTR(-ENOMEM);
 }
 
-static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
+static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t gref,
                          unsigned int evtchn)
 {
         int err;
@@ -202,7 +202,8 @@ static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
         if (blkif->irq)
                 return 0;
 
-        err = xenbus_map_ring_valloc(blkif->be->dev, shared_page, &blkif->blk_ring);
+        err = xenbus_map_ring_valloc(blkif->be->dev, &gref, 1,
+                                     &blkif->blk_ring);
         if (err < 0)
                 return err;
 
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 37779e4c4585..2c61cf8c6f61 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1245,6 +1245,7 @@ static int setup_blkring(struct xenbus_device *dev,
                          struct blkfront_info *info)
 {
         struct blkif_sring *sring;
+        grant_ref_t gref;
         int err;
 
         info->ring_ref = GRANT_INVALID_REF;
@@ -1257,13 +1258,13 @@ static int setup_blkring(struct xenbus_device *dev,
         SHARED_RING_INIT(sring);
         FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-        err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
+        err = xenbus_grant_ring(dev, info->ring.sring, 1, &gref);
         if (err < 0) {
                 free_page((unsigned long)sring);
                 info->ring.sring = NULL;
                 goto fail;
         }
-        info->ring_ref = err;
+        info->ring_ref = gref;
 
         err = xenbus_alloc_evtchn(dev, &info->evtchn);
         if (err)
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
index c3b4f5a5ac10..3111f2778079 100644
--- a/drivers/char/tpm/xen-tpmfront.c
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -193,6 +193,7 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
         struct xenbus_transaction xbt;
         const char *message = NULL;
         int rv;
+        grant_ref_t gref;
 
         priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
         if (!priv->shr) {
@@ -200,11 +201,11 @@ static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
                 return -ENOMEM;
         }
 
-        rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
+        rv = xenbus_grant_ring(dev, &priv->shr, 1, &gref);
         if (rv < 0)
                 return rv;
 
-        priv->ring_ref = rv;
+        priv->ring_ref = gref;
 
         rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
         if (rv)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index cab9f5257f57..99a49479843d 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1781,7 +1781,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
         int err = -ENOMEM;
 
         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
-                                     tx_ring_ref, &addr);
+                                     &tx_ring_ref, 1, &addr);
         if (err)
                 goto err;
 
@@ -1789,7 +1789,7 @@ int xenvif_map_frontend_rings(struct xenvif_queue *queue,
         BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
 
         err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
-                                     rx_ring_ref, &addr);
+                                     &rx_ring_ref, 1, &addr);
         if (err)
                 goto err;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e9b960f0ff32..13f5e7ff4bb5 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1486,6 +1486,7 @@ static int setup_netfront(struct xenbus_device *dev,
 {
         struct xen_netif_tx_sring *txs;
         struct xen_netif_rx_sring *rxs;
+        grant_ref_t gref;
         int err;
 
         queue->tx_ring_ref = GRANT_INVALID_REF;
@@ -1502,10 +1503,10 @@ static int setup_netfront(struct xenbus_device *dev,
         SHARED_RING_INIT(txs);
         FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
 
-        err = xenbus_grant_ring(dev, virt_to_mfn(txs));
+        err = xenbus_grant_ring(dev, txs, 1, &gref);
         if (err < 0)
                 goto grant_tx_ring_fail;
-        queue->tx_ring_ref = err;
+        queue->tx_ring_ref = gref;
 
         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
         if (!rxs) {
@@ -1516,10 +1517,10 @@ static int setup_netfront(struct xenbus_device *dev,
         SHARED_RING_INIT(rxs);
         FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
 
-        err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
+        err = xenbus_grant_ring(dev, rxs, 1, &gref);
         if (err < 0)
                 goto grant_rx_ring_fail;
-        queue->rx_ring_ref = err;
+        queue->rx_ring_ref = gref;
 
         if (feature_split_evtchn)
                 err = setup_netfront_split(queue);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index b1ffebec9b9e..7cfd2db02deb 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -777,12 +777,13 @@ static int pcifront_publish_info(struct pcifront_device *pdev)
 {
         int err = 0;
         struct xenbus_transaction trans;
+        grant_ref_t gref;
 
-        err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
+        err = xenbus_grant_ring(pdev->xdev, pdev->sh_info, 1, &gref);
         if (err < 0)
                 goto out;
 
-        pdev->gnt_ref = err;
+        pdev->gnt_ref = gref;
 
         err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
         if (err)
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 78d95069ac6a..fad22caf0eff 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -714,6 +714,7 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
 {
         struct xenbus_device *dev = info->dev;
         struct vscsiif_sring *sring;
+        grant_ref_t gref;
         int err = -ENOMEM;
 
         /***** Frontend to Backend ring start *****/
@@ -726,14 +727,14 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
         SHARED_RING_INIT(sring);
         FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
 
-        err = xenbus_grant_ring(dev, virt_to_mfn(sring));
+        err = xenbus_grant_ring(dev, sring, 1, &gref);
         if (err < 0) {
                 free_page((unsigned long)sring);
                 xenbus_dev_fatal(dev, err,
                         "fail to grant shared ring (Front to Back)");
                 return err;
         }
-        info->ring_ref = err;
+        info->ring_ref = gref;
 
         err = xenbus_alloc_evtchn(dev, &info->evtchn);
         if (err) {
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index fe17c80ff4b7..98bc345f296e 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -113,7 +113,7 @@ static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
                  "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
                  gnt_ref, remote_evtchn);
 
-        err = xenbus_map_ring_valloc(pdev->xdev, gnt_ref, &vaddr);
+        err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
         if (err < 0) {
                 xenbus_dev_fatal(pdev->xdev, err,
                                 "Error mapping other domain page in ours.");
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 2eab75892c23..db42b59b0162 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -809,7 +809,7 @@ static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
         if (info->irq)
                 return -1;
 
-        err = xenbus_map_ring_valloc(info->dev, ring_ref, &area);
+        err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
         if (err)
                 return err;
 
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index ca744102b666..96b2011d25f3 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -52,17 +52,25 @@
 struct xenbus_map_node {
         struct list_head next;
         union {
-                struct vm_struct *area; /* PV */
-                struct page *page;      /* HVM */
+                struct {
+                        struct vm_struct *area;
+                } pv;
+                struct {
+                        struct page *pages[XENBUS_MAX_RING_PAGES];
+                        void *addr;
+                } hvm;
         };
-        grant_handle_t handle;
+        grant_handle_t handles[XENBUS_MAX_RING_PAGES];
+        unsigned int nr_handles;
 };
 
 static DEFINE_SPINLOCK(xenbus_valloc_lock);
 static LIST_HEAD(xenbus_valloc_pages);
 
 struct xenbus_ring_ops {
-        int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+        int (*map)(struct xenbus_device *dev,
+                   grant_ref_t *gnt_refs, unsigned int nr_grefs,
+                   void **vaddr);
         int (*unmap)(struct xenbus_device *dev, void *vaddr);
 };
 
@@ -355,17 +363,39 @@ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
 /**
  * xenbus_grant_ring
  * @dev: xenbus device
- * @ring_mfn: mfn of ring to grant
-
- * Grant access to the given @ring_mfn to the peer of the given device. Return
- * a grant reference on success, or -errno on error. On error, the device will
- * switch to XenbusStateClosing, and the error will be saved in the store.
+ * @vaddr: starting virtual address of the ring
+ * @nr_pages: number of pages to be granted
+ * @grefs: grant reference array to be filled in
+ *
+ * Grant access to the given @vaddr to the peer of the given device.
+ * Then fill in @grefs with grant references. Return 0 on success, or
+ * -errno on error. On error, the device will switch to
+ * XenbusStateClosing, and the error will be saved in the store.
  */
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn)
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+                      unsigned int nr_pages, grant_ref_t *grefs)
 {
-        int err = gnttab_grant_foreign_access(dev->otherend_id, ring_mfn, 0);
-        if (err < 0)
-                xenbus_dev_fatal(dev, err, "granting access to ring page");
+        int err;
+        int i, j;
+
+        for (i = 0; i < nr_pages; i++) {
+                unsigned long addr = (unsigned long)vaddr +
+                        (PAGE_SIZE * i);
+                err = gnttab_grant_foreign_access(dev->otherend_id,
+                                                  virt_to_mfn(addr), 0);
+                if (err < 0) {
+                        xenbus_dev_fatal(dev, err,
+                                         "granting access to ring page");
+                        goto fail;
+                }
+                grefs[i] = err;
+        }
+
+        return 0;
+
+fail:
+        for (j = 0; j < i; j++)
+                gnttab_end_foreign_access_ref(grefs[j], 0);
         return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_grant_ring);
@@ -419,62 +449,130 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
 /**
  * xenbus_map_ring_valloc
  * @dev: xenbus device
- * @gnt_ref: grant reference
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant references
  * @vaddr: pointer to address to be filled out by mapping
  *
- * Based on Rusty Russell's skeleton driver's map_page.
- * Map a page of memory into this domain from another domain's grant table.
- * xenbus_map_ring_valloc allocates a page of virtual address space, maps the
- * page to that address, and sets *vaddr to that address.
- * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
+ * Map @nr_grefs pages of memory into this domain from another
+ * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
+ * pages of virtual address space, maps the pages to that address, and
+ * sets *vaddr to that address. Returns 0 on success, and GNTST_*
+ * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * error. If an error is returned, device will switch to
  * XenbusStateClosing and the error message will be saved in XenStore.
  */
-int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+                           unsigned int nr_grefs, void **vaddr)
 {
-        return ring_ops->map(dev, gnt_ref, vaddr);
+        return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
 
+/* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned
+ * long), e.g. 32-on-64. Caller is responsible for preparing the
+ * right array to feed into this function */
+static int __xenbus_map_ring(struct xenbus_device *dev,
+                             grant_ref_t *gnt_refs,
+                             unsigned int nr_grefs,
+                             grant_handle_t *handles,
+                             phys_addr_t *addrs,
+                             unsigned int flags,
+                             bool *leaked)
+{
+        struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+        int i, j;
+        int err = GNTST_okay;
+
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
+
+        for (i = 0; i < nr_grefs; i++) {
+                memset(&map[i], 0, sizeof(map[i]));
+                gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
+                                  dev->otherend_id);
+                handles[i] = INVALID_GRANT_HANDLE;
+        }
+
+        gnttab_batch_map(map, i);
+
+        for (i = 0; i < nr_grefs; i++) {
+                if (map[i].status != GNTST_okay) {
+                        err = map[i].status;
+                        xenbus_dev_fatal(dev, map[i].status,
+                                         "mapping in shared page %d from domain %d",
+                                         gnt_refs[i], dev->otherend_id);
+                        goto fail;
+                } else
+                        handles[i] = map[i].handle;
+        }
+
+        return GNTST_okay;
+
+ fail:
+        for (i = j = 0; i < nr_grefs; i++) {
+                if (handles[i] != INVALID_GRANT_HANDLE) {
+                        memset(&unmap[j], 0, sizeof(unmap[j]));
+                        gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+                                            GNTMAP_host_map, handles[i]);
+                        j++;
+                }
+        }
+
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+                BUG();
+
+        *leaked = false;
+        for (i = 0; i < j; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        *leaked = true;
+                        break;
+                }
+        }
+
+        return err;
+}
+
 static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
-                                     int gnt_ref, void **vaddr)
+                                     grant_ref_t *gnt_refs,
+                                     unsigned int nr_grefs,
+                                     void **vaddr)
 {
-        struct gnttab_map_grant_ref op = {
-                .flags = GNTMAP_host_map | GNTMAP_contains_pte,
-                .ref = gnt_ref,
-                .dom = dev->otherend_id,
-        };
         struct xenbus_map_node *node;
         struct vm_struct *area;
-        pte_t *pte;
+        pte_t *ptes[XENBUS_MAX_RING_PAGES];
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        int err = GNTST_okay;
+        int i;
+        bool leaked;
 
         *vaddr = NULL;
 
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
+
         node = kzalloc(sizeof(*node), GFP_KERNEL);
         if (!node)
                 return -ENOMEM;
 
-        area = alloc_vm_area(PAGE_SIZE, &pte);
+        area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes);
         if (!area) {
                 kfree(node);
                 return -ENOMEM;
         }
 
-        op.host_addr = arbitrary_virt_to_machine(pte).maddr;
+        for (i = 0; i < nr_grefs; i++)
+                phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
 
-        gnttab_batch_map(&op, 1);
-
-        if (op.status != GNTST_okay) {
-                free_vm_area(area);
-                kfree(node);
-                xenbus_dev_fatal(dev, op.status,
-                        "mapping in shared page %d from domain %d",
-                        gnt_ref, dev->otherend_id);
-                return op.status;
-        }
+        err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
+                                phys_addrs,
+                                GNTMAP_host_map | GNTMAP_contains_pte,
+                                &leaked);
+        if (err)
+                goto failed;
 
-        node->handle = op.handle;
-        node->area = area;
+        node->nr_handles = nr_grefs;
+        node->pv.area = area;
 
         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
@@ -482,14 +580,33 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
 
         *vaddr = area->addr;
         return 0;
+
+failed:
+        if (!leaked)
+                free_vm_area(area);
+        else
+                pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
+
+        kfree(node);
+        return err;
 }
 
 static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
-                                      int gnt_ref, void **vaddr)
+                                      grant_ref_t *gnt_ref,
+                                      unsigned int nr_grefs,
+                                      void **vaddr)
 {
         struct xenbus_map_node *node;
+        int i;
         int err;
         void *addr;
+        bool leaked = false;
+        /* Why do we need two arrays? See comment of __xenbus_map_ring */
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        unsigned long addrs[XENBUS_MAX_RING_PAGES];
+
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
         *vaddr = NULL;
 
@@ -497,15 +614,32 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
         if (!node)
                 return -ENOMEM;
 
-        err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+        err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
+                                       false /* lowmem */);
         if (err)
                 goto out_err;
 
-        addr = pfn_to_kaddr(page_to_pfn(node->page));
+        for (i = 0; i < nr_grefs; i++) {
+                unsigned long pfn = page_to_pfn(node->hvm.pages[i]);
+                phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+                addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
+        }
+
+        err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
+                                phys_addrs, GNTMAP_host_map, &leaked);
+        node->nr_handles = nr_grefs;
 
-        err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
         if (err)
-                goto out_err_free_ballooned_pages;
+                goto out_free_ballooned_pages;
+
+        addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP,
+                    PAGE_KERNEL);
+        if (!addr) {
+                err = -ENOMEM;
+                goto out_xenbus_unmap_ring;
+        }
+
+        node->hvm.addr = addr;
 
         spin_lock(&xenbus_valloc_lock);
         list_add(&node->next, &xenbus_valloc_pages);
@@ -514,8 +648,16 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
         *vaddr = addr;
         return 0;
 
- out_err_free_ballooned_pages:
-        free_xenballooned_pages(1, &node->page);
+ out_xenbus_unmap_ring:
+        if (!leaked)
+                xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+                                  addrs);
+        else
+                pr_alert("leaking %p size %u page(s)",
+                         addr, nr_grefs);
+ out_free_ballooned_pages:
+        if (!leaked)
+                free_xenballooned_pages(nr_grefs, node->hvm.pages);
  out_err:
         kfree(node);
         return err;
@@ -525,35 +667,37 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
 /**
  * xenbus_map_ring
  * @dev: xenbus device
- * @gnt_ref: grant reference
- * @handle: pointer to grant handle to be filled
- * @vaddr: address to be mapped to
+ * @gnt_refs: grant reference array
+ * @nr_grefs: number of grant reference
+ * @handles: pointer to grant handle to be filled
+ * @vaddrs: addresses to be mapped to
+ * @leaked: fail to clean up a failed map, caller should not free vaddr
  *
- * Map a page of memory into this domain from another domain's grant table.
+ * Map pages of memory into this domain from another domain's grant table.
  * xenbus_map_ring does not allocate the virtual address space (you must do
- * this yourself!). It only maps in the page to the specified address.
+ * this yourself!). It only maps in the pages to the specified address.
  * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
- * or -ENOMEM on error. If an error is returned, device will switch to
- * XenbusStateClosing and the error message will be saved in XenStore.
+ * or -ENOMEM / -EINVAL on error. If an error is returned, device will switch to
+ * XenbusStateClosing and the first error message will be saved in XenStore.
+ * Further more if we fail to map the ring, caller should check @leaked.
+ * If @leaked is not zero it means xenbus_map_ring fails to clean up, caller
+ * should not free the address space of @vaddr.
  */
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-                    grant_handle_t *handle, void *vaddr)
+int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+                    unsigned int nr_grefs, grant_handle_t *handles,
+                    unsigned long *vaddrs, bool *leaked)
 {
-        struct gnttab_map_grant_ref op;
-
-        gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map, gnt_ref,
-                          dev->otherend_id);
+        phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
+        int i;
 
-        gnttab_batch_map(&op, 1);
+        if (nr_grefs > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
-        if (op.status != GNTST_okay) {
-                xenbus_dev_fatal(dev, op.status,
-                        "mapping in shared page %d from domain %d",
-                        gnt_ref, dev->otherend_id);
-        } else
-                *handle = op.handle;
+        for (i = 0; i < nr_grefs; i++)
+                phys_addrs[i] = (unsigned long)vaddrs[i];
 
-        return op.status;
+        return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
+                                 phys_addrs, GNTMAP_host_map, leaked);
 }
 EXPORT_SYMBOL_GPL(xenbus_map_ring);
 
@@ -579,14 +723,15 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
 static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
 {
         struct xenbus_map_node *node;
-        struct gnttab_unmap_grant_ref op = {
-                .host_addr = (unsigned long)vaddr,
-        };
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
         unsigned int level;
+        int i;
+        bool leaked = false;
+        int err;
 
         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
-                if (node->area->addr == vaddr) {
+                if (node->pv.area->addr == vaddr) {
                         list_del(&node->next);
                         goto found;
                 }
@@ -601,22 +746,41 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
                 return GNTST_bad_virt_addr;
         }
 
-        op.handle = node->handle;
-        op.host_addr = arbitrary_virt_to_machine(
-                lookup_address((unsigned long)vaddr, &level)).maddr;
+        for (i = 0; i < node->nr_handles; i++) {
+                unsigned long addr;
+
+                memset(&unmap[i], 0, sizeof(unmap[i]));
+                addr = (unsigned long)vaddr + (PAGE_SIZE * i);
+                unmap[i].host_addr = arbitrary_virt_to_machine(
+                        lookup_address(addr, &level)).maddr;
+                unmap[i].dev_bus_addr = 0;
+                unmap[i].handle = node->handles[i];
+        }
 
-        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                 BUG();
 
-        if (op.status == GNTST_okay)
-                free_vm_area(node->area);
+        err = GNTST_okay;
+        leaked = false;
+        for (i = 0; i < node->nr_handles; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        leaked = true;
+                        xenbus_dev_error(dev, unmap[i].status,
+                                "unmapping page at handle %d error %d",
+                                node->handles[i], unmap[i].status);
+                        err = unmap[i].status;
+                        break;
+                }
+        }
+
+        if (!leaked)
+                free_vm_area(node->pv.area);
         else
-                xenbus_dev_error(dev, op.status,
-                        "unmapping page at handle %d error %d",
-                        node->handle, op.status);
+                pr_alert("leaking VM area %p size %u page(s)",
+                         node->pv.area, node->nr_handles);
 
         kfree(node);
-        return op.status;
+        return err;
 }
 
 static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
@@ -624,10 +788,12 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
         int rv;
         struct xenbus_map_node *node;
         void *addr;
+        unsigned long addrs[XENBUS_MAX_RING_PAGES];
+        int i;
 
         spin_lock(&xenbus_valloc_lock);
         list_for_each_entry(node, &xenbus_valloc_pages, next) {
-                addr = pfn_to_kaddr(page_to_pfn(node->page));
+                addr = node->hvm.addr;
                 if (addr == vaddr) {
                         list_del(&node->next);
                         goto found;
@@ -643,12 +809,16 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
                 return GNTST_bad_virt_addr;
         }
 
-        rv = xenbus_unmap_ring(dev, node->handle, addr);
+        for (i = 0; i < node->nr_handles; i++)
+                addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i]));
 
+        rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
+                               addrs);
         if (!rv)
-                free_xenballooned_pages(1, &node->page);
+                vunmap(vaddr);
         else
-                WARN(1, "Leaking %p\n", vaddr);
+                WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
+                     node->nr_handles);
 
         kfree(node);
         return rv;
@@ -657,29 +827,44 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
 /**
  * xenbus_unmap_ring
  * @dev: xenbus device
- * @handle: grant handle
- * @vaddr: addr to unmap
+ * @handles: grant handle array
+ * @nr_handles: number of handles in the array
+ * @vaddrs: addresses to unmap
  *
- * Unmap a page of memory in this domain that was imported from another domain.
+ * Unmap memory in this domain that was imported from another domain.
  * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
 int xenbus_unmap_ring(struct xenbus_device *dev,
-                      grant_handle_t handle, void *vaddr)
+                      grant_handle_t *handles, unsigned int nr_handles,
+                      unsigned long *vaddrs)
 {
-        struct gnttab_unmap_grant_ref op;
+        struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
+        int i;
+        int err;
 
-        gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map, handle);
+        if (nr_handles > XENBUS_MAX_RING_PAGES)
+                return -EINVAL;
 
-        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
+        for (i = 0; i < nr_handles; i++)
+                gnttab_set_unmap_op(&unmap[i], vaddrs[i],
+                                    GNTMAP_host_map, handles[i]);
+
+        if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
                 BUG();
 
-        if (op.status != GNTST_okay)
-                xenbus_dev_error(dev, op.status,
-                        "unmapping page at handle %d error %d",
-                        handle, op.status);
+        err = GNTST_okay;
+        for (i = 0; i < nr_handles; i++) {
+                if (unmap[i].status != GNTST_okay) {
+                        xenbus_dev_error(dev, unmap[i].status,
+                                "unmapping page at handle %d error %d",
+                                handles[i], unmap[i].status);
+                        err = unmap[i].status;
+                        break;
+                }
+        }
 
-        return op.status;
+        return err;
 }
 EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
 
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index b0f1c9e5d687..289c0b5f08fe 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -46,6 +46,10 @@
 #include <xen/interface/io/xenbus.h>
 #include <xen/interface/io/xs_wire.h>
 
+#define XENBUS_MAX_RING_PAGE_ORDER 4
+#define XENBUS_MAX_RING_PAGES      (1U << XENBUS_MAX_RING_PAGE_ORDER)
+#define INVALID_GRANT_HANDLE       (~0U)
+
 /* Register callback to watch this node. */
 struct xenbus_watch
 {
@@ -199,15 +203,19 @@ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch,
                          const char *pathfmt, ...);
 
 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state new_state);
-int xenbus_grant_ring(struct xenbus_device *dev, unsigned long ring_mfn);
-int xenbus_map_ring_valloc(struct xenbus_device *dev,
-                           int gnt_ref, void **vaddr);
-int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
-                    grant_handle_t *handle, void *vaddr);
+int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
+                      unsigned int nr_pages, grant_ref_t *grefs);
+int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
+                           unsigned int nr_grefs, void **vaddr);
+int xenbus_map_ring(struct xenbus_device *dev,
+                    grant_ref_t *gnt_refs, unsigned int nr_grefs,
+                    grant_handle_t *handles, unsigned long *vaddrs,
+                    bool *leaked);
 
 int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr);
 int xenbus_unmap_ring(struct xenbus_device *dev,
-                      grant_handle_t handle, void *vaddr);
+                      grant_handle_t *handles, unsigned int nr_handles,
+                      unsigned long *vaddrs);
 
 int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port);
 int xenbus_free_evtchn(struct xenbus_device *dev, int port);
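[Editor's note: the backend half is symmetric under the new interface.
A hypothetical backend-side sketch, not from this series; gref0/gref1
would normally be read from xenstore and the my_* names are
placeholders.]

static int my_map_ring(struct xenbus_device *dev, grant_ref_t gref0,
                       grant_ref_t gref1, void **ring)
{
        grant_ref_t grefs[2] = { gref0, gref1 };

        /* Allocates a two-page virtual address range, maps both grant
         * references into it and stores the address in *ring. */
        return xenbus_map_ring_valloc(dev, grefs, 2, ring);
}

static int my_unmap_ring(struct xenbus_device *dev, void *ring)
{
        /* Tear-down only needs the mapped address: the xenbus core
         * looks up the grant handles it tracked at map time. */
        return xenbus_unmap_ring_vfree(dev, ring);
}

Note the design choice visible throughout the patch: callers deal only
in grant-reference arrays and a single virtual address, while the
per-page grant handles stay hidden inside xenbus_client.c's
xenbus_map_node bookkeeping.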