aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJulien Grall <julien.grall@citrix.com>2015-10-13 12:50:13 -0400
committerDavid Vrabel <david.vrabel@citrix.com>2015-10-23 09:20:47 -0400
commit89bf4b4e4a8d9ab219cd03aada24e782cf0ac359 (patch)
treeb4ca7d378810e5321d78178f15c46e77effc8c87
parentf73314b28148f9ee9f89a0ae961c8fb36e3269fa (diff)
xenbus: Support multiple grants ring with 64KB
The PV ring may use multiple grants and expects them to be mapped contiguously in virtual memory. However, the current code relies on each Linux page being mapped to a single grant. On builds where Linux uses a page size different from the grant size (i.e. other than 4KB), each grant will always be mapped onto the first 4KB of a Linux page, which makes the final ring not contiguous in memory. This can be fixed by mapping multiple grants in the same Linux page. Signed-off-by: Julien Grall <julien.grall@citrix.com> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-rw-r--r--drivers/xen/xenbus/xenbus_client.c97
1 files changed, 72 insertions, 25 deletions
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index b77643361853..056da6ee1a35 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -49,6 +49,10 @@
49 49
50#include "xenbus_probe.h" 50#include "xenbus_probe.h"
51 51
52#define XENBUS_PAGES(_grants) (DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE))
53
54#define XENBUS_MAX_RING_PAGES (XENBUS_PAGES(XENBUS_MAX_RING_GRANTS))
55
52struct xenbus_map_node { 56struct xenbus_map_node {
53 struct list_head next; 57 struct list_head next;
54 union { 58 union {
@@ -56,7 +60,8 @@ struct xenbus_map_node {
56 struct vm_struct *area; 60 struct vm_struct *area;
57 } pv; 61 } pv;
58 struct { 62 struct {
59 struct page *pages[XENBUS_MAX_RING_GRANTS]; 63 struct page *pages[XENBUS_MAX_RING_PAGES];
64 unsigned long addrs[XENBUS_MAX_RING_GRANTS];
60 void *addr; 65 void *addr;
61 } hvm; 66 } hvm;
62 }; 67 };
@@ -591,19 +596,42 @@ failed:
591 return err; 596 return err;
592} 597}
593 598
599struct map_ring_valloc_hvm
600{
601 unsigned int idx;
602
603 /* Why do we need two arrays? See comment of __xenbus_map_ring */
604 phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
605 unsigned long addrs[XENBUS_MAX_RING_GRANTS];
606};
607
608static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
609 unsigned int goffset,
610 unsigned int len,
611 void *data)
612{
613 struct map_ring_valloc_hvm *info = data;
614 unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
615
616 info->phys_addrs[info->idx] = vaddr;
617 info->addrs[info->idx] = vaddr;
618
619 info->idx++;
620}
621
594static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev, 622static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
595 grant_ref_t *gnt_ref, 623 grant_ref_t *gnt_ref,
596 unsigned int nr_grefs, 624 unsigned int nr_grefs,
597 void **vaddr) 625 void **vaddr)
598{ 626{
599 struct xenbus_map_node *node; 627 struct xenbus_map_node *node;
600 int i;
601 int err; 628 int err;
602 void *addr; 629 void *addr;
603 bool leaked = false; 630 bool leaked = false;
604 /* Why do we need two arrays? See comment of __xenbus_map_ring */ 631 struct map_ring_valloc_hvm info = {
605 phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; 632 .idx = 0,
606 unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 633 };
634 unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
607 635
608 if (nr_grefs > XENBUS_MAX_RING_GRANTS) 636 if (nr_grefs > XENBUS_MAX_RING_GRANTS)
609 return -EINVAL; 637 return -EINVAL;
@@ -614,24 +642,22 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
614 if (!node) 642 if (!node)
615 return -ENOMEM; 643 return -ENOMEM;
616 644
617 err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages); 645 err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
618 if (err) 646 if (err)
619 goto out_err; 647 goto out_err;
620 648
621 for (i = 0; i < nr_grefs; i++) { 649 gnttab_foreach_grant(node->hvm.pages, nr_grefs,
622 unsigned long pfn = page_to_pfn(node->hvm.pages[i]); 650 xenbus_map_ring_setup_grant_hvm,
623 phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn); 651 &info);
624 addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
625 }
626 652
627 err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, 653 err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
628 phys_addrs, GNTMAP_host_map, &leaked); 654 info.phys_addrs, GNTMAP_host_map, &leaked);
629 node->nr_handles = nr_grefs; 655 node->nr_handles = nr_grefs;
630 656
631 if (err) 657 if (err)
632 goto out_free_ballooned_pages; 658 goto out_free_ballooned_pages;
633 659
634 addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP, 660 addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP,
635 PAGE_KERNEL); 661 PAGE_KERNEL);
636 if (!addr) { 662 if (!addr) {
637 err = -ENOMEM; 663 err = -ENOMEM;
@@ -649,14 +675,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
649 675
650 out_xenbus_unmap_ring: 676 out_xenbus_unmap_ring:
651 if (!leaked) 677 if (!leaked)
652 xenbus_unmap_ring(dev, node->handles, node->nr_handles, 678 xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
653 addrs);
654 else 679 else
655 pr_alert("leaking %p size %u page(s)", 680 pr_alert("leaking %p size %u page(s)",
656 addr, nr_grefs); 681 addr, nr_pages);
657 out_free_ballooned_pages: 682 out_free_ballooned_pages:
658 if (!leaked) 683 if (!leaked)
659 free_xenballooned_pages(nr_grefs, node->hvm.pages); 684 free_xenballooned_pages(nr_pages, node->hvm.pages);
660 out_err: 685 out_err:
661 kfree(node); 686 kfree(node);
662 return err; 687 return err;
@@ -782,13 +807,33 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
782 return err; 807 return err;
783} 808}
784 809
810struct unmap_ring_vfree_hvm
811{
812 unsigned int idx;
813 unsigned long addrs[XENBUS_MAX_RING_GRANTS];
814};
815
816static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
817 unsigned int goffset,
818 unsigned int len,
819 void *data)
820{
821 struct unmap_ring_vfree_hvm *info = data;
822
823 info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
824
825 info->idx++;
826}
827
785static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr) 828static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
786{ 829{
787 int rv; 830 int rv;
788 struct xenbus_map_node *node; 831 struct xenbus_map_node *node;
789 void *addr; 832 void *addr;
790 unsigned long addrs[XENBUS_MAX_RING_GRANTS]; 833 struct unmap_ring_vfree_hvm info = {
791 int i; 834 .idx = 0,
835 };
836 unsigned int nr_pages;
792 837
793 spin_lock(&xenbus_valloc_lock); 838 spin_lock(&xenbus_valloc_lock);
794 list_for_each_entry(node, &xenbus_valloc_pages, next) { 839 list_for_each_entry(node, &xenbus_valloc_pages, next) {
@@ -808,18 +853,20 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
808 return GNTST_bad_virt_addr; 853 return GNTST_bad_virt_addr;
809 } 854 }
810 855
811 for (i = 0; i < node->nr_handles; i++) 856 nr_pages = XENBUS_PAGES(node->nr_handles);
812 addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i])); 857
858 gnttab_foreach_grant(node->hvm.pages, node->nr_handles,
859 xenbus_unmap_ring_setup_grant_hvm,
860 &info);
813 861
814 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 862 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
815 addrs); 863 info.addrs);
816 if (!rv) { 864 if (!rv) {
817 vunmap(vaddr); 865 vunmap(vaddr);
818 free_xenballooned_pages(node->nr_handles, node->hvm.pages); 866 free_xenballooned_pages(nr_pages, node->hvm.pages);
819 } 867 }
820 else 868 else
821 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 869 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages);
822 node->nr_handles);
823 870
824 kfree(node); 871 kfree(node);
825 return rv; 872 return rv;