Diffstat (limited to 'arch')
 arch/arm/xen/enlighten.c |  90 ++--------------------------
 arch/x86/xen/mmu.c       | 110 ++--------------------------
 2 files changed, 6 insertions(+), 194 deletions(-)
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 263a2044c65b..5c04389fc9ef 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -53,105 +53,21 @@ EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);
 
 static __read_mostly int xen_events_irq = -1;
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
-			    unsigned int domid)
-{
-	int rc;
-	struct xen_add_to_physmap_range xatp = {
-		.domid = DOMID_SELF,
-		.foreign_domid = domid,
-		.size = 1,
-		.space = XENMAPSPACE_gmfn_foreign,
-	};
-	xen_ulong_t idx = fgmfn;
-	xen_pfn_t gpfn = lpfn;
-	int err = 0;
-
-	set_xen_guest_handle(xatp.idxs, &idx);
-	set_xen_guest_handle(xatp.gpfns, &gpfn);
-	set_xen_guest_handle(xatp.errs, &err);
-
-	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-	if (rc || err) {
-		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
-			rc, err, lpfn, fgmfn);
-		return 1;
-	}
-	return 0;
-}
-
-struct remap_data {
-	xen_pfn_t fgmfn; /* foreign domain's gmfn */
-	pgprot_t prot;
-	domid_t domid;
-	struct vm_area_struct *vma;
-	int index;
-	struct page **pages;
-	struct xen_remap_mfn_info *info;
-};
-
-static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-			void *data)
-{
-	struct remap_data *info = data;
-	struct page *page = info->pages[info->index++];
-	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
-
-	if (map_foreign_page(pfn, info->fgmfn, info->domid))
-		return -EFAULT;
-	set_pte_at(info->vma->vm_mm, addr, ptep, pte);
-
-	return 0;
-}
-
 int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 			       unsigned long addr,
 			       xen_pfn_t mfn, int nr,
 			       pgprot_t prot, unsigned domid,
 			       struct page **pages)
 {
-	int err;
-	struct remap_data data;
-
-	/* TBD: Batching, current sole caller only does page at a time */
-	if (nr > 1)
-		return -EINVAL;
-
-	data.fgmfn = mfn;
-	data.prot = prot;
-	data.domid = domid;
-	data.vma = vma;
-	data.index = 0;
-	data.pages = pages;
-	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
-				  remap_pte_fn, &data);
-	return err;
+	return xen_xlate_remap_gfn_range(vma, addr, mfn, nr,
+					 prot, domid, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 			       int nr, struct page **pages)
 {
-	int i;
-
-	for (i = 0; i < nr; i++) {
-		struct xen_remove_from_physmap xrp;
-		unsigned long rc, pfn;
-
-		pfn = page_to_pfn(pages[i]);
-
-		xrp.domid = DOMID_SELF;
-		xrp.gpfn = pfn;
-		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-		if (rc) {
-			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
-				pfn, rc);
-			return rc;
-		}
-	}
-	return 0;
+	return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
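Both arm entry points above now collapse to one-line wrappers around common helpers. Because this diffstat is limited to 'arch', the shared implementation itself (presumably added under drivers/xen/, e.g. xlate_mmu.c) is not shown here. The sketch below reconstructs its likely shape from the code removed in this file and in mmu.c below; the signature is fixed by the call sites, everything else is an assumption.

/*
 * Sketch only -- reconstructed from the removed arch code, not the
 * actual common implementation, which lies outside this diff.
 */
struct xlate_remap_data {
	xen_pfn_t fgfn;		/* foreign domain's gfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
};

static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token,
			    unsigned long addr, void *data)
{
	struct xlate_remap_data *remap = data;
	/* Back this VMA page with a local pfn... */
	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
	int rc;

	/* ...ask Xen to map the foreign gfn at that pfn, via
	 * XENMEM_add_to_physmap_range as in the removed helpers... */
	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
	if (rc)
		return rc;
	/* ...and only then install the pte. */
	set_pte_at(remap->vma->vm_mm, addr, ptep,
		   pte_mkspecial(pfn_pte(pfn, remap->prot)));
	return 0;
}

int xen_xlate_remap_gfn_range(struct vm_area_struct *vma,
			      unsigned long addr, xen_pfn_t gfn, int nr,
			      pgprot_t prot, unsigned int domid,
			      struct page **pages)
{
	struct xlate_remap_data data = {
		.fgfn = gfn, .prot = prot, .domid = domid,
		.vma = vma, .index = 0, .pages = pages,
	};

	/* Walk the range and install one pte per page, as the removed
	 * per-arch implementations did. */
	return apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				   xlate_map_pte_fn, &data);
}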
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index adca9e2b6553..3d536a56ddf1 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2436,95 +2436,6 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
-#ifdef CONFIG_XEN_PVH
-/*
- * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
- * space creating new guest on pvh dom0 and needing to map domU pages.
- */
-static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
-			    unsigned int domid)
-{
-	int rc, err = 0;
-	xen_pfn_t gpfn = lpfn;
-	xen_ulong_t idx = fgfn;
-
-	struct xen_add_to_physmap_range xatp = {
-		.domid = DOMID_SELF,
-		.foreign_domid = domid,
-		.size = 1,
-		.space = XENMAPSPACE_gmfn_foreign,
-	};
-	set_xen_guest_handle(xatp.idxs, &idx);
-	set_xen_guest_handle(xatp.gpfns, &gpfn);
-	set_xen_guest_handle(xatp.errs, &err);
-
-	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-	if (rc < 0)
-		return rc;
-	return err;
-}
-
-static int xlate_remove_from_p2m(unsigned long spfn, int count)
-{
-	struct xen_remove_from_physmap xrp;
-	int i, rc;
-
-	for (i = 0; i < count; i++) {
-		xrp.domid = DOMID_SELF;
-		xrp.gpfn = spfn + i;
-		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-		if (rc)
-			break;
-	}
-	return rc;
-}
-
-struct xlate_remap_data {
-	unsigned long fgfn; /* foreign domain's gfn */
-	pgprot_t prot;
-	domid_t domid;
-	int index;
-	struct page **pages;
-};
-
-static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-			    void *data)
-{
-	int rc;
-	struct xlate_remap_data *remap = data;
-	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
-	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
-
-	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
-	if (rc)
-		return rc;
-	native_set_pte(ptep, pteval);
-
-	return 0;
-}
-
-static int xlate_remap_gfn_range(struct vm_area_struct *vma,
-				 unsigned long addr, unsigned long mfn,
-				 int nr, pgprot_t prot, unsigned domid,
-				 struct page **pages)
-{
-	int err;
-	struct xlate_remap_data pvhdata;
-
-	BUG_ON(!pages);
-
-	pvhdata.fgfn = mfn;
-	pvhdata.prot = prot;
-	pvhdata.domid = domid;
-	pvhdata.index = 0;
-	pvhdata.pages = pages;
-	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
-				  xlate_map_pte_fn, &pvhdata);
-	flush_tlb_all();
-	return err;
-}
-#endif
-
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
@@ -2564,8 +2475,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
 #ifdef CONFIG_XEN_PVH
 		/* We need to update the local page tables and the xen HAP */
-		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
-					     domid, pages);
+		return xen_xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
+						 domid, pages);
 #else
 		return -EINVAL;
 #endif
@@ -2609,22 +2520,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 		return 0;
 
 #ifdef CONFIG_XEN_PVH
-	while (numpgs--) {
-		/*
-		 * The mmu has already cleaned up the process mmu
-		 * resources at this point (lookup_address will return
-		 * NULL).
-		 */
-		unsigned long pfn = page_to_pfn(pages[numpgs]);
-
-		xlate_remove_from_p2m(pfn, 1);
-	}
-	/*
-	 * We don't need to flush tlbs because as part of
-	 * xlate_remove_from_p2m, the hypervisor will do tlb flushes
-	 * after removing the p2m entries from the EPT/NPT
-	 */
-	return 0;
+	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
 #else
 	return -EINVAL;
 #endif
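The unmap side is simpler still: per the code removed from both files, each page's pfn is handed back to the hypervisor with XENMEM_remove_from_physmap, and no TLB flush is needed because Xen flushes after dropping the p2m entries from the EPT/NPT. A minimal sketch of what the shared xen_xlate_unmap_gfn_range presumably does (again an assumption; the real body is outside this 'arch'-limited diff):

/* Sketch only -- mirrors the removed xlate_remove_from_p2m() loop. */
int xen_xlate_unmap_gfn_range(struct vm_area_struct *vma,
			      int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp = {
			.domid = DOMID_SELF,
			.gpfn  = page_to_pfn(pages[i]),
		};
		int rc;

		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc)
			return rc;
	}
	/* No flush_tlb_all() here: the hypervisor flushes when it removes
	 * the p2m entries, per the comment deleted from mmu.c above. */
	return 0;
}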