aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJulien Grall <julien.grall@citrix.com>2015-08-07 12:34:41 -0400
committerJulien Grall <julien.grall@citrix.com>2015-09-08 13:03:54 -0400
commita13d7201d7deedcbb6ac6efa94a1a7d34d3d79ec (patch)
treeccbf55b3856cac4525d13acc671a55398288cfec
parent859e3267c515d0cc7cc11528e80a2b7f3edc3bd9 (diff)
xen/privcmd: Further s/MFN/GFN/ clean-up
The privcmd code is mixing the usage of GFN and MFN within the same functions, which makes the code difficult to understand when you only work with auto-translated guests. The privcmd driver is only dealing with GFNs, so replace all mentions of MFN with GFN. The ioctl structure used to map foreign changes has been left unchanged, given that userspace is using it. Nonetheless, add a comment to explain the expected value within the "mfn" field. Signed-off-by: Julien Grall <julien.grall@citrix.com> Reviewed-by: David Vrabel <david.vrabel@citrix.com> Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-rw-r--r--arch/arm/xen/enlighten.c18
-rw-r--r--arch/x86/xen/mmu.c32
-rw-r--r--drivers/xen/privcmd.c44
-rw-r--r--drivers/xen/xlate_mmu.c18
-rw-r--r--include/uapi/xen/privcmd.h4
-rw-r--r--include/xen/xen-ops.h10
6 files changed, 65 insertions, 61 deletions
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c50c8d33f874..eeeab074e154 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;
49 49
50static __initdata struct device_node *xen_node; 50static __initdata struct device_node *xen_node;
51 51
52int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 52int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
53 unsigned long addr, 53 unsigned long addr,
54 xen_pfn_t *mfn, int nr, 54 xen_pfn_t *gfn, int nr,
55 int *err_ptr, pgprot_t prot, 55 int *err_ptr, pgprot_t prot,
56 unsigned domid, 56 unsigned domid,
57 struct page **pages) 57 struct page **pages)
58{ 58{
59 return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 59 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
60 prot, domid, pages); 60 prot, domid, pages);
61} 61}
62EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 62EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
63 63
64/* Not used by XENFEAT_auto_translated guests. */ 64/* Not used by XENFEAT_auto_translated guests. */
65int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 65int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
66 unsigned long addr, 66 unsigned long addr,
67 xen_pfn_t mfn, int nr, 67 xen_pfn_t gfn, int nr,
68 pgprot_t prot, unsigned domid, 68 pgprot_t prot, unsigned domid,
69 struct page **pages) 69 struct page **pages)
70{ 70{
71 return -ENOSYS; 71 return -ENOSYS;
72} 72}
73EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 73EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
74 74
75int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 75int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
76 int nr, struct page **pages) 76 int nr, struct page **pages)
77{ 77{
78 return xen_xlate_unmap_gfn_range(vma, nr, pages); 78 return xen_xlate_unmap_gfn_range(vma, nr, pages);
79} 79}
80EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 80EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
81 81
82static void xen_percpu_init(void) 82static void xen_percpu_init(void)
83{ 83{
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2c50b445884e..9c479fe40459 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2812 return 0; 2812 return 0;
2813} 2813}
2814 2814
2815static int do_remap_mfn(struct vm_area_struct *vma, 2815static int do_remap_gfn(struct vm_area_struct *vma,
2816 unsigned long addr, 2816 unsigned long addr,
2817 xen_pfn_t *mfn, int nr, 2817 xen_pfn_t *gfn, int nr,
2818 int *err_ptr, pgprot_t prot, 2818 int *err_ptr, pgprot_t prot,
2819 unsigned domid, 2819 unsigned domid,
2820 struct page **pages) 2820 struct page **pages)
@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
2830 if (xen_feature(XENFEAT_auto_translated_physmap)) { 2830 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2831#ifdef CONFIG_XEN_PVH 2831#ifdef CONFIG_XEN_PVH
2832 /* We need to update the local page tables and the xen HAP */ 2832 /* We need to update the local page tables and the xen HAP */
2833 return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 2833 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
2834 prot, domid, pages); 2834 prot, domid, pages);
2835#else 2835#else
2836 return -EINVAL; 2836 return -EINVAL;
2837#endif 2837#endif
2838 } 2838 }
2839 2839
2840 rmd.mfn = mfn; 2840 rmd.mfn = gfn;
2841 rmd.prot = prot; 2841 rmd.prot = prot;
2842 /* We use the err_ptr to indicate if there we are doing a contigious 2842 /* We use the err_ptr to indicate if there we are doing a contigious
2843 * mapping or a discontigious mapping. */ 2843 * mapping or a discontigious mapping. */
@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
2865 batch_left, &done, domid); 2865 batch_left, &done, domid);
2866 2866
2867 /* 2867 /*
2868 * @err_ptr may be the same buffer as @mfn, so 2868 * @err_ptr may be the same buffer as @gfn, so
2869 * only clear it after each chunk of @mfn is 2869 * only clear it after each chunk of @gfn is
2870 * used. 2870 * used.
2871 */ 2871 */
2872 if (err_ptr) { 2872 if (err_ptr) {
@@ -2896,19 +2896,19 @@ out:
2896 return err < 0 ? err : mapped; 2896 return err < 0 ? err : mapped;
2897} 2897}
2898 2898
2899int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2899int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
2900 unsigned long addr, 2900 unsigned long addr,
2901 xen_pfn_t mfn, int nr, 2901 xen_pfn_t gfn, int nr,
2902 pgprot_t prot, unsigned domid, 2902 pgprot_t prot, unsigned domid,
2903 struct page **pages) 2903 struct page **pages)
2904{ 2904{
2905 return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages); 2905 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
2906} 2906}
2907EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2907EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
2908 2908
2909int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 2909int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
2910 unsigned long addr, 2910 unsigned long addr,
2911 xen_pfn_t *mfn, int nr, 2911 xen_pfn_t *gfn, int nr,
2912 int *err_ptr, pgprot_t prot, 2912 int *err_ptr, pgprot_t prot,
2913 unsigned domid, struct page **pages) 2913 unsigned domid, struct page **pages)
2914{ 2914{
@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
2917 * cause of "wrong memory was mapped in". 2917 * cause of "wrong memory was mapped in".
2918 */ 2918 */
2919 BUG_ON(err_ptr == NULL); 2919 BUG_ON(err_ptr == NULL);
2920 return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages); 2920 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
2921} 2921}
2922EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 2922EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
2923 2923
2924 2924
2925/* Returns: 0 success */ 2925/* Returns: 0 success */
2926int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 2926int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
2927 int numpgs, struct page **pages) 2927 int numpgs, struct page **pages)
2928{ 2928{
2929 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) 2929 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2935 return -EINVAL; 2935 return -EINVAL;
2936#endif 2936#endif
2937} 2937}
2938EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 2938EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..c6deb87c5c69 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
193 return ret; 193 return ret;
194} 194}
195 195
196struct mmap_mfn_state { 196struct mmap_gfn_state {
197 unsigned long va; 197 unsigned long va;
198 struct vm_area_struct *vma; 198 struct vm_area_struct *vma;
199 domid_t domain; 199 domid_t domain;
200}; 200};
201 201
202static int mmap_mfn_range(void *data, void *state) 202static int mmap_gfn_range(void *data, void *state)
203{ 203{
204 struct privcmd_mmap_entry *msg = data; 204 struct privcmd_mmap_entry *msg = data;
205 struct mmap_mfn_state *st = state; 205 struct mmap_gfn_state *st = state;
206 struct vm_area_struct *vma = st->vma; 206 struct vm_area_struct *vma = st->vma;
207 int rc; 207 int rc;
208 208
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) 216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
217 return -EINVAL; 217 return -EINVAL;
218 218
219 rc = xen_remap_domain_mfn_range(vma, 219 rc = xen_remap_domain_gfn_range(vma,
220 msg->va & PAGE_MASK, 220 msg->va & PAGE_MASK,
221 msg->mfn, msg->npages, 221 msg->mfn, msg->npages,
222 vma->vm_page_prot, 222 vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
236 struct vm_area_struct *vma; 236 struct vm_area_struct *vma;
237 int rc; 237 int rc;
238 LIST_HEAD(pagelist); 238 LIST_HEAD(pagelist);
239 struct mmap_mfn_state state; 239 struct mmap_gfn_state state;
240 240
241 /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 241 /* We only support privcmd_ioctl_mmap_batch for auto translated. */
242 if (xen_feature(XENFEAT_auto_translated_physmap)) 242 if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
273 273
274 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), 274 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
275 &pagelist, 275 &pagelist,
276 mmap_mfn_range, &state); 276 mmap_gfn_range, &state);
277 277
278 278
279out_up: 279out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
299 int global_error; 299 int global_error;
300 int version; 300 int version;
301 301
302 /* User-space mfn array to store errors in the second pass for V1. */ 302 /* User-space gfn array to store errors in the second pass for V1. */
303 xen_pfn_t __user *user_mfn; 303 xen_pfn_t __user *user_gfn;
304 /* User-space int array to store errors in the second pass for V2. */ 304 /* User-space int array to store errors in the second pass for V2. */
305 int __user *user_err; 305 int __user *user_err;
306}; 306};
307 307
308/* auto translated dom0 note: if domU being created is PV, then mfn is 308/* auto translated dom0 note: if domU being created is PV, then gfn is
309 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 309 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
310 */ 310 */
311static int mmap_batch_fn(void *data, int nr, void *state) 311static int mmap_batch_fn(void *data, int nr, void *state)
312{ 312{
313 xen_pfn_t *mfnp = data; 313 xen_pfn_t *gfnp = data;
314 struct mmap_batch_state *st = state; 314 struct mmap_batch_state *st = state;
315 struct vm_area_struct *vma = st->vma; 315 struct vm_area_struct *vma = st->vma;
316 struct page **pages = vma->vm_private_data; 316 struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
321 cur_pages = &pages[st->index]; 321 cur_pages = &pages[st->index];
322 322
323 BUG_ON(nr < 0); 323 BUG_ON(nr < 0);
324 ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, 324 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
325 (int *)mfnp, st->vma->vm_page_prot, 325 (int *)gfnp, st->vma->vm_page_prot,
326 st->domain, cur_pages); 326 st->domain, cur_pages);
327 327
328 /* Adjust the global_error? */ 328 /* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
347 347
348 if (st->version == 1) { 348 if (st->version == 1) {
349 if (err) { 349 if (err) {
350 xen_pfn_t mfn; 350 xen_pfn_t gfn;
351 351
352 ret = get_user(mfn, st->user_mfn); 352 ret = get_user(gfn, st->user_gfn);
353 if (ret < 0) 353 if (ret < 0)
354 return ret; 354 return ret;
355 /* 355 /*
356 * V1 encodes the error codes in the 32bit top 356 * V1 encodes the error codes in the 32bit top
357 * nibble of the mfn (with its known 357 * nibble of the gfn (with its known
358 * limitations vis-a-vis 64 bit callers). 358 * limitations vis-a-vis 64 bit callers).
359 */ 359 */
360 mfn |= (err == -ENOENT) ? 360 gfn |= (err == -ENOENT) ?
361 PRIVCMD_MMAPBATCH_PAGED_ERROR : 361 PRIVCMD_MMAPBATCH_PAGED_ERROR :
362 PRIVCMD_MMAPBATCH_MFN_ERROR; 362 PRIVCMD_MMAPBATCH_MFN_ERROR;
363 return __put_user(mfn, st->user_mfn++); 363 return __put_user(gfn, st->user_gfn++);
364 } else 364 } else
365 st->user_mfn++; 365 st->user_gfn++;
366 } else { /* st->version == 2 */ 366 } else { /* st->version == 2 */
367 if (err) 367 if (err)
368 return __put_user(err, st->user_err++); 368 return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
388 return 0; 388 return 0;
389} 389}
390 390
391/* Allocate pfns that are then mapped with gmfns from foreign domid. Update 391/* Allocate pfns that are then mapped with gfns from foreign domid. Update
392 * the vma with the page info to use later. 392 * the vma with the page info to use later.
393 * Returns: 0 if success, otherwise -errno 393 * Returns: 0 if success, otherwise -errno
394 */ 394 */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
526 526
527 if (state.global_error) { 527 if (state.global_error) {
528 /* Write back errors in second pass. */ 528 /* Write back errors in second pass. */
529 state.user_mfn = (xen_pfn_t *)m.arr; 529 state.user_gfn = (xen_pfn_t *)m.arr;
530 state.user_err = m.err; 530 state.user_err = m.err;
531 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), 531 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
532 &pagelist, mmap_return_errors, &state); 532 &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
588 return; 588 return;
589 589
590 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); 590 rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
591 if (rc == 0) 591 if (rc == 0)
592 free_xenballooned_pages(numpgs, pages); 592 free_xenballooned_pages(numpgs, pages);
593 else 593 else
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 58a5389aec89..cff23872c5a9 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -38,8 +38,8 @@
38#include <xen/interface/xen.h> 38#include <xen/interface/xen.h>
39#include <xen/interface/memory.h> 39#include <xen/interface/memory.h>
40 40
41/* map fgmfn of domid to lpfn in the current domain */ 41/* map fgfn of domid to lpfn in the current domain */
42static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 42static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
43 unsigned int domid) 43 unsigned int domid)
44{ 44{
45 int rc; 45 int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
49 .size = 1, 49 .size = 1,
50 .space = XENMAPSPACE_gmfn_foreign, 50 .space = XENMAPSPACE_gmfn_foreign,
51 }; 51 };
52 xen_ulong_t idx = fgmfn; 52 xen_ulong_t idx = fgfn;
53 xen_pfn_t gpfn = lpfn; 53 xen_pfn_t gpfn = lpfn;
54 int err = 0; 54 int err = 0;
55 55
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
62} 62}
63 63
64struct remap_data { 64struct remap_data {
65 xen_pfn_t *fgmfn; /* foreign domain's gmfn */ 65 xen_pfn_t *fgfn; /* foreign domain's gfn */
66 pgprot_t prot; 66 pgprot_t prot;
67 domid_t domid; 67 domid_t domid;
68 struct vm_area_struct *vma; 68 struct vm_area_struct *vma;
69 int index; 69 int index;
70 struct page **pages; 70 struct page **pages;
71 struct xen_remap_mfn_info *info; 71 struct xen_remap_gfn_info *info;
72 int *err_ptr; 72 int *err_ptr;
73 int mapped; 73 int mapped;
74}; 74};
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
82 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 82 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
83 int rc; 83 int rc;
84 84
85 rc = map_foreign_page(pfn, *info->fgmfn, info->domid); 85 rc = map_foreign_page(pfn, *info->fgfn, info->domid);
86 *info->err_ptr++ = rc; 86 *info->err_ptr++ = rc;
87 if (!rc) { 87 if (!rc) {
88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte);
89 info->mapped++; 89 info->mapped++;
90 } 90 }
91 info->fgmfn++; 91 info->fgfn++;
92 92
93 return 0; 93 return 0;
94} 94}
95 95
96int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 96int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
97 unsigned long addr, 97 unsigned long addr,
98 xen_pfn_t *mfn, int nr, 98 xen_pfn_t *gfn, int nr,
99 int *err_ptr, pgprot_t prot, 99 int *err_ptr, pgprot_t prot,
100 unsigned domid, 100 unsigned domid,
101 struct page **pages) 101 struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
108 x86 PVOPS */ 108 x86 PVOPS */
109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
110 110
111 data.fgmfn = mfn; 111 data.fgfn = gfn;
112 data.prot = prot; 112 data.prot = prot;
113 data.domid = domid; 113 data.domid = domid;
114 data.vma = vma; 114 data.vma = vma;
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index a85316811d79..7ddeeda93809 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
44 44
45struct privcmd_mmap_entry { 45struct privcmd_mmap_entry {
46 __u64 va; 46 __u64 va;
47 /*
48 * This should be a GFN. It's not possible to change the name because
49 * it's exposed to the user-space.
50 */
47 __u64 mfn; 51 __u64 mfn;
48 __u64 npages; 52 __u64 npages;
49}; 53};
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0ce4f32017ea..e4e214a5abd5 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
30struct vm_area_struct; 30struct vm_area_struct;
31 31
32/* 32/*
33 * xen_remap_domain_mfn_array() - map an array of foreign frames 33 * xen_remap_domain_gfn_array() - map an array of foreign frames
34 * @vma: VMA to map the pages into 34 * @vma: VMA to map the pages into
35 * @addr: Address at which to map the pages 35 * @addr: Address at which to map the pages
36 * @gfn: Array of GFNs to map 36 * @gfn: Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
46 * Returns the number of successfully mapped frames, or a -ve error 46 * Returns the number of successfully mapped frames, or a -ve error
47 * code. 47 * code.
48 */ 48 */
49int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 49int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
50 unsigned long addr, 50 unsigned long addr,
51 xen_pfn_t *gfn, int nr, 51 xen_pfn_t *gfn, int nr,
52 int *err_ptr, pgprot_t prot, 52 int *err_ptr, pgprot_t prot,
53 unsigned domid, 53 unsigned domid,
54 struct page **pages); 54 struct page **pages);
55 55
56/* xen_remap_domain_mfn_range() - map a range of foreign frames 56/* xen_remap_domain_gfn_range() - map a range of foreign frames
57 * @vma: VMA to map the pages into 57 * @vma: VMA to map the pages into
58 * @addr: Address at which to map the pages 58 * @addr: Address at which to map the pages
59 * @gfn: First GFN to map. 59 * @gfn: First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
65 * Returns the number of successfully mapped frames, or a -ve error 65 * Returns the number of successfully mapped frames, or a -ve error
66 * code. 66 * code.
67 */ 67 */
68int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 68int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
69 unsigned long addr, 69 unsigned long addr,
70 xen_pfn_t gfn, int nr, 70 xen_pfn_t gfn, int nr,
71 pgprot_t prot, unsigned domid, 71 pgprot_t prot, unsigned domid,
72 struct page **pages); 72 struct page **pages);
73int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 73int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
74 int numpgs, struct page **pages); 74 int numpgs, struct page **pages);
75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
76 unsigned long addr, 76 unsigned long addr,