author	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-10 19:21:11 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-09-10 19:21:11 -0400
commit	06ab838c2024db468855118087db16d8fa905ddc (patch)
tree	316ddb218bf3d5482bf16d38c129b71504780835
parent	573c577af079184ca523984e3279644eb37756a3 (diff)
parent	5f51042f876b88a3b81a135cc4ca0adb3d246112 (diff)
Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
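For orientation: the recurring change this series applies across the frontend drivers is to hand the grant-table code a guest frame number (GFN) via the new xen_page_to_gfn() helper instead of open-coding pfn_to_mfn(page_to_pfn(...)). A minimal before/after sketch, assuming only the helpers visible in the diff below; the grant_page_old()/grant_page_new() wrappers are illustrative, not taken from any driver:

#include <linux/mm.h>            /* struct page, page_to_pfn() */
#include <xen/page.h>            /* xen_page_to_gfn(), introduced below */
#include <xen/grant_table.h>

/* Old style: the grant was set up with what was loosely called an MFN. */
static void grant_page_old(grant_ref_t ref, domid_t otherend, struct page *page)
{
	unsigned long mfn = pfn_to_mfn(page_to_pfn(page));

	gnttab_grant_foreign_access_ref(ref, otherend, mfn, 0 /* writable */);
}

/* New style: the hypervisor interface takes a GFN, so name it as such. */
static void grant_page_new(grant_ref_t ref, domid_t otherend, struct page *page)
{
	gnttab_grant_foreign_access_ref(ref, otherend,
					xen_page_to_gfn(page), 0 /* writable */);
}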
-rw-r--r--	arch/arm/include/asm/xen/page.h	28
-rw-r--r--	arch/arm/xen/enlighten.c	18
-rw-r--r--	arch/arm/xen/mm.c	4
-rw-r--r--	arch/x86/include/asm/xen/page.h	39
-rw-r--r--	arch/x86/xen/mmu.c	32
-rw-r--r--	arch/x86/xen/smp.c	2
-rw-r--r--	drivers/block/xen-blkfront.c	6
-rw-r--r--	drivers/input/misc/xen-kbdfront.c	4
-rw-r--r--	drivers/net/xen-netback/netback.c	4
-rw-r--r--	drivers/net/xen-netfront.c	12
-rw-r--r--	drivers/scsi/xen-scsifront.c	10
-rw-r--r--	drivers/tty/hvc/hvc_xen.c	18
-rw-r--r--	drivers/video/fbdev/xen-fbfront.c	20
-rw-r--r--	drivers/xen/balloon.c	2
-rw-r--r--	drivers/xen/biomerge.c	6
-rw-r--r--	drivers/xen/events/events_base.c	2
-rw-r--r--	drivers/xen/events/events_fifo.c	4
-rw-r--r--	drivers/xen/gntalloc.c	3
-rw-r--r--	drivers/xen/manage.c	2
-rw-r--r--	drivers/xen/privcmd.c	44
-rw-r--r--	drivers/xen/swiotlb-xen.c	16
-rw-r--r--	drivers/xen/tmem.c	24
-rw-r--r--	drivers/xen/xenbus/xenbus_client.c	2
-rw-r--r--	drivers/xen/xenbus/xenbus_dev_backend.c	2
-rw-r--r--	drivers/xen/xenbus/xenbus_probe.c	16
-rw-r--r--	drivers/xen/xlate_mmu.c	18
-rw-r--r--	include/uapi/xen/privcmd.h	4
-rw-r--r--	include/xen/page.h	4
-rw-r--r--	include/xen/xen-ops.h	10
29 files changed, 198 insertions, 158 deletions
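One note before the patch itself: the swiotlb-xen.c and biomerge.c changes listed above deal with DMA (bus) addresses rather than guest frames, which is why they move to the pfn_to_bfn()/bfn_to_pfn() spelling. A sketch of the address math, mirroring xen_phys_to_bus() in the swiotlb hunk further down; the xen_paddr_to_dma() wrapper name is made up for illustration:

#include <linux/pfn.h>       /* PFN_DOWN() */
#include <linux/types.h>     /* phys_addr_t, dma_addr_t */
#include <xen/page.h>        /* pfn_to_bfn(), introduced below */

static inline dma_addr_t xen_paddr_to_dma(phys_addr_t paddr)
{
	unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));  /* bus frame number */
	dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;

	return dma | (paddr & ~PAGE_MASK);                /* keep the in-page offset */
}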
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 98b1084f8282..127956353b00 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -34,7 +34,19 @@ typedef struct xpaddr {
34unsigned long __pfn_to_mfn(unsigned long pfn); 34unsigned long __pfn_to_mfn(unsigned long pfn);
35extern struct rb_root phys_to_mach; 35extern struct rb_root phys_to_mach;
36 36
37static inline unsigned long pfn_to_mfn(unsigned long pfn) 37/* Pseudo-physical <-> Guest conversion */
38static inline unsigned long pfn_to_gfn(unsigned long pfn)
39{
40 return pfn;
41}
42
43static inline unsigned long gfn_to_pfn(unsigned long gfn)
44{
45 return gfn;
46}
47
48/* Pseudo-physical <-> BUS conversion */
49static inline unsigned long pfn_to_bfn(unsigned long pfn)
38{ 50{
39 unsigned long mfn; 51 unsigned long mfn;
40 52
@@ -47,16 +59,16 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
47 return pfn; 59 return pfn;
48} 60}
49 61
50static inline unsigned long mfn_to_pfn(unsigned long mfn) 62static inline unsigned long bfn_to_pfn(unsigned long bfn)
51{ 63{
52 return mfn; 64 return bfn;
53} 65}
54 66
55#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn) 67#define bfn_to_local_pfn(bfn) bfn_to_pfn(bfn)
56 68
57/* VIRT <-> MACHINE conversion */ 69/* VIRT <-> GUEST conversion */
58#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) 70#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
59#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 71#define gfn_to_virt(m) (__va(gfn_to_pfn(m) << PAGE_SHIFT))
60 72
61/* Only used in PV code. But ARM guests are always HVM. */ 73/* Only used in PV code. But ARM guests are always HVM. */
62static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr) 74static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
@@ -96,7 +108,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
96 108
97bool xen_arch_need_swiotlb(struct device *dev, 109bool xen_arch_need_swiotlb(struct device *dev,
98 unsigned long pfn, 110 unsigned long pfn,
99 unsigned long mfn); 111 unsigned long bfn);
100unsigned long xen_get_swiotlb_free_pages(unsigned int order); 112unsigned long xen_get_swiotlb_free_pages(unsigned int order);
101 113
102#endif /* _ASM_ARM_XEN_PAGE_H */ 114#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index c50c8d33f874..eeeab074e154 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;
49 49
50static __initdata struct device_node *xen_node; 50static __initdata struct device_node *xen_node;
51 51
52int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 52int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
53 unsigned long addr, 53 unsigned long addr,
54 xen_pfn_t *mfn, int nr, 54 xen_pfn_t *gfn, int nr,
55 int *err_ptr, pgprot_t prot, 55 int *err_ptr, pgprot_t prot,
56 unsigned domid, 56 unsigned domid,
57 struct page **pages) 57 struct page **pages)
58{ 58{
59 return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 59 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
60 prot, domid, pages); 60 prot, domid, pages);
61} 61}
62EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 62EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
63 63
64/* Not used by XENFEAT_auto_translated guests. */ 64/* Not used by XENFEAT_auto_translated guests. */
65int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 65int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
66 unsigned long addr, 66 unsigned long addr,
67 xen_pfn_t mfn, int nr, 67 xen_pfn_t gfn, int nr,
68 pgprot_t prot, unsigned domid, 68 pgprot_t prot, unsigned domid,
69 struct page **pages) 69 struct page **pages)
70{ 70{
71 return -ENOSYS; 71 return -ENOSYS;
72} 72}
73EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 73EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
74 74
75int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 75int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
76 int nr, struct page **pages) 76 int nr, struct page **pages)
77{ 77{
78 return xen_xlate_unmap_gfn_range(vma, nr, pages); 78 return xen_xlate_unmap_gfn_range(vma, nr, pages);
79} 79}
80EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 80EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
81 81
82static void xen_percpu_init(void) 82static void xen_percpu_init(void)
83{ 83{
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 03e75fef15b8..6dd911d1f0ac 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -139,9 +139,9 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
139 139
140bool xen_arch_need_swiotlb(struct device *dev, 140bool xen_arch_need_swiotlb(struct device *dev,
141 unsigned long pfn, 141 unsigned long pfn,
142 unsigned long mfn) 142 unsigned long bfn)
143{ 143{
144 return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev)); 144 return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
145} 145}
146 146
147int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 147int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index a3804fbe1f36..0679e11d2cf7 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
101{ 101{
102 unsigned long mfn; 102 unsigned long mfn;
103 103
104 /*
105 * Some x86 code are still using pfn_to_mfn instead of
 106 * pfn_to_gfn. This will have to be removed when we figured
107 * out which call.
108 */
104 if (xen_feature(XENFEAT_auto_translated_physmap)) 109 if (xen_feature(XENFEAT_auto_translated_physmap))
105 return pfn; 110 return pfn;
106 111
@@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
147{ 152{
148 unsigned long pfn; 153 unsigned long pfn;
149 154
155 /*
156 * Some x86 code are still using mfn_to_pfn instead of
157 * gfn_to_pfn. This will have to be removed when we figure
158 * out which call.
159 */
150 if (xen_feature(XENFEAT_auto_translated_physmap)) 160 if (xen_feature(XENFEAT_auto_translated_physmap))
151 return mfn; 161 return mfn;
152 162
@@ -176,6 +186,27 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
176 return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset); 186 return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
177} 187}
178 188
189/* Pseudo-physical <-> Guest conversion */
190static inline unsigned long pfn_to_gfn(unsigned long pfn)
191{
192 if (xen_feature(XENFEAT_auto_translated_physmap))
193 return pfn;
194 else
195 return pfn_to_mfn(pfn);
196}
197
198static inline unsigned long gfn_to_pfn(unsigned long gfn)
199{
200 if (xen_feature(XENFEAT_auto_translated_physmap))
201 return gfn;
202 else
203 return mfn_to_pfn(gfn);
204}
205
206/* Pseudo-physical <-> Bus conversion */
207#define pfn_to_bfn(pfn) pfn_to_gfn(pfn)
208#define bfn_to_pfn(bfn) gfn_to_pfn(bfn)
209
179/* 210/*
180 * We detect special mappings in one of two ways: 211 * We detect special mappings in one of two ways:
181 * 1. If the MFN is an I/O page then Xen will set the m2p entry 212 * 1. If the MFN is an I/O page then Xen will set the m2p entry
@@ -196,7 +227,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
196 * require. In all the cases we care about, the FOREIGN_FRAME bit is 227 * require. In all the cases we care about, the FOREIGN_FRAME bit is
197 * masked (e.g., pfn_to_mfn()) so behaviour there is correct. 228 * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
198 */ 229 */
199static inline unsigned long mfn_to_local_pfn(unsigned long mfn) 230static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
200{ 231{
201 unsigned long pfn; 232 unsigned long pfn;
202 233
@@ -215,6 +246,10 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
215#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v))) 246#define virt_to_mfn(v) (pfn_to_mfn(virt_to_pfn(v)))
216#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT)) 247#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
217 248
249/* VIRT <-> GUEST conversion */
250#define virt_to_gfn(v) (pfn_to_gfn(virt_to_pfn(v)))
251#define gfn_to_virt(g) (__va(gfn_to_pfn(g) << PAGE_SHIFT))
252
218static inline unsigned long pte_mfn(pte_t pte) 253static inline unsigned long pte_mfn(pte_t pte)
219{ 254{
220 return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; 255 return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -262,7 +297,7 @@ void make_lowmem_page_readwrite(void *vaddr);
262 297
263static inline bool xen_arch_need_swiotlb(struct device *dev, 298static inline bool xen_arch_need_swiotlb(struct device *dev,
264 unsigned long pfn, 299 unsigned long pfn,
265 unsigned long mfn) 300 unsigned long bfn)
266{ 301{
267 return false; 302 return false;
268} 303}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2c50b445884e..9c479fe40459 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
2812 return 0; 2812 return 0;
2813} 2813}
2814 2814
2815static int do_remap_mfn(struct vm_area_struct *vma, 2815static int do_remap_gfn(struct vm_area_struct *vma,
2816 unsigned long addr, 2816 unsigned long addr,
2817 xen_pfn_t *mfn, int nr, 2817 xen_pfn_t *gfn, int nr,
2818 int *err_ptr, pgprot_t prot, 2818 int *err_ptr, pgprot_t prot,
2819 unsigned domid, 2819 unsigned domid,
2820 struct page **pages) 2820 struct page **pages)
@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
2830 if (xen_feature(XENFEAT_auto_translated_physmap)) { 2830 if (xen_feature(XENFEAT_auto_translated_physmap)) {
2831#ifdef CONFIG_XEN_PVH 2831#ifdef CONFIG_XEN_PVH
2832 /* We need to update the local page tables and the xen HAP */ 2832 /* We need to update the local page tables and the xen HAP */
2833 return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr, 2833 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
2834 prot, domid, pages); 2834 prot, domid, pages);
2835#else 2835#else
2836 return -EINVAL; 2836 return -EINVAL;
2837#endif 2837#endif
2838 } 2838 }
2839 2839
2840 rmd.mfn = mfn; 2840 rmd.mfn = gfn;
2841 rmd.prot = prot; 2841 rmd.prot = prot;
2842 /* We use the err_ptr to indicate if there we are doing a contigious 2842 /* We use the err_ptr to indicate if there we are doing a contigious
2843 * mapping or a discontigious mapping. */ 2843 * mapping or a discontigious mapping. */
@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
2865 batch_left, &done, domid); 2865 batch_left, &done, domid);
2866 2866
2867 /* 2867 /*
2868 * @err_ptr may be the same buffer as @mfn, so 2868 * @err_ptr may be the same buffer as @gfn, so
2869 * only clear it after each chunk of @mfn is 2869 * only clear it after each chunk of @gfn is
2870 * used. 2870 * used.
2871 */ 2871 */
2872 if (err_ptr) { 2872 if (err_ptr) {
@@ -2896,19 +2896,19 @@ out:
2896 return err < 0 ? err : mapped; 2896 return err < 0 ? err : mapped;
2897} 2897}
2898 2898
2899int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 2899int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
2900 unsigned long addr, 2900 unsigned long addr,
2901 xen_pfn_t mfn, int nr, 2901 xen_pfn_t gfn, int nr,
2902 pgprot_t prot, unsigned domid, 2902 pgprot_t prot, unsigned domid,
2903 struct page **pages) 2903 struct page **pages)
2904{ 2904{
2905 return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages); 2905 return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
2906} 2906}
2907EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); 2907EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
2908 2908
2909int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 2909int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
2910 unsigned long addr, 2910 unsigned long addr,
2911 xen_pfn_t *mfn, int nr, 2911 xen_pfn_t *gfn, int nr,
2912 int *err_ptr, pgprot_t prot, 2912 int *err_ptr, pgprot_t prot,
2913 unsigned domid, struct page **pages) 2913 unsigned domid, struct page **pages)
2914{ 2914{
@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
2917 * cause of "wrong memory was mapped in". 2917 * cause of "wrong memory was mapped in".
2918 */ 2918 */
2919 BUG_ON(err_ptr == NULL); 2919 BUG_ON(err_ptr == NULL);
2920 return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages); 2920 return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
2921} 2921}
2922EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array); 2922EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
2923 2923
2924 2924
2925/* Returns: 0 success */ 2925/* Returns: 0 success */
2926int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 2926int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
2927 int numpgs, struct page **pages) 2927 int numpgs, struct page **pages)
2928{ 2928{
2929 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) 2929 if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
2935 return -EINVAL; 2935 return -EINVAL;
2936#endif 2936#endif
2937} 2937}
2938EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); 2938EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 2a9ff7342791..3f4ebf0261f2 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -453,7 +453,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
453 } 453 }
454#endif 454#endif
455 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs); 455 ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
456 ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); 456 ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
457 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt)) 457 if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
458 BUG(); 458 BUG();
459 459
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 15083539df15..0823a96902f8 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -249,7 +249,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
249 struct blkfront_info *info) 249 struct blkfront_info *info)
250{ 250{
251 struct grant *gnt_list_entry; 251 struct grant *gnt_list_entry;
252 unsigned long buffer_mfn; 252 unsigned long buffer_gfn;
253 253
254 BUG_ON(list_empty(&info->grants)); 254 BUG_ON(list_empty(&info->grants));
255 gnt_list_entry = list_first_entry(&info->grants, struct grant, 255 gnt_list_entry = list_first_entry(&info->grants, struct grant,
@@ -268,10 +268,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
268 BUG_ON(!pfn); 268 BUG_ON(!pfn);
269 gnt_list_entry->pfn = pfn; 269 gnt_list_entry->pfn = pfn;
270 } 270 }
271 buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn); 271 buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
272 gnttab_grant_foreign_access_ref(gnt_list_entry->gref, 272 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
273 info->xbdev->otherend_id, 273 info->xbdev->otherend_id,
274 buffer_mfn, 0); 274 buffer_gfn, 0);
275 return gnt_list_entry; 275 return gnt_list_entry;
276} 276}
277 277
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index 95599e478e19..23d0549539d4 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
232 struct xenbus_transaction xbt; 232 struct xenbus_transaction xbt;
233 233
234 ret = gnttab_grant_foreign_access(dev->otherend_id, 234 ret = gnttab_grant_foreign_access(dev->otherend_id,
235 virt_to_mfn(info->page), 0); 235 virt_to_gfn(info->page), 0);
236 if (ret < 0) 236 if (ret < 0)
237 return ret; 237 return ret;
238 info->gref = ret; 238 info->gref = ret;
@@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
255 goto error_irqh; 255 goto error_irqh;
256 } 256 }
257 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", 257 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
258 virt_to_mfn(info->page)); 258 virt_to_gfn(info->page));
259 if (ret) 259 if (ret)
260 goto error_xenbus; 260 goto error_xenbus;
261 ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref); 261 ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index abc1381264fc..ec98d43916a8 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,7 +325,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
325 } else { 325 } else {
326 copy_gop->source.domid = DOMID_SELF; 326 copy_gop->source.domid = DOMID_SELF;
327 copy_gop->source.u.gmfn = 327 copy_gop->source.u.gmfn =
328 virt_to_mfn(page_address(page)); 328 virt_to_gfn(page_address(page));
329 } 329 }
330 copy_gop->source.offset = offset; 330 copy_gop->source.offset = offset;
331 331
@@ -1406,7 +1406,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1406 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; 1406 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1407 1407
1408 queue->tx_copy_ops[*copy_ops].dest.u.gmfn = 1408 queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1409 virt_to_mfn(skb->data); 1409 virt_to_gfn(skb->data);
1410 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; 1410 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1411 queue->tx_copy_ops[*copy_ops].dest.offset = 1411 queue->tx_copy_ops[*copy_ops].dest.offset =
1412 offset_in_page(skb->data); 1412 offset_in_page(skb->data);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index b9c637a0036b..f821a97d7827 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
291 struct sk_buff *skb; 291 struct sk_buff *skb;
292 unsigned short id; 292 unsigned short id;
293 grant_ref_t ref; 293 grant_ref_t ref;
294 unsigned long pfn; 294 unsigned long gfn;
295 struct xen_netif_rx_request *req; 295 struct xen_netif_rx_request *req;
296 296
297 skb = xennet_alloc_one_rx_buffer(queue); 297 skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
307 BUG_ON((signed short)ref < 0); 307 BUG_ON((signed short)ref < 0);
308 queue->grant_rx_ref[id] = ref; 308 queue->grant_rx_ref[id] = ref;
309 309
310 pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0])); 310 gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
311 311
312 req = RING_GET_REQUEST(&queue->rx, req_prod); 312 req = RING_GET_REQUEST(&queue->rx, req_prod);
313 gnttab_grant_foreign_access_ref(ref, 313 gnttab_grant_foreign_access_ref(ref,
314 queue->info->xbdev->otherend_id, 314 queue->info->xbdev->otherend_id,
315 pfn_to_mfn(pfn), 315 gfn,
316 0); 316 0);
317 317
318 req->id = id; 318 req->id = id;
@@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); 430 ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
431 BUG_ON((signed short)ref < 0); 431 BUG_ON((signed short)ref < 0);
432 432
433 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, 433 gnttab_grant_foreign_access_ref(ref,
434 page_to_mfn(page), GNTMAP_readonly); 434 queue->info->xbdev->otherend_id,
435 xen_page_to_gfn(page),
436 GNTMAP_readonly);
435 437
436 queue->tx_skbs[id].skb = skb; 438 queue->tx_skbs[id].skb = skb;
437 queue->grant_tx_page[id] = page; 439 queue->grant_tx_page[id] = page;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index fad22caf0eff..9dc8687bf048 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
377 unsigned int data_len = scsi_bufflen(sc); 377 unsigned int data_len = scsi_bufflen(sc);
378 unsigned int data_grants = 0, seg_grants = 0; 378 unsigned int data_grants = 0, seg_grants = 0;
379 struct scatterlist *sg; 379 struct scatterlist *sg;
380 unsigned long mfn;
381 struct scsiif_request_segment *seg; 380 struct scsiif_request_segment *seg;
382 381
383 ring_req->nr_segments = 0; 382 ring_req->nr_segments = 0;
@@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
420 ref = gnttab_claim_grant_reference(&gref_head); 419 ref = gnttab_claim_grant_reference(&gref_head);
421 BUG_ON(ref == -ENOSPC); 420 BUG_ON(ref == -ENOSPC);
422 421
423 mfn = pfn_to_mfn(page_to_pfn(page));
424 gnttab_grant_foreign_access_ref(ref, 422 gnttab_grant_foreign_access_ref(ref,
425 info->dev->otherend_id, mfn, 1); 423 info->dev->otherend_id,
424 xen_page_to_gfn(page), 1);
426 shadow->gref[ref_cnt] = ref; 425 shadow->gref[ref_cnt] = ref;
427 ring_req->seg[ref_cnt].gref = ref; 426 ring_req->seg[ref_cnt].gref = ref;
428 ring_req->seg[ref_cnt].offset = (uint16_t)off; 427 ring_req->seg[ref_cnt].offset = (uint16_t)off;
@@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info,
454 ref = gnttab_claim_grant_reference(&gref_head); 453 ref = gnttab_claim_grant_reference(&gref_head);
455 BUG_ON(ref == -ENOSPC); 454 BUG_ON(ref == -ENOSPC);
456 455
457 mfn = pfn_to_mfn(page_to_pfn(page));
458 gnttab_grant_foreign_access_ref(ref, 456 gnttab_grant_foreign_access_ref(ref,
459 info->dev->otherend_id, mfn, grant_ro); 457 info->dev->otherend_id,
458 xen_page_to_gfn(page),
459 grant_ro);
460 460
461 shadow->gref[ref_cnt] = ref; 461 shadow->gref[ref_cnt] = ref;
462 seg->gref = ref; 462 seg->gref = ref;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index a9d837f83ce8..10beb1589d83 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -200,7 +200,7 @@ static int xen_hvm_console_init(void)
200{ 200{
201 int r; 201 int r;
202 uint64_t v = 0; 202 uint64_t v = 0;
203 unsigned long mfn; 203 unsigned long gfn;
204 struct xencons_info *info; 204 struct xencons_info *info;
205 205
206 if (!xen_hvm_domain()) 206 if (!xen_hvm_domain())
@@ -217,7 +217,7 @@ static int xen_hvm_console_init(void)
217 } 217 }
218 /* 218 /*
219 * If the toolstack (or the hypervisor) hasn't set these values, the 219 * If the toolstack (or the hypervisor) hasn't set these values, the
220 * default value is 0. Even though mfn = 0 and evtchn = 0 are 220 * default value is 0. Even though gfn = 0 and evtchn = 0 are
221 * theoretically correct values, in practice they never are and they 221 * theoretically correct values, in practice they never are and they
222 * mean that a legacy toolstack hasn't initialized the pv console correctly. 222 * mean that a legacy toolstack hasn't initialized the pv console correctly.
223 */ 223 */
@@ -229,8 +229,8 @@ static int xen_hvm_console_init(void)
229 r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 229 r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
230 if (r < 0 || v == 0) 230 if (r < 0 || v == 0)
231 goto err; 231 goto err;
232 mfn = v; 232 gfn = v;
233 info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE); 233 info->intf = xen_remap(gfn << PAGE_SHIFT, PAGE_SIZE);
234 if (info->intf == NULL) 234 if (info->intf == NULL)
235 goto err; 235 goto err;
236 info->vtermno = HVC_COOKIE; 236 info->vtermno = HVC_COOKIE;
@@ -265,7 +265,8 @@ static int xen_pv_console_init(void)
265 return 0; 265 return 0;
266 } 266 }
267 info->evtchn = xen_start_info->console.domU.evtchn; 267 info->evtchn = xen_start_info->console.domU.evtchn;
268 info->intf = mfn_to_virt(xen_start_info->console.domU.mfn); 268 /* GFN == MFN for PV guest */
269 info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
269 info->vtermno = HVC_COOKIE; 270 info->vtermno = HVC_COOKIE;
270 271
271 spin_lock(&xencons_lock); 272 spin_lock(&xencons_lock);
@@ -374,7 +375,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
374 int ret, evtchn, devid, ref, irq; 375 int ret, evtchn, devid, ref, irq;
375 struct xenbus_transaction xbt; 376 struct xenbus_transaction xbt;
376 grant_ref_t gref_head; 377 grant_ref_t gref_head;
377 unsigned long mfn;
378 378
379 ret = xenbus_alloc_evtchn(dev, &evtchn); 379 ret = xenbus_alloc_evtchn(dev, &evtchn);
380 if (ret) 380 if (ret)
@@ -389,10 +389,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
389 irq, &domU_hvc_ops, 256); 389 irq, &domU_hvc_ops, 256);
390 if (IS_ERR(info->hvc)) 390 if (IS_ERR(info->hvc))
391 return PTR_ERR(info->hvc); 391 return PTR_ERR(info->hvc);
392 if (xen_pv_domain())
393 mfn = virt_to_mfn(info->intf);
394 else
395 mfn = __pa(info->intf) >> PAGE_SHIFT;
396 ret = gnttab_alloc_grant_references(1, &gref_head); 392 ret = gnttab_alloc_grant_references(1, &gref_head);
397 if (ret < 0) 393 if (ret < 0)
398 return ret; 394 return ret;
@@ -401,7 +397,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
401 if (ref < 0) 397 if (ref < 0)
402 return ref; 398 return ref;
403 gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id, 399 gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
404 mfn, 0); 400 virt_to_gfn(info->intf), 0);
405 401
406 again: 402 again:
407 ret = xenbus_transaction_start(&xbt); 403 ret = xenbus_transaction_start(&xbt);
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 09dc44736c1a..0567d517eed3 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -46,7 +46,7 @@ struct xenfb_info {
46 int nr_pages; 46 int nr_pages;
47 int irq; 47 int irq;
48 struct xenfb_page *page; 48 struct xenfb_page *page;
49 unsigned long *mfns; 49 unsigned long *gfns;
50 int update_wanted; /* XENFB_TYPE_UPDATE wanted */ 50 int update_wanted; /* XENFB_TYPE_UPDATE wanted */
51 int feature_resize; /* XENFB_TYPE_RESIZE ok */ 51 int feature_resize; /* XENFB_TYPE_RESIZE ok */
52 struct xenfb_resize resize; /* protected by resize_lock */ 52 struct xenfb_resize resize; /* protected by resize_lock */
@@ -402,8 +402,8 @@ static int xenfb_probe(struct xenbus_device *dev,
402 402
403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
404 404
405 info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages); 405 info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
406 if (!info->mfns) 406 if (!info->gfns)
407 goto error_nomem; 407 goto error_nomem;
408 408
409 /* set up shared page */ 409 /* set up shared page */
@@ -530,29 +530,29 @@ static int xenfb_remove(struct xenbus_device *dev)
530 framebuffer_release(info->fb_info); 530 framebuffer_release(info->fb_info);
531 } 531 }
532 free_page((unsigned long)info->page); 532 free_page((unsigned long)info->page);
533 vfree(info->mfns); 533 vfree(info->gfns);
534 vfree(info->fb); 534 vfree(info->fb);
535 kfree(info); 535 kfree(info);
536 536
537 return 0; 537 return 0;
538} 538}
539 539
540static unsigned long vmalloc_to_mfn(void *address) 540static unsigned long vmalloc_to_gfn(void *address)
541{ 541{
542 return pfn_to_mfn(vmalloc_to_pfn(address)); 542 return xen_page_to_gfn(vmalloc_to_page(address));
543} 543}
544 544
545static void xenfb_init_shared_page(struct xenfb_info *info, 545static void xenfb_init_shared_page(struct xenfb_info *info,
546 struct fb_info *fb_info) 546 struct fb_info *fb_info)
547{ 547{
548 int i; 548 int i;
549 int epd = PAGE_SIZE / sizeof(info->mfns[0]); 549 int epd = PAGE_SIZE / sizeof(info->gfns[0]);
550 550
551 for (i = 0; i < info->nr_pages; i++) 551 for (i = 0; i < info->nr_pages; i++)
552 info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE); 552 info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
553 553
554 for (i = 0; i * epd < info->nr_pages; i++) 554 for (i = 0; i * epd < info->nr_pages; i++)
555 info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]); 555 info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
556 556
557 info->page->width = fb_info->var.xres; 557 info->page->width = fb_info->var.xres;
558 info->page->height = fb_info->var.yres; 558 info->page->height = fb_info->var.yres;
@@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
586 goto unbind_irq; 586 goto unbind_irq;
587 } 587 }
588 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu", 588 ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
589 virt_to_mfn(info->page)); 589 virt_to_gfn(info->page));
590 if (ret) 590 if (ret)
591 goto error_xenbus; 591 goto error_xenbus;
592 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u", 592 ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1fa633b2d556..c79329fcfa78 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
441 /* Update direct mapping, invalidate P2M, and add to balloon. */ 441 /* Update direct mapping, invalidate P2M, and add to balloon. */
442 for (i = 0; i < nr_pages; i++) { 442 for (i = 0; i < nr_pages; i++) {
443 pfn = frame_list[i]; 443 pfn = frame_list[i];
444 frame_list[i] = pfn_to_mfn(pfn); 444 frame_list[i] = pfn_to_gfn(pfn);
445 page = pfn_to_page(pfn); 445 page = pfn_to_page(pfn);
446 446
447#ifdef CONFIG_XEN_HAVE_PVMMU 447#ifdef CONFIG_XEN_HAVE_PVMMU
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 0edb91c0de6b..8ae2fc90e1ea 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -6,10 +6,10 @@
6bool xen_biovec_phys_mergeable(const struct bio_vec *vec1, 6bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
7 const struct bio_vec *vec2) 7 const struct bio_vec *vec2)
8{ 8{
9 unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page)); 9 unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
10 unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page)); 10 unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
11 11
12 return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) && 12 return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
13 ((mfn1 == mfn2) || ((mfn1+1) == mfn2)); 13 ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
14} 14}
15EXPORT_SYMBOL(xen_biovec_phys_mergeable); 15EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 68d129019e8f..6cd5e65c4aff 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
1688 struct physdev_pirq_eoi_gmfn eoi_gmfn; 1688 struct physdev_pirq_eoi_gmfn eoi_gmfn;
1689 1689
1690 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO); 1690 pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
1691 eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map); 1691 eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
1692 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn); 1692 rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
1693 /* TODO: No PVH support for PIRQ EOI */ 1693 /* TODO: No PVH support for PIRQ EOI */
1694 if (rc != 0) { 1694 if (rc != 0) {
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index ed673e1acd61..1d4baf56c36b 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
111 for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++) 111 for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
112 q->head[i] = 0; 112 q->head[i] = 0;
113 113
114 init_control.control_gfn = virt_to_mfn(control_block); 114 init_control.control_gfn = virt_to_gfn(control_block);
115 init_control.offset = 0; 115 init_control.offset = 0;
116 init_control.vcpu = cpu; 116 init_control.vcpu = cpu;
117 117
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
167 /* Mask all events in this page before adding it. */ 167 /* Mask all events in this page before adding it. */
168 init_array_page(array_page); 168 init_array_page(array_page);
169 169
170 expand_array.array_gfn = virt_to_mfn(array_page); 170 expand_array.array_gfn = virt_to_gfn(array_page);
171 171
172 ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array); 172 ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
173 if (ret < 0) 173 if (ret < 0)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e53fe191738c..14370df9ac1c 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
142 142
143 /* Grant foreign access to the page. */ 143 /* Grant foreign access to the page. */
144 rc = gnttab_grant_foreign_access(op->domid, 144 rc = gnttab_grant_foreign_access(op->domid,
145 pfn_to_mfn(page_to_pfn(gref->page)), readonly); 145 xen_page_to_gfn(gref->page),
146 readonly);
146 if (rc < 0) 147 if (rc < 0)
147 goto undo; 148 goto undo;
148 gref_ids[i] = gref->gref_id = rc; 149 gref_ids[i] = gref->gref_id = rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d10effee9b9e..e12bd3635f83 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
80 * is resuming in a new domain. 80 * is resuming in a new domain.
81 */ 81 */
82 si->cancelled = HYPERVISOR_suspend(xen_pv_domain() 82 si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
83 ? virt_to_mfn(xen_start_info) 83 ? virt_to_gfn(xen_start_info)
84 : 0); 84 : 0);
85 85
86 xen_arch_post_suspend(si->cancelled); 86 xen_arch_post_suspend(si->cancelled);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..c6deb87c5c69 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
193 return ret; 193 return ret;
194} 194}
195 195
196struct mmap_mfn_state { 196struct mmap_gfn_state {
197 unsigned long va; 197 unsigned long va;
198 struct vm_area_struct *vma; 198 struct vm_area_struct *vma;
199 domid_t domain; 199 domid_t domain;
200}; 200};
201 201
202static int mmap_mfn_range(void *data, void *state) 202static int mmap_gfn_range(void *data, void *state)
203{ 203{
204 struct privcmd_mmap_entry *msg = data; 204 struct privcmd_mmap_entry *msg = data;
205 struct mmap_mfn_state *st = state; 205 struct mmap_gfn_state *st = state;
206 struct vm_area_struct *vma = st->vma; 206 struct vm_area_struct *vma = st->vma;
207 int rc; 207 int rc;
208 208
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end)) 216 ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
217 return -EINVAL; 217 return -EINVAL;
218 218
219 rc = xen_remap_domain_mfn_range(vma, 219 rc = xen_remap_domain_gfn_range(vma,
220 msg->va & PAGE_MASK, 220 msg->va & PAGE_MASK,
221 msg->mfn, msg->npages, 221 msg->mfn, msg->npages,
222 vma->vm_page_prot, 222 vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
236 struct vm_area_struct *vma; 236 struct vm_area_struct *vma;
237 int rc; 237 int rc;
238 LIST_HEAD(pagelist); 238 LIST_HEAD(pagelist);
239 struct mmap_mfn_state state; 239 struct mmap_gfn_state state;
240 240
241 /* We only support privcmd_ioctl_mmap_batch for auto translated. */ 241 /* We only support privcmd_ioctl_mmap_batch for auto translated. */
242 if (xen_feature(XENFEAT_auto_translated_physmap)) 242 if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
273 273
274 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry), 274 rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
275 &pagelist, 275 &pagelist,
276 mmap_mfn_range, &state); 276 mmap_gfn_range, &state);
277 277
278 278
279out_up: 279out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
299 int global_error; 299 int global_error;
300 int version; 300 int version;
301 301
302 /* User-space mfn array to store errors in the second pass for V1. */ 302 /* User-space gfn array to store errors in the second pass for V1. */
303 xen_pfn_t __user *user_mfn; 303 xen_pfn_t __user *user_gfn;
304 /* User-space int array to store errors in the second pass for V2. */ 304 /* User-space int array to store errors in the second pass for V2. */
305 int __user *user_err; 305 int __user *user_err;
306}; 306};
307 307
308/* auto translated dom0 note: if domU being created is PV, then mfn is 308/* auto translated dom0 note: if domU being created is PV, then gfn is
309 * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP). 309 * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
310 */ 310 */
311static int mmap_batch_fn(void *data, int nr, void *state) 311static int mmap_batch_fn(void *data, int nr, void *state)
312{ 312{
313 xen_pfn_t *mfnp = data; 313 xen_pfn_t *gfnp = data;
314 struct mmap_batch_state *st = state; 314 struct mmap_batch_state *st = state;
315 struct vm_area_struct *vma = st->vma; 315 struct vm_area_struct *vma = st->vma;
316 struct page **pages = vma->vm_private_data; 316 struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
321 cur_pages = &pages[st->index]; 321 cur_pages = &pages[st->index];
322 322
323 BUG_ON(nr < 0); 323 BUG_ON(nr < 0);
324 ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr, 324 ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
325 (int *)mfnp, st->vma->vm_page_prot, 325 (int *)gfnp, st->vma->vm_page_prot,
326 st->domain, cur_pages); 326 st->domain, cur_pages);
327 327
328 /* Adjust the global_error? */ 328 /* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
347 347
348 if (st->version == 1) { 348 if (st->version == 1) {
349 if (err) { 349 if (err) {
350 xen_pfn_t mfn; 350 xen_pfn_t gfn;
351 351
352 ret = get_user(mfn, st->user_mfn); 352 ret = get_user(gfn, st->user_gfn);
353 if (ret < 0) 353 if (ret < 0)
354 return ret; 354 return ret;
355 /* 355 /*
356 * V1 encodes the error codes in the 32bit top 356 * V1 encodes the error codes in the 32bit top
357 * nibble of the mfn (with its known 357 * nibble of the gfn (with its known
358 * limitations vis-a-vis 64 bit callers). 358 * limitations vis-a-vis 64 bit callers).
359 */ 359 */
360 mfn |= (err == -ENOENT) ? 360 gfn |= (err == -ENOENT) ?
361 PRIVCMD_MMAPBATCH_PAGED_ERROR : 361 PRIVCMD_MMAPBATCH_PAGED_ERROR :
362 PRIVCMD_MMAPBATCH_MFN_ERROR; 362 PRIVCMD_MMAPBATCH_MFN_ERROR;
363 return __put_user(mfn, st->user_mfn++); 363 return __put_user(gfn, st->user_gfn++);
364 } else 364 } else
365 st->user_mfn++; 365 st->user_gfn++;
366 } else { /* st->version == 2 */ 366 } else { /* st->version == 2 */
367 if (err) 367 if (err)
368 return __put_user(err, st->user_err++); 368 return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
388 return 0; 388 return 0;
389} 389}
390 390
391/* Allocate pfns that are then mapped with gmfns from foreign domid. Update 391/* Allocate pfns that are then mapped with gfns from foreign domid. Update
392 * the vma with the page info to use later. 392 * the vma with the page info to use later.
393 * Returns: 0 if success, otherwise -errno 393 * Returns: 0 if success, otherwise -errno
394 */ 394 */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
526 526
527 if (state.global_error) { 527 if (state.global_error) {
528 /* Write back errors in second pass. */ 528 /* Write back errors in second pass. */
529 state.user_mfn = (xen_pfn_t *)m.arr; 529 state.user_gfn = (xen_pfn_t *)m.arr;
530 state.user_err = m.err; 530 state.user_err = m.err;
531 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t), 531 ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
532 &pagelist, mmap_return_errors, &state); 532 &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages) 587 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
588 return; 588 return;
589 589
590 rc = xen_unmap_domain_mfn_range(vma, numpgs, pages); 590 rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
591 if (rc == 0) 591 if (rc == 0)
592 free_xenballooned_pages(numpgs, pages); 592 free_xenballooned_pages(numpgs, pages);
593 else 593 else
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4c549323c605..d757a3e610c6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
82 */ 82 */
83static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr) 83static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
84{ 84{
85 unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr)); 85 unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
86 dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT; 86 dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
87 87
88 dma |= paddr & ~PAGE_MASK; 88 dma |= paddr & ~PAGE_MASK;
89 89
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
92 92
93static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr) 93static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
94{ 94{
95 unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr)); 95 unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
96 dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; 96 dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
97 phys_addr_t paddr = dma; 97 phys_addr_t paddr = dma;
98 98
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
110 unsigned int offset, 110 unsigned int offset,
111 size_t length) 111 size_t length)
112{ 112{
113 unsigned long next_mfn; 113 unsigned long next_bfn;
114 int i; 114 int i;
115 int nr_pages; 115 int nr_pages;
116 116
117 next_mfn = pfn_to_mfn(pfn); 117 next_bfn = pfn_to_bfn(pfn);
118 nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT; 118 nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
119 119
120 for (i = 1; i < nr_pages; i++) { 120 for (i = 1; i < nr_pages; i++) {
121 if (pfn_to_mfn(++pfn) != ++next_mfn) 121 if (pfn_to_bfn(++pfn) != ++next_bfn)
122 return 0; 122 return 0;
123 } 123 }
124 return 1; 124 return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
138 138
139static int is_xen_swiotlb_buffer(dma_addr_t dma_addr) 139static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
140{ 140{
141 unsigned long mfn = PFN_DOWN(dma_addr); 141 unsigned long bfn = PFN_DOWN(dma_addr);
142 unsigned long pfn = mfn_to_local_pfn(mfn); 142 unsigned long pfn = bfn_to_local_pfn(bfn);
143 phys_addr_t paddr; 143 phys_addr_t paddr;
144 144
145 /* If the address is outside our domain, it CAN 145 /* If the address is outside our domain, it CAN
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 239738f944ba..945fc4327201 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
129/* xen generic tmem ops */ 129/* xen generic tmem ops */
130 130
131static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, 131static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
132 u32 index, unsigned long pfn) 132 u32 index, struct page *page)
133{ 133{
134 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
135
136 return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, 134 return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
137 gmfn, 0, 0, 0); 135 xen_page_to_gfn(page), 0, 0, 0);
138} 136}
139 137
140static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, 138static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
141 u32 index, unsigned long pfn) 139 u32 index, struct page *page)
142{ 140{
143 unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
144
145 return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, 141 return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
146 gmfn, 0, 0, 0); 142 xen_page_to_gfn(page), 0, 0, 0);
147} 143}
148 144
149static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) 145static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
173{ 169{
174 u32 ind = (u32) index; 170 u32 ind = (u32) index;
175 struct tmem_oid oid = *(struct tmem_oid *)&key; 171 struct tmem_oid oid = *(struct tmem_oid *)&key;
176 unsigned long pfn = page_to_pfn(page);
177 172
178 if (pool < 0) 173 if (pool < 0)
179 return; 174 return;
180 if (ind != index) 175 if (ind != index)
181 return; 176 return;
182 mb(); /* ensure page is quiescent; tmem may address it with an alias */ 177 mb(); /* ensure page is quiescent; tmem may address it with an alias */
183 (void)xen_tmem_put_page((u32)pool, oid, ind, pfn); 178 (void)xen_tmem_put_page((u32)pool, oid, ind, page);
184} 179}
185 180
186static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, 181static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
188{ 183{
189 u32 ind = (u32) index; 184 u32 ind = (u32) index;
190 struct tmem_oid oid = *(struct tmem_oid *)&key; 185 struct tmem_oid oid = *(struct tmem_oid *)&key;
191 unsigned long pfn = page_to_pfn(page);
192 int ret; 186 int ret;
193 187
194 /* translate return values to linux semantics */ 188 /* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
196 return -1; 190 return -1;
197 if (ind != index) 191 if (ind != index)
198 return -1; 192 return -1;
199 ret = xen_tmem_get_page((u32)pool, oid, ind, pfn); 193 ret = xen_tmem_get_page((u32)pool, oid, ind, page);
200 if (ret == 1) 194 if (ret == 1)
201 return 0; 195 return 0;
202 else 196 else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
287{ 281{
288 u64 ind64 = (u64)offset; 282 u64 ind64 = (u64)offset;
289 u32 ind = (u32)offset; 283 u32 ind = (u32)offset;
290 unsigned long pfn = page_to_pfn(page);
291 int pool = tmem_frontswap_poolid; 284 int pool = tmem_frontswap_poolid;
292 int ret; 285 int ret;
293 286
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
296 if (ind64 != ind) 289 if (ind64 != ind)
297 return -1; 290 return -1;
298 mb(); /* ensure page is quiescent; tmem may address it with an alias */ 291 mb(); /* ensure page is quiescent; tmem may address it with an alias */
299 ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn); 292 ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
300 /* translate Xen tmem return values to linux semantics */ 293 /* translate Xen tmem return values to linux semantics */
301 if (ret == 1) 294 if (ret == 1)
302 return 0; 295 return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
313{ 306{
314 u64 ind64 = (u64)offset; 307 u64 ind64 = (u64)offset;
315 u32 ind = (u32)offset; 308 u32 ind = (u32)offset;
316 unsigned long pfn = page_to_pfn(page);
317 int pool = tmem_frontswap_poolid; 309 int pool = tmem_frontswap_poolid;
318 int ret; 310 int ret;
319 311
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
321 return -1; 313 return -1;
322 if (ind64 != ind) 314 if (ind64 != ind)
323 return -1; 315 return -1;
324 ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn); 316 ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
325 /* translate Xen tmem return values to linux semantics */ 317 /* translate Xen tmem return values to linux semantics */
326 if (ret == 1) 318 if (ret == 1)
327 return 0; 319 return 0;
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e30353575d5d..2ba09c1195c8 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
380 380
381 for (i = 0; i < nr_pages; i++) { 381 for (i = 0; i < nr_pages; i++) {
382 err = gnttab_grant_foreign_access(dev->otherend_id, 382 err = gnttab_grant_foreign_access(dev->otherend_id,
383 virt_to_mfn(vaddr), 0); 383 virt_to_gfn(vaddr), 0);
384 if (err < 0) { 384 if (err < 0) {
385 xenbus_dev_fatal(dev, err, 385 xenbus_dev_fatal(dev, err,
386 "granting access to ring page"); 386 "granting access to ring page");
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index b17707ee07d4..ee6d9efd7b76 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
49 goto out_err; 49 goto out_err;
50 50
51 gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid, 51 gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
52 virt_to_mfn(xen_store_interface), 0 /* writable */); 52 virt_to_gfn(xen_store_interface), 0 /* writable */);
53 53
54 arg.dom = DOMID_SELF; 54 arg.dom = DOMID_SELF;
55 arg.remote_dom = domid; 55 arg.remote_dom = domid;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 4308fb3cf7c2..3cbe0556de26 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
75enum xenstore_init xen_store_domain_type; 75enum xenstore_init xen_store_domain_type;
76EXPORT_SYMBOL_GPL(xen_store_domain_type); 76EXPORT_SYMBOL_GPL(xen_store_domain_type);
77 77
78static unsigned long xen_store_mfn; 78static unsigned long xen_store_gfn;
79 79
80static BLOCKING_NOTIFIER_HEAD(xenstore_chain); 80static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
81 81
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
711 if (!page) 711 if (!page)
712 goto out_err; 712 goto out_err;
713 713
714 xen_store_mfn = xen_start_info->store_mfn = 714 xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
715 pfn_to_mfn(virt_to_phys((void *)page) >>
716 PAGE_SHIFT);
717 715
718 /* Next allocate a local port which xenstored can bind to */ 716 /* Next allocate a local port which xenstored can bind to */
719 alloc_unbound.dom = DOMID_SELF; 717 alloc_unbound.dom = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
787 err = xenstored_local_init(); 785 err = xenstored_local_init();
788 if (err) 786 if (err)
789 goto out_error; 787 goto out_error;
790 xen_store_interface = mfn_to_virt(xen_store_mfn); 788 xen_store_interface = gfn_to_virt(xen_store_gfn);
791 break; 789 break;
792 case XS_PV: 790 case XS_PV:
793 xen_store_evtchn = xen_start_info->store_evtchn; 791 xen_store_evtchn = xen_start_info->store_evtchn;
794 xen_store_mfn = xen_start_info->store_mfn; 792 xen_store_gfn = xen_start_info->store_mfn;
795 xen_store_interface = mfn_to_virt(xen_store_mfn); 793 xen_store_interface = gfn_to_virt(xen_store_gfn);
796 break; 794 break;
797 case XS_HVM: 795 case XS_HVM:
798 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v); 796 err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
@@ -802,9 +800,9 @@ static int __init xenbus_init(void)
802 err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v); 800 err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
803 if (err) 801 if (err)
804 goto out_error; 802 goto out_error;
805 xen_store_mfn = (unsigned long)v; 803 xen_store_gfn = (unsigned long)v;
806 xen_store_interface = 804 xen_store_interface =
807 xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE); 805 xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
808 break; 806 break;
809 default: 807 default:
810 pr_warn("Xenstore state unknown\n"); 808 pr_warn("Xenstore state unknown\n");
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 58a5389aec89..cff23872c5a9 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -38,8 +38,8 @@
38#include <xen/interface/xen.h> 38#include <xen/interface/xen.h>
39#include <xen/interface/memory.h> 39#include <xen/interface/memory.h>
40 40
41/* map fgmfn of domid to lpfn in the current domain */ 41/* map fgfn of domid to lpfn in the current domain */
42static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn, 42static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
43 unsigned int domid) 43 unsigned int domid)
44{ 44{
45 int rc; 45 int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
49 .size = 1, 49 .size = 1,
50 .space = XENMAPSPACE_gmfn_foreign, 50 .space = XENMAPSPACE_gmfn_foreign,
51 }; 51 };
52 xen_ulong_t idx = fgmfn; 52 xen_ulong_t idx = fgfn;
53 xen_pfn_t gpfn = lpfn; 53 xen_pfn_t gpfn = lpfn;
54 int err = 0; 54 int err = 0;
55 55
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
62} 62}
63 63
64struct remap_data { 64struct remap_data {
65 xen_pfn_t *fgmfn; /* foreign domain's gmfn */ 65 xen_pfn_t *fgfn; /* foreign domain's gfn */
66 pgprot_t prot; 66 pgprot_t prot;
67 domid_t domid; 67 domid_t domid;
68 struct vm_area_struct *vma; 68 struct vm_area_struct *vma;
69 int index; 69 int index;
70 struct page **pages; 70 struct page **pages;
71 struct xen_remap_mfn_info *info; 71 struct xen_remap_gfn_info *info;
72 int *err_ptr; 72 int *err_ptr;
73 int mapped; 73 int mapped;
74}; 74};
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
82 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot)); 82 pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
83 int rc; 83 int rc;
84 84
85 rc = map_foreign_page(pfn, *info->fgmfn, info->domid); 85 rc = map_foreign_page(pfn, *info->fgfn, info->domid);
86 *info->err_ptr++ = rc; 86 *info->err_ptr++ = rc;
87 if (!rc) { 87 if (!rc) {
88 set_pte_at(info->vma->vm_mm, addr, ptep, pte); 88 set_pte_at(info->vma->vm_mm, addr, ptep, pte);
89 info->mapped++; 89 info->mapped++;
90 } 90 }
91 info->fgmfn++; 91 info->fgfn++;
92 92
93 return 0; 93 return 0;
94} 94}
95 95
96int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 96int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
97 unsigned long addr, 97 unsigned long addr,
98 xen_pfn_t *mfn, int nr, 98 xen_pfn_t *gfn, int nr,
99 int *err_ptr, pgprot_t prot, 99 int *err_ptr, pgprot_t prot,
100 unsigned domid, 100 unsigned domid,
101 struct page **pages) 101 struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
108 x86 PVOPS */ 108 x86 PVOPS */
109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); 109 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
110 110
111 data.fgmfn = mfn; 111 data.fgfn = gfn;
112 data.prot = prot; 112 data.prot = prot;
113 data.domid = domid; 113 data.domid = domid;
114 data.vma = vma; 114 data.vma = vma;
diff --git a/include/uapi/xen/privcmd.h b/include/uapi/xen/privcmd.h
index a85316811d79..7ddeeda93809 100644
--- a/include/uapi/xen/privcmd.h
+++ b/include/uapi/xen/privcmd.h
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
44 44
45struct privcmd_mmap_entry { 45struct privcmd_mmap_entry {
46 __u64 va; 46 __u64 va;
47 /*
48 * This should be a GFN. It's not possible to change the name because
49 * it's exposed to the user-space.
50 */
47 __u64 mfn; 51 __u64 mfn;
48 __u64 npages; 52 __u64 npages;
49}; 53};
diff --git a/include/xen/page.h b/include/xen/page.h
index a5983da2f5cd..1daae485e336 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -3,9 +3,9 @@
3 3
4#include <asm/xen/page.h> 4#include <asm/xen/page.h>
5 5
6static inline unsigned long page_to_mfn(struct page *page) 6static inline unsigned long xen_page_to_gfn(struct page *page)
7{ 7{
8 return pfn_to_mfn(page_to_pfn(page)); 8 return pfn_to_gfn(page_to_pfn(page));
9} 9}
10 10
11struct xen_memory_region { 11struct xen_memory_region {
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 0ce4f32017ea..e4e214a5abd5 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
30struct vm_area_struct; 30struct vm_area_struct;
31 31
32/* 32/*
33 * xen_remap_domain_mfn_array() - map an array of foreign frames 33 * xen_remap_domain_gfn_array() - map an array of foreign frames
34 * @vma: VMA to map the pages into 34 * @vma: VMA to map the pages into
35 * @addr: Address at which to map the pages 35 * @addr: Address at which to map the pages
36 * @gfn: Array of GFNs to map 36 * @gfn: Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
46 * Returns the number of successfully mapped frames, or a -ve error 46 * Returns the number of successfully mapped frames, or a -ve error
47 * code. 47 * code.
48 */ 48 */
49int xen_remap_domain_mfn_array(struct vm_area_struct *vma, 49int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
50 unsigned long addr, 50 unsigned long addr,
51 xen_pfn_t *gfn, int nr, 51 xen_pfn_t *gfn, int nr,
52 int *err_ptr, pgprot_t prot, 52 int *err_ptr, pgprot_t prot,
53 unsigned domid, 53 unsigned domid,
54 struct page **pages); 54 struct page **pages);
55 55
56/* xen_remap_domain_mfn_range() - map a range of foreign frames 56/* xen_remap_domain_gfn_range() - map a range of foreign frames
57 * @vma: VMA to map the pages into 57 * @vma: VMA to map the pages into
58 * @addr: Address at which to map the pages 58 * @addr: Address at which to map the pages
59 * @gfn: First GFN to map. 59 * @gfn: First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
65 * Returns the number of successfully mapped frames, or a -ve error 65 * Returns the number of successfully mapped frames, or a -ve error
66 * code. 66 * code.
67 */ 67 */
68int xen_remap_domain_mfn_range(struct vm_area_struct *vma, 68int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
69 unsigned long addr, 69 unsigned long addr,
70 xen_pfn_t gfn, int nr, 70 xen_pfn_t gfn, int nr,
71 pgprot_t prot, unsigned domid, 71 pgprot_t prot, unsigned domid,
72 struct page **pages); 72 struct page **pages);
73int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, 73int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
74 int numpgs, struct page **pages); 74 int numpgs, struct page **pages);
75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma, 75int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
76 unsigned long addr, 76 unsigned long addr,