author     Linus Torvalds <torvalds@linux-foundation.org>  2015-09-10 19:21:11 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-09-10 19:21:11 -0400
commit     06ab838c2024db468855118087db16d8fa905ddc (patch)
tree       316ddb218bf3d5482bf16d38c129b71504780835 /drivers/xen
parent     573c577af079184ca523984e3279644eb37756a3 (diff)
parent     5f51042f876b88a3b81a135cc4ca0adb3d246112 (diff)
Merge tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen terminology fixes from David Vrabel:
 "Use the correct GFN/BFN terms more consistently"

* tag 'for-linus-4.3-rc0b-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/xenbus: Rename the variable xen_store_mfn to xen_store_gfn
  xen/privcmd: Further s/MFN/GFN/ clean-up
  hvc/xen: Further s/MFN/GFN clean-up
  video/xen-fbfront: Further s/MFN/GFN clean-up
  xen/tmem: Use xen_page_to_gfn rather than pfn_to_gfn
  xen: Use correctly the Xen memory terminologies
  arm/xen: implement correctly pfn_to_mfn
  xen: Make clear that swiotlb and biomerge are dealing with DMA address
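For readers following the rename: PFN is the kernel's own ("pseudo-physical")
frame number, MFN a real machine frame owned by the hypervisor, GFN the frame
number a guest hands to hypercalls, and BFN the frame number a device sees on
the bus. A minimal sketch of the central helper this series introduces,
paraphrased from the x86 header rather than quoted verbatim:

static inline unsigned long pfn_to_gfn(unsigned long pfn)
{
        /* Auto-translated guests (HVM/PVH/ARM): the GFN is the PFN itself. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
        /* Classic x86 PV: the guest addresses the hypervisor by MFN. */
        return pfn_to_mfn(pfn);
}

pfn_to_bfn() plays the analogous role for the bus (DMA) view of memory, which
is why the swiotlb-xen and biomerge hunks below switch to it rather than to
pfn_to_gfn().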
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/balloon.c                    |  2
-rw-r--r--  drivers/xen/biomerge.c                   |  6
-rw-r--r--  drivers/xen/events/events_base.c         |  2
-rw-r--r--  drivers/xen/events/events_fifo.c         |  4
-rw-r--r--  drivers/xen/gntalloc.c                   |  3
-rw-r--r--  drivers/xen/manage.c                     |  2
-rw-r--r--  drivers/xen/privcmd.c                    | 44
-rw-r--r--  drivers/xen/swiotlb-xen.c                | 16
-rw-r--r--  drivers/xen/tmem.c                       | 24
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c       |  2
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_backend.c  |  2
-rw-r--r--  drivers/xen/xenbus/xenbus_probe.c        | 16
-rw-r--r--  drivers/xen/xlate_mmu.c                  | 18
13 files changed, 66 insertions, 75 deletions
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1fa633b2d556..c79329fcfa78 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	/* Update direct mapping, invalidate P2M, and add to balloon. */
 	for (i = 0; i < nr_pages; i++) {
 		pfn = frame_list[i];
-		frame_list[i] = pfn_to_mfn(pfn);
+		frame_list[i] = pfn_to_gfn(pfn);
 		page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 0edb91c0de6b..8ae2fc90e1ea 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -6,10 +6,10 @@
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 			       const struct bio_vec *vec2)
 {
-	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
-	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+	unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+	unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
 	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+		((bfn1 == bfn2) || ((bfn1+1) == bfn2));
 }
 EXPORT_SYMBOL(xen_biovec_phys_mergeable);
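The merge test above only coalesces two bio_vecs whose pages are adjacent as
the device sees them; guest-physical adjacency is not enough under Xen. An
illustrative walk-through (the frame numbers are made up):

/* Two guest-contiguous pages (pfn 0x100, 0x101) may be backed by
 * non-adjacent bus frames, in which case the vecs must stay separate. */
unsigned long bfn1 = pfn_to_bfn(0x100);  /* say this yields bus frame 0x8a30 */
unsigned long bfn2 = pfn_to_bfn(0x101);  /* and this yields bus frame 0x41f7 */
bool mergeable = (bfn1 == bfn2) || (bfn1 + 1 == bfn2);  /* false here */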
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 68d129019e8f..6cd5e65c4aff 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
 		struct physdev_pirq_eoi_gmfn eoi_gmfn;
 
 		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-		eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
 		/* TODO: No PVH support for PIRQ EOI */
 		if (rc != 0) {
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index ed673e1acd61..1d4baf56c36b 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
 	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
 		q->head[i] = 0;
 
-	init_control.control_gfn = virt_to_mfn(control_block);
+	init_control.control_gfn = virt_to_gfn(control_block);
 	init_control.offset = 0;
 	init_control.vcpu = cpu;
 
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
 	/* Mask all events in this page before adding it. */
 	init_array_page(array_page);
 
-	expand_array.array_gfn = virt_to_mfn(array_page);
+	expand_array.array_gfn = virt_to_gfn(array_page);
 
 	ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
 	if (ret < 0)
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e53fe191738c..14370df9ac1c 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 
 		/* Grant foreign access to the page. */
 		rc = gnttab_grant_foreign_access(op->domid,
-				pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+						 xen_page_to_gfn(gref->page),
+						 readonly);
 		if (rc < 0)
 			goto undo;
 		gref_ids[i] = gref->gref_id = rc;
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index d10effee9b9e..e12bd3635f83 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
 	 * is resuming in a new domain.
 	 */
 	si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
-                                           ? virt_to_mfn(xen_start_info)
+                                           ? virt_to_gfn(xen_start_info)
                                            : 0);
 
 	xen_arch_post_suspend(si->cancelled);
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..c6deb87c5c69 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 					msg->va & PAGE_MASK,
 					msg->mfn, msg->npages,
 					vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
 	return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
  * the vma with the page info to use later.
  * Returns: 0 if success, otherwise -errno
  */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
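The V1 error encoding described in the mmap_return_error() comment packs the
status into the top bits of the frame number handed back to user space; the
constants come from include/uapi/xen/privcmd.h:

#define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U  /* OR'ed in on a generic failure */
#define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U  /* OR'ed in when err == -ENOENT (page paged out) */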
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 4c549323c605..d757a3e610c6 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
  */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-	unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
-	dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+	unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+	dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
 
 	dma |= paddr & ~PAGE_MASK;
 
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-	unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+	unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
 	dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
 	phys_addr_t paddr = dma;
 
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
 					     unsigned int offset,
 					     size_t length)
 {
-	unsigned long next_mfn;
+	unsigned long next_bfn;
 	int i;
 	int nr_pages;
 
-	next_mfn = pfn_to_mfn(pfn);
+	next_bfn = pfn_to_bfn(pfn);
 	nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
 
 	for (i = 1; i < nr_pages; i++) {
-		if (pfn_to_mfn(++pfn) != ++next_mfn)
+		if (pfn_to_bfn(++pfn) != ++next_bfn)
 			return 0;
 	}
 	return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
 {
-	unsigned long mfn = PFN_DOWN(dma_addr);
-	unsigned long pfn = mfn_to_local_pfn(mfn);
+	unsigned long bfn = PFN_DOWN(dma_addr);
+	unsigned long pfn = bfn_to_local_pfn(bfn);
 	phys_addr_t paddr;
 
 	/* If the address is outside our domain, it CAN
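For reference, xen_phys_to_bus() above translates only the frame; the offset
within the page passes through untouched. A worked example assuming 4 KiB
pages and an illustrative frame mapping:

phys_addr_t paddr = 0x12345678;                   /* guest-physical address */
unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));  /* frame 0x12345 -> bus frame */
dma_addr_t dma = ((dma_addr_t)bfn << PAGE_SHIFT)
		 | (paddr & ~PAGE_MASK);          /* low 12 bits (0x678) preserved */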
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index 239738f944ba..945fc4327201 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
 /* xen generic tmem ops */
 
 static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
-			     u32 index, unsigned long pfn)
+			     u32 index, struct page *page)
 {
-	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
 	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
-			   gmfn, 0, 0, 0);
+			   xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 
 	if (pool < 0)
 		return;
 	if (ind != index)
 		return;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+	(void)xen_tmem_put_page((u32)pool, oid, ind, page);
 }
 
 static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 {
 	u32 ind = (u32) index;
 	struct tmem_oid oid = *(struct tmem_oid *)&key;
-	unsigned long pfn = page_to_pfn(page);
 	int ret;
 
 	/* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 		return -1;
 	if (ind != index)
 		return -1;
-	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+	ret = xen_tmem_get_page((u32)pool, oid, ind, page);
 	if (ret == 1)
 		return 0;
 	else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 	if (ind64 != ind)
 		return -1;
 	mb(); /* ensure page is quiescent; tmem may address it with an alias */
-	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 {
 	u64 ind64 = (u64)offset;
 	u32 ind = (u32)offset;
-	unsigned long pfn = page_to_pfn(page);
 	int pool = tmem_frontswap_poolid;
 	int ret;
 
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 		return -1;
 	if (ind64 != ind)
 		return -1;
-	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
 	/* translate Xen tmem return values to linux semantics */
 	if (ret == 1)
 		return 0;
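The helper that replaces the open-coded xen_pv_domain() checks in these hunks
behaves, roughly, like the code it deletes. A paraphrase (see
include/xen/page.h for the real definition):

/* Paraphrase, not the literal header: folds the PV/auto-translated
 * distinction the deleted gmfn locals handled by hand. */
static inline unsigned long xen_page_to_gfn(struct page *page)
{
	return pfn_to_gfn(page_to_pfn(page));  /* PV: MFN; otherwise: PFN */
}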
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e30353575d5d..2ba09c1195c8 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 
 	for (i = 0; i < nr_pages; i++) {
 		err = gnttab_grant_foreign_access(dev->otherend_id,
-						  virt_to_mfn(vaddr), 0);
+						  virt_to_gfn(vaddr), 0);
 		if (err < 0) {
 			xenbus_dev_fatal(dev, err,
 					 "granting access to ring page");
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index b17707ee07d4..ee6d9efd7b76 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
 		goto out_err;
 
 	gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
-			virt_to_mfn(xen_store_interface), 0 /* writable */);
+			virt_to_gfn(xen_store_interface), 0 /* writable */);
 
 	arg.dom = DOMID_SELF;
 	arg.remote_dom = domid;
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 4308fb3cf7c2..3cbe0556de26 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
 enum xenstore_init xen_store_domain_type;
 EXPORT_SYMBOL_GPL(xen_store_domain_type);
 
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
 	if (!page)
 		goto out_err;
 
-	xen_store_mfn = xen_start_info->store_mfn =
-		pfn_to_mfn(virt_to_phys((void *)page) >>
-			   PAGE_SHIFT);
+	xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
 
 	/* Next allocate a local port which xenstored can bind to */
 	alloc_unbound.dom        = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
 		err = xenstored_local_init();
 		if (err)
 			goto out_error;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_PV:
 		xen_store_evtchn = xen_start_info->store_evtchn;
-		xen_store_mfn = xen_start_info->store_mfn;
-		xen_store_interface = mfn_to_virt(xen_store_mfn);
+		xen_store_gfn = xen_start_info->store_mfn;
+		xen_store_interface = gfn_to_virt(xen_store_gfn);
 		break;
 	case XS_HVM:
 		err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
@@ -802,9 +800,9 @@ static int __init xenbus_init(void)
 		err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
 		if (err)
 			goto out_error;
-		xen_store_mfn = (unsigned long)v;
+		xen_store_gfn = (unsigned long)v;
 		xen_store_interface =
-			xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+			xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
 		break;
 	default:
 		pr_warn("Xenstore state unknown\n");
diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c
index 58a5389aec89..cff23872c5a9 100644
--- a/drivers/xen/xlate_mmu.c
+++ b/drivers/xen/xlate_mmu.c
@@ -38,8 +38,8 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
 			    unsigned int domid)
 {
 	int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 		.size = 1,
 		.space = XENMAPSPACE_gmfn_foreign,
 	};
-	xen_ulong_t idx = fgmfn;
+	xen_ulong_t idx = fgfn;
 	xen_pfn_t gpfn = lpfn;
 	int err = 0;
 
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-	xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+	xen_pfn_t *fgfn; /* foreign domain's gfn */
 	pgprot_t prot;
 	domid_t domid;
 	struct vm_area_struct *vma;
 	int index;
 	struct page **pages;
-	struct xen_remap_mfn_info *info;
+	struct xen_remap_gfn_info *info;
 	int *err_ptr;
 	int mapped;
 };
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 	int rc;
 
-	rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+	rc = map_foreign_page(pfn, *info->fgfn, info->domid);
 	*info->err_ptr++ = rc;
 	if (!rc) {
 		set_pte_at(info->vma->vm_mm, addr, ptep, pte);
 		info->mapped++;
 	}
-	info->fgmfn++;
+	info->fgfn++;
 
 	return 0;
 }
 
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 			      unsigned long addr,
-			      xen_pfn_t *mfn, int nr,
+			      xen_pfn_t *gfn, int nr,
 			      int *err_ptr, pgprot_t prot,
 			      unsigned domid,
 			      struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
 	   x86 PVOPS */
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-	data.fgmfn = mfn;
+	data.fgfn = gfn;
 	data.prot  = prot;
 	data.domid = domid;
 	data.vma   = vma;