path: root/drivers/xen/privcmd.c
Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r--  drivers/xen/privcmd.c  44
1 file changed, 22 insertions(+), 22 deletions(-)
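This patch is part of the Xen terminology clean-up that renames privcmd's misnamed "mfn" identifiers to "gfn": the frames handled here are guest frame numbers, not machine frame numbers. As background, a short terminology sketch (not part of the patch; paraphrased from the Xen headers):

/*
 * MFN: machine frame number -- a real host page frame.
 * PFN: pseudo-physical frame number -- the guest's own frame index.
 * GFN: guest frame number -- the frame a guest hands to the hypervisor;
 *      equal to the MFN for PV guests and to the PFN for auto-translated
 *      (HVM/PVH) guests.
 */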
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..c6deb87c5c69 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 				msg->va & PAGE_MASK,
 				msg->mfn, msg->npages,
 				vma->vm_page_prot,
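Only the helper's name changes here; its arguments are untouched. For reference, a sketch of the renamed declaration as it reads in include/xen/xen-ops.h around this series (reconstructed from memory, so treat it as approximate):

int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned int domid,
			       struct page **pages);

Note that msg->mfn in the context line above keeps its old name: privcmd_mmap_entry is user-space ABI, so its field cannot be renamed even though it now carries a GFN.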
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
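The user_gfn/user_err split mirrors the two batch ABIs. A sketch of the user-space structs from include/uapi/xen/privcmd.h (quoted from memory; treat field details as approximate):

struct privcmd_mmapbatch {		/* V1 */
	int num;			/* number of pages to populate */
	domid_t dom;			/* target domain */
	__u64 addr;			/* virtual address to map at */
	xen_pfn_t __user *arr;		/* array of GFNs; error bits are
					   OR-ed back into it on return */
};

struct privcmd_mmapbatch_v2 {
	unsigned int num;
	domid_t dom;
	__u64 addr;
	const xen_pfn_t __user *arr;	/* input-only array of GFNs */
	int __user *err;		/* separate array of error codes */
};

V1 overloads the frame array for error reporting, hence user_gfn; V2 reports clean per-page errnos through err, hence user_err.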
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
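The (int *)gfnp cast above is deliberate: xen_remap_domain_gfn_array() reuses its input buffer for per-frame status, so the same storage holds GFNs on entry and error codes on exit. A minimal stand-alone illustration of that idiom (fake_remap is hypothetical; the real work is a hypercall, and the kernel builds with -fno-strict-aliasing, which this trick relies on):

#include <stdio.h>

typedef unsigned long xen_pfn_t;

/* Consumes each GFN before overwriting the buffer with smaller int
 * status codes; processing in index order keeps the two views from
 * clobbering each other, as in the kernel path above. */
static void fake_remap(xen_pfn_t *gfns, int *errs, int nr)
{
	for (int i = 0; i < nr; i++) {
		xen_pfn_t gfn = gfns[i];	/* read input first */
		errs[i] = (gfn == 0) ? -22 /* -EINVAL */ : 0;
	}
}

int main(void)
{
	xen_pfn_t frames[3] = { 0x1000, 0, 0x2000 };

	fake_remap(frames, (int *)frames, 3);	/* same buffer, two views */
	for (int i = 0; i < 3; i++)
		printf("frame %d: err %d\n", i, ((int *)frames)[i]);
	return 0;
}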
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
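For a V1 caller, success leaves an entry untouched while failure sets its top nibble. A decode sketch, assuming the mask values in include/uapi/xen/privcmd.h (quoted from memory: PRIVCMD_MMAPBATCH_MFN_ERROR is 0xf0000000U, PRIVCMD_MMAPBATCH_PAGED_ERROR is 0x80000000U):

/* Hypothetical user-space helper that picks the error back out of a
 * returned V1 entry. */
static int v1_entry_status(unsigned long gfn)
{
	if ((gfn & 0xf0000000U) == 0xf0000000U)
		return -1;	/* mapping failed outright */
	if (gfn & 0x80000000U)
		return -2;	/* page was paged out; caller may retry */
	return 0;		/* mapped; entry still holds the GFN */
}

The "limitations vis-a-vis 64 bit callers" mentioned in the comment are exactly this overloading: a legitimate 64-bit GFN with bits 28-31 set is indistinguishable from an encoded error.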
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
 	return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
  * the vma with the page info to use later.
  * Returns: 0 if success, otherwise -errno
  */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
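Finally, a hedged end-to-end sketch of how a user-space client drives the V2 batch path whose error handling this patch renames. The device path, header location, and target domid are illustrative assumptions:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>	/* assumed install location of the uapi header */

int main(void)
{
	xen_pfn_t gfns[2] = { 0x1000, 0x1001 };	/* example foreign frames */
	int errs[2] = { 0, 0 };
	struct privcmd_mmapbatch_v2 batch;
	void *addr;
	int fd = open("/dev/xen/privcmd", O_RDWR);	/* path varies by distro */

	if (fd < 0)
		return 1;
	addr = mmap(NULL, 2 * 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	memset(&batch, 0, sizeof(batch));
	batch.num  = 2;
	batch.dom  = 1;			/* example target domain */
	batch.addr = (unsigned long)addr;
	batch.arr  = gfns;
	batch.err  = errs;

	/* On partial failure the kernel fills errs[] per page via the
	 * mmap_return_error() path shown above. */
	if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0)
		for (int i = 0; i < 2; i++)
			printf("page %d: err %d\n", i, errs[i]);
	return 0;
}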