author     Mukesh Rathor <mukesh.rathor@oracle.com>    2012-10-17 20:11:21 -0400
committer  Ian Campbell <ian.campbell@citrix.com>      2012-11-29 07:57:52 -0500
commit     d71f513985c22f1050295d1a7e4327cf9fb060da (patch)
tree       3da00d6cad246972d2cfcd76a945b96efedcac80 /drivers/xen/privcmd.c
parent     9a032e393a8bc888a9b0c898cbdb9db2cee7b536 (diff)
xen: privcmd: support autotranslated physmap guests.
PVH and ARM only support the batch interface. To map a foreign page into
a process, a PFN must first be allocated; the autotranslated path uses
ballooning for that purpose. The returned PFN is then mapped to the
foreign page.
xen_unmap_domain_mfn_range() is introduced to unmap these pages via the
privcmd close call.
Acked-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Mukesh Rathor <mukesh.rathor@oracle.com>
[v1: Fix up privcmd_close]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
[v2: used for ARM too]
Diffstat (limited to 'drivers/xen/privcmd.c')
 -rw-r--r--  drivers/xen/privcmd.c | 69
 1 file changed, 67 insertions(+), 2 deletions(-)
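For context, the interface being enabled below is driven from user space roughly as follows. This is a minimal, hypothetical sketch, not part of the commit: the header path <xen/privcmd.h>, the device node /dev/xen/privcmd (often /proc/xen/privcmd instead), and the helper name map_foreign() are assumptions; struct privcmd_mmapbatch_v2 and IOCTL_PRIVCMD_MMAPBATCH_V2 come from the privcmd uapi header, not from this sketch.

/* Hypothetical user-space sketch (not from this patch).  Assumes the privcmd
 * uapi header is visible as <xen/privcmd.h> (pulling in xen_pfn_t and
 * domid_t) and that fd was opened from /dev/xen/privcmd. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/privcmd.h>

/* Map 'num' frames of foreign domain 'dom' into our address space. */
static void *map_foreign(int fd, domid_t dom, const xen_pfn_t *gmfns,
                         unsigned int num)
{
        size_t len = (size_t)num * getpagesize();
        int *err = calloc(num, sizeof(*err));
        struct privcmd_mmapbatch_v2 batch;
        void *addr;

        if (!err)
                return NULL;

        /* Reserve a VMA backed by the privcmd device; the ioctl fills it in. */
        addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                free(err);
                return NULL;
        }

        batch.num  = num;
        batch.dom  = dom;
        batch.addr = (uint64_t)(uintptr_t)addr;
        batch.arr  = gmfns;
        batch.err  = err;

        /* On an auto-translated dom0 this path now balloons out empty pages
         * (alloc_empty_pages below) and maps the foreign frames into them. */
        if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0) {
                perror("IOCTL_PRIVCMD_MMAPBATCH_V2");
                munmap(addr, len);      /* runs privcmd_close() on the vma */
                addr = NULL;
        }
        free(err);
        return addr;
}

On a classic PV dom0 the mapping goes straight into the page tables; with this patch an auto-translated dom0 gets the same user-visible behaviour by backing the VMA with ballooned pages first, and munmap() now tears the mapping down via the new privcmd_close() handler.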
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index b612267a8cb6..b9d08987a5a5 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,11 +33,14 @@
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/balloon.h>
 
 #include "privcmd.h"
 
 MODULE_LICENSE("GPL");
 
+#define PRIV_VMA_LOCKED ((void *)1)
+
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
@@ -199,6 +202,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	if (!xen_initial_domain())
 		return -EPERM;
 
+	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return -ENOSYS;
+
 	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
 		return -EFAULT;
 
@@ -246,6 +253,7 @@ struct mmap_batch_state {
 	domid_t domain;
 	unsigned long va;
 	struct vm_area_struct *vma;
+	int index;
 	/* A tristate:
 	 * 0 for no errors
 	 * 1 if at least one error has happened (and no
@@ -260,15 +268,24 @@ struct mmap_batch_state {
 	xen_pfn_t __user *user_mfn;
 };
 
+/* auto translated dom0 note: if domU being created is PV, then mfn is
+ * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+ */
 static int mmap_batch_fn(void *data, void *state)
 {
 	xen_pfn_t *mfnp = data;
 	struct mmap_batch_state *st = state;
+	struct vm_area_struct *vma = st->vma;
+	struct page **pages = vma->vm_private_data;
+	struct page *cur_page = NULL;
 	int ret;
 
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		cur_page = pages[st->index++];
+
 	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
 					 st->vma->vm_page_prot, st->domain,
-					 NULL);
+					 &cur_page);
 
 	/* Store error code for second pass. */
 	*(st->err++) = ret;
@@ -304,6 +321,32 @@ static int mmap_return_errors_v1(void *data, void *state)
 	return __put_user(*mfnp, st->user_mfn++);
 }
 
+/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+ * the vma with the page info to use later.
+ * Returns: 0 if success, otherwise -errno
+ */
+static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
+{
+	int rc;
+	struct page **pages;
+
+	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+	if (pages == NULL)
+		return -ENOMEM;
+
+	rc = alloc_xenballooned_pages(numpgs, pages, 0);
+	if (rc != 0) {
+		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
+			numpgs, rc);
+		kfree(pages);
+		return -ENOMEM;
+	}
+	BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+	vma->vm_private_data = pages;
+
+	return 0;
+}
+
 static struct vm_operations_struct privcmd_vm_ops;
 
 static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
@@ -371,10 +414,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 		up_write(&mm->mmap_sem);
 		goto out;
 	}
+	if (xen_feature(XENFEAT_auto_translated_physmap)) {
+		ret = alloc_empty_pages(vma, m.num);
+		if (ret < 0) {
+			up_write(&mm->mmap_sem);
+			goto out;
+		}
+	}
 
 	state.domain = m.dom;
 	state.vma = vma;
 	state.va = m.addr;
+	state.index = 0;
 	state.global_error = 0;
 	state.err = err_array;
 
@@ -439,6 +490,19 @@ static long privcmd_ioctl(struct file *file,
 	return ret;
 }
 
+static void privcmd_close(struct vm_area_struct *vma)
+{
+	struct page **pages = vma->vm_private_data;
+	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+	if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
+		return;
+
+	xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	free_xenballooned_pages(numpgs, pages);
+	kfree(pages);
+}
+
 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -449,6 +513,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 static struct vm_operations_struct privcmd_vm_ops = {
+	.close = privcmd_close,
 	.fault = privcmd_fault
 };
 
@@ -466,7 +531,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
-	return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+	return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
 }
 
 const struct file_operations xen_privcmd_fops = {
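A note on the last hunk: the single-shot check switches from xchg() to cmpxchg() because vm_private_data now does double duty. It starts as NULL, is claimed with the PRIV_VMA_LOCKED sentinel by the ioctl path, is replaced by alloc_empty_pages() with the ballooned-pages array, and is finally consumed by privcmd_close(). A plain xchg() on a second mapping attempt would clobber the pages pointer with the sentinel, whereas cmpxchg() only installs the sentinel while the field is still NULL. Below is a rough user-space model of that lifecycle, using a GCC builtin in place of the kernel's cmpxchg(); the names are taken from the patch, everything else is hypothetical.

#include <stdio.h>
#include <stdlib.h>

#define PRIV_VMA_LOCKED ((void *)1)

/* Stand-in for the vma->vm_private_data field the patch repurposes. */
static void *vm_private_data;

/* Models privcmd_enforce_singleshot_mapping(): claim the vma exactly once.
 * The compare-and-swap only installs the sentinel while the field is still
 * NULL, so it can never overwrite a pages array installed later. */
static int enforce_singleshot_mapping(void)
{
        return __sync_val_compare_and_swap(&vm_private_data, NULL,
                                           PRIV_VMA_LOCKED) == NULL;
}

int main(void)
{
        /* 1. The mmap-batch ioctl claims the vma. */
        printf("first claim:  %d\n", enforce_singleshot_mapping()); /* 1 */
        printf("second claim: %d\n", enforce_singleshot_mapping()); /* 0 */

        /* 2. On auto-translated dom0, alloc_empty_pages() replaces the
         *    sentinel with the array of ballooned pages. */
        void **pages = calloc(4, sizeof(*pages));
        vm_private_data = pages;

        /* 3. privcmd_close() later finds a real array here (not NULL, not
         *    the sentinel) and can unmap and free it. */
        if (vm_private_data && vm_private_data != PRIV_VMA_LOCKED)
                free(vm_private_data);
        return 0;
}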