author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 17:29:16 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-13 17:29:16 -0500
commit     896ea17d3da5f44b2625c9cda9874d7dfe447393 (patch)
tree       089f00dd300a49c81f042e9b52ef32cd1333bdbc /drivers/xen/privcmd.c
parent     c7708fac5a878d6e0f2de0aa19f9749cff4f707f (diff)
parent     6a7ed405114b2a53ccd99631b0636aaeabf71b3e (diff)
Merge tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen
Pull Xen updates from Konrad Rzeszutek Wilk:

 - Add necessary infrastructure to make balloon driver work under ARM.

 - Add /dev/xen/privcmd interfaces to work with ARM and PVH.

 - Improve Xen PCIBack wild-card parsing.

 - Add Xen ACPI PAD (Processor Aggregator) support - so can
   offline/online sockets depending on the power consumption.

 - PVHVM + kexec = use an E820_RESV region for the shared region so we
   don't overwrite said region during kexec reboot.

 - Cleanups, compile fixes.

Fix up some trivial conflicts due to the balloon driver now working on
ARM, and there were changes next to the previous work-arounds that are
now gone.

* tag 'stable/for-linus-3.8-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/PVonHVM: fix compile warning in init_hvm_pv_info
  xen: arm: implement remap interfaces needed for privcmd mappings.
  xen: correctly use xen_pfn_t in remap_domain_mfn_range.
  xen: arm: enable balloon driver
  xen: balloon: allow PVMMU interfaces to be compiled out
  xen: privcmd: support autotranslated physmap guests.
  xen: add pages parameter to xen_remap_domain_mfn_range
  xen/acpi: Move the xen_running_on_version_or_later function.
  xen/xenbus: Remove duplicate inclusion of asm/xen/hypervisor.h
  xen/acpi: Fix compile error by missing decleration for xen_domain.
  xen/acpi: revert pad config check in xen_check_mwait
  xen/acpi: ACPI PAD driver
  xen-pciback: reject out of range inputs
  xen-pciback: simplify and tighten parsing of device IDs
  xen PVonHVM: use E820_Reserved area for shared_info
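For orientation on the privcmd part of this pull: /dev/xen/privcmd's mmap-batch path is what a dom0 toolstack uses to map another guest's frames into its own address space, and the diff below extends it to auto-translated (ARM/PVH) domains. The following is a minimal userspace sketch of that flow, not part of this patch: the map_foreign_frames() helper is hypothetical, it assumes the Xen privcmd UAPI header (xen/privcmd.h, install path varies) and its xen_pfn_t/domid_t types are available and hard-codes a 4 KiB page size, and real toolstacks normally go through libxc wrappers such as xc_map_foreign_bulk() rather than issuing the ioctl directly.

/* Hypothetical sketch: map `num` foreign frames from domain `dom` into our
 * address space via /dev/xen/privcmd using the MMAPBATCH_V2 ioctl.
 */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <xen/privcmd.h>        /* IOCTL_PRIVCMD_MMAPBATCH_V2; header path may vary */

static int map_foreign_frames(domid_t dom, const xen_pfn_t *gfns, int *errs,
                              unsigned int num, void **out)
{
        int fd = open("/dev/xen/privcmd", O_RDWR);
        if (fd < 0)
                return -1;

        /* Reserve a VA range backed by the privcmd VMA. */
        void *addr = mmap(NULL, (size_t)num * 4096, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
        if (addr == MAP_FAILED) {
                close(fd);
                return -1;
        }

        struct privcmd_mmapbatch_v2 batch = {
                .num  = num,
                .dom  = dom,            /* foreign domain owning the frames */
                .addr = (uintptr_t)addr,
                .arr  = gfns,           /* MFNs for PV, PFNs for auto-translated */
                .err  = errs,           /* per-frame error codes written back */
        };

        if (ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH_V2, &batch) < 0) {
                munmap(addr, (size_t)num * 4096);
                close(fd);
                return -1;
        }

        *out = addr;
        return fd;      /* caller keeps fd open while the mapping is in use */
}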
Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r--  drivers/xen/privcmd.c  72
1 file changed, 69 insertions(+), 3 deletions(-)
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 71f5c459b08..0bbbccbb1f1 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -33,11 +33,14 @@
 #include <xen/features.h>
 #include <xen/page.h>
 #include <xen/xen-ops.h>
+#include <xen/balloon.h>
 
 #include "privcmd.h"
 
 MODULE_LICENSE("GPL");
 
+#define PRIV_VMA_LOCKED ((void *)1)
+
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
@@ -178,7 +181,7 @@ static int mmap_mfn_range(void *data, void *state)
                 msg->va & PAGE_MASK,
                 msg->mfn, msg->npages,
                 vma->vm_page_prot,
-                st->domain);
+                st->domain, NULL);
         if (rc < 0)
                 return rc;
 
@@ -199,6 +202,10 @@ static long privcmd_ioctl_mmap(void __user *udata)
         if (!xen_initial_domain())
                 return -EPERM;
 
+        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                return -ENOSYS;
+
         if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
                 return -EFAULT;
 
@@ -246,6 +253,7 @@ struct mmap_batch_state {
         domid_t domain;
         unsigned long va;
         struct vm_area_struct *vma;
+        int index;
         /* A tristate:
          * 0 for no errors
          * 1 if at least one error has happened (and no
@@ -260,14 +268,24 @@ struct mmap_batch_state {
         xen_pfn_t __user *user_mfn;
 };
 
+/* auto translated dom0 note: if domU being created is PV, then mfn is
+ * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+ */
 static int mmap_batch_fn(void *data, void *state)
 {
         xen_pfn_t *mfnp = data;
         struct mmap_batch_state *st = state;
+        struct vm_area_struct *vma = st->vma;
+        struct page **pages = vma->vm_private_data;
+        struct page *cur_page = NULL;
         int ret;
 
+        if (xen_feature(XENFEAT_auto_translated_physmap))
+                cur_page = pages[st->index++];
+
         ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
-                                         st->vma->vm_page_prot, st->domain);
+                                         st->vma->vm_page_prot, st->domain,
+                                         &cur_page);
 
         /* Store error code for second pass. */
         *(st->err++) = ret;
@@ -303,6 +321,32 @@ static int mmap_return_errors_v1(void *data, void *state)
         return __put_user(*mfnp, st->user_mfn++);
 }
 
+/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+ * the vma with the page info to use later.
+ * Returns: 0 if success, otherwise -errno
+ */
+static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
+{
+        int rc;
+        struct page **pages;
+
+        pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
+        if (pages == NULL)
+                return -ENOMEM;
+
+        rc = alloc_xenballooned_pages(numpgs, pages, 0);
+        if (rc != 0) {
+                pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
+                        numpgs, rc);
+                kfree(pages);
+                return -ENOMEM;
+        }
+        BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+        vma->vm_private_data = pages;
+
+        return 0;
+}
+
 static struct vm_operations_struct privcmd_vm_ops;
 
 static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
@@ -370,10 +414,18 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
                 ret = -EINVAL;
                 goto out;
         }
+        if (xen_feature(XENFEAT_auto_translated_physmap)) {
+                ret = alloc_empty_pages(vma, m.num);
+                if (ret < 0) {
+                        up_write(&mm->mmap_sem);
+                        goto out;
+                }
+        }
 
         state.domain = m.dom;
         state.vma = vma;
         state.va = m.addr;
+        state.index = 0;
         state.global_error = 0;
         state.err = err_array;
 
@@ -442,6 +494,19 @@ static long privcmd_ioctl(struct file *file,
         return ret;
 }
 
+static void privcmd_close(struct vm_area_struct *vma)
+{
+        struct page **pages = vma->vm_private_data;
+        int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+
+        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
+                return;
+
+        xen_unmap_domain_mfn_range(vma, numpgs, pages);
+        free_xenballooned_pages(numpgs, pages);
+        kfree(pages);
+}
+
 static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
         printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -452,6 +517,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 
 static struct vm_operations_struct privcmd_vm_ops = {
+        .close = privcmd_close,
         .fault = privcmd_fault
 };
 
@@ -469,7 +535,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
 
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
 {
-        return (xchg(&vma->vm_private_data, (void *)1) == NULL);
+        return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
 }
 
 const struct file_operations xen_privcmd_fops = {
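One detail worth calling out in the hunks above: vm_private_data now carries three different things over a VMA's lifetime - NULL when privcmd_mmap() creates it, the PRIV_VMA_LOCKED sentinel once privcmd_enforce_singleshot_mapping() claims it, and, for auto-translated guests, the ballooned struct page ** installed by alloc_empty_pages() and torn down in privcmd_close(). That is presumably why the claim switches from xchg() to cmpxchg(): a rejected second claim must not overwrite a pointer that was stored after the first claim succeeded. Below is a standalone userspace sketch of that idea; the names mirror the patch, but this is an illustration built on GCC's __atomic builtins, not kernel code.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PRIV_VMA_LOCKED ((void *)1)     /* same sentinel the patch defines */

static void *vm_private_data;           /* stand-in for vma->vm_private_data */

/* Mirrors the reworked privcmd_enforce_singleshot_mapping(): the claim
 * succeeds only while the slot is still NULL, and a failed claim writes
 * nothing back. */
static int enforce_singleshot_mapping(void)
{
        void *expected = NULL;

        return __atomic_compare_exchange_n(&vm_private_data, &expected,
                                           PRIV_VMA_LOCKED, 0,
                                           __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
        static void *pages[4];          /* placeholder for the ballooned pages array */

        assert(enforce_singleshot_mapping());   /* first mmap/mmap-batch ioctl wins */
        assert(!enforce_singleshot_mapping());  /* later attempts are rejected */
        assert(vm_private_data == PRIV_VMA_LOCKED);

        /* alloc_empty_pages() then replaces the sentinel with the real array,
         * which privcmd_close() later unmaps and frees.  An xchg()-style claim
         * would have clobbered this pointer on the rejected second attempt. */
        vm_private_data = pages;
        assert(vm_private_data == pages);

        puts("single-shot handoff ok");
        return 0;
}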