author     Ian Campbell <ian.campbell@citrix.com>                2009-05-20 10:42:14 -0400
committer  Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>  2010-10-20 19:22:33 -0400
commit     f020e2905166e12f9a8f109fe968cb5a9db887e9
tree       de519deb292f099369dc81e65401227016139a94  /drivers/xen/xenfs/privcmd.c
parent     8e3e99918b9ccd6bc2369ddbcd74056f8796e1e0
privcmd: MMAPBATCH: Fix error handling/reporting
On error, IOCTL_PRIVCMD_MMAPBATCH is expected to set the top nibble of
each affected MFN and return 0. Currently it leaves the MFNs unmodified
and returns the number of failures. Therefore:
- reimplement remap_domain_mfn_range() using direct
HYPERVISOR_mmu_update() calls in small batches. The xen_set_domain_pte()
interface does not report errors, and since some failures are
expected/normal, the multicall infrastructure is too noisy.
- return 0 as expected.
- write back the updated MFN list to mmapbatch->arr rather than over
mmapbatch itself, which smashed the caller's stack.
- remap_domain_mfn_range can be static.
With this change I am able to start an HVM domain.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
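[Editor's note] A caller-side sketch of the error convention described above: after this fix the ioctl returns 0 even when some pages failed, so the caller must scan the returned MFN list for entries with the top nibble set. The mask matches the one used in the patch; mapped_ok() is a hypothetical helper name, and uint64_t stands in for xen_pfn_t.

	#include <stdint.h>

	#define MMAPBATCH_MFN_ERROR 0xf0000000U  /* top nibble, as set by the patch */

	/* With the fixed ioctl, a return of 0 no longer means every page
	 * mapped; each entry of the MFN array passed via mmapbatch->arr
	 * must be checked individually. */
	static int mapped_ok(uint64_t mfn)
	{
		return (mfn & MMAPBATCH_MFN_ERROR) != MMAPBATCH_MFN_ERROR;
	}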
Diffstat (limited to 'drivers/xen/xenfs/privcmd.c')
-rw-r--r--	drivers/xen/xenfs/privcmd.c	56
1 file changed, 41 insertions(+), 15 deletions(-)
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index 80526afd3063..438223ae0fc3 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -32,14 +32,16 @@
 #include <xen/features.h>
 #include <xen/page.h>
 
+#define REMAP_BATCH_SIZE 16
+
 #ifndef HAVE_ARCH_PRIVCMD_MMAP
 static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
 #endif
 
 struct remap_data {
 	unsigned long mfn;
-	unsigned domid;
 	pgprot_t prot;
+	struct mmu_update *mmu_update;
 };
 
 static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
@@ -48,17 +50,23 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	struct remap_data *rmd = data;
 	pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot));
 
-	xen_set_domain_pte(ptep, pte, rmd->domid);
+	rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr;
+	rmd->mmu_update->val = pte_val_ma(pte);
+	rmd->mmu_update++;
 
 	return 0;
 }
 
-int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
-			   unsigned long mfn, unsigned long size,
-			   pgprot_t prot, unsigned domid)
+static int remap_domain_mfn_range(struct vm_area_struct *vma,
+				  unsigned long addr,
+				  unsigned long mfn, int nr,
+				  pgprot_t prot, unsigned domid)
 {
 	struct remap_data rmd;
-	int err;
+	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
+	int batch;
+	unsigned long range;
+	int err = 0;
 
 	prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
 
@@ -66,10 +74,29 @@ int remap_domain_mfn_range(struct vm_area_struct *vma, unsigned long addr,
 
 	rmd.mfn = mfn;
 	rmd.prot = prot;
-	rmd.domid = domid;
 
-	err = apply_to_page_range(vma->vm_mm, addr, size,
-				  remap_area_mfn_pte_fn, &rmd);
+	while (nr) {
+		batch = min(REMAP_BATCH_SIZE, nr);
+		range = (unsigned long)batch << PAGE_SHIFT;
+
+		rmd.mmu_update = mmu_update;
+		err = apply_to_page_range(vma->vm_mm, addr, range,
+					  remap_area_mfn_pte_fn, &rmd);
+		if (err)
+			goto out;
+
+		err = -EFAULT;
+		if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0)
+			goto out;
+
+		nr -= batch;
+		addr += range;
+	}
+
+	err = 0;
+out:
+
+	flush_tlb_all();
 
 	return err;
 }
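[Editor's note] The loop above trades one hypercall per page for one per batch: struct mmu_update is two 64-bit words, so the 16-entry on-stack array costs only 256 bytes, while a single HYPERVISOR_mmu_update() applies up to 16 PTE writes at once. A standalone sketch of the flush step; flush_batch() is a hypothetical name, and the hypercall arguments mirror the call in the hunk above.

	#include <linux/errno.h>
	#include <xen/interface/xen.h>
	#include <asm/xen/hypercall.h>

	/* Hypothetical standalone rendering of the batch flush used above. */
	static int flush_batch(struct mmu_update *req, int count, unsigned domid)
	{
		/*
		 * req[i].ptr holds the machine address of a PTE; since
		 * MMU_NORMAL_PT_UPDATE is 0, the bare maddr requests a
		 * normal page-table write.  req[i].val is the new PTE value.
		 * One hypercall applies all 'count' updates; a negative
		 * return means at least one of them was rejected.
		 */
		if (HYPERVISOR_mmu_update(req, count, NULL, domid) < 0)
			return -EFAULT;
		return 0;
	}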
@@ -158,7 +185,7 @@ static int traverse_pages(unsigned nelem, size_t size,
 {
 	void *pagedata;
 	unsigned pageidx;
-	int ret;
+	int ret = 0;
 
 	BUG_ON(size > PAGE_SIZE);
 
@@ -208,8 +235,7 @@ static int mmap_mfn_range(void *data, void *state)
 
 	rc = remap_domain_mfn_range(vma,
 				    msg->va & PAGE_MASK,
-				    msg->mfn,
-				    msg->npages << PAGE_SHIFT,
+				    msg->mfn, msg->npages,
 				    vma->vm_page_prot,
 				    st->domain);
 	if (rc < 0)
@@ -290,7 +316,7 @@ static int mmap_batch_fn(void *data, void *state)
 	struct mmap_batch_state *st = state;
 
 	if (remap_domain_mfn_range(st->vma, st->va & PAGE_MASK,
-				   *mfnp, PAGE_SIZE,
+				   *mfnp, 1,
 				   st->vma->vm_page_prot, st->domain) < 0) {
 		*mfnp |= 0xf0000000U;
 		st->err++;
@@ -362,9 +388,9 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
 	up_write(&mm->mmap_sem);
 
 	if (state.err > 0) {
-		ret = state.err;
+		ret = 0;
 
-		state.user = udata;
+		state.user = m.arr;
 		traverse_pages(m.num, sizeof(xen_pfn_t),
 			       &pagelist,
 			       mmap_return_errors, &state);
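[Editor's note] This last hunk is the stack-smashing fix called out in the commit message. state.user previously pointed at udata, i.e. at the caller's struct privcmd_mmapbatch itself, so writing m.num MFN entries back through it overran the small struct and whatever followed it in the caller's memory; pointing it at m.arr sends the writeback to the buffer the caller actually supplied. A sketch of the layout involved, assuming the struct matches Xen's privcmd interface header of this era; the field comments are mine.

	/* udata points at this small struct (often a stack variable in the
	 * caller), not at the m.num-entry MFN array it describes. */
	struct privcmd_mmapbatch {
		int num;               /* writeback is num * sizeof(xen_pfn_t) bytes */
		domid_t dom;           /* target domain */
		__u64 addr;            /* virtual address to map at */
		xen_pfn_t __user *arr; /* where the updated MFN list belongs */
	};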