Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--  arch/x86/xen/mmu.c | 207
1 file changed, 83 insertions(+), 124 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 65083ad63b6f..dd151b2045b0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2436,99 +2436,11 @@ void __init xen_hvm_init_mmu_ops(void)
 }
 #endif
 
-#ifdef CONFIG_XEN_PVH
-/*
- * Map foreign gfn (fgfn), to local pfn (lpfn). This for the user
- * space creating new guest on pvh dom0 and needing to map domU pages.
- */
-static int xlate_add_to_p2m(unsigned long lpfn, unsigned long fgfn,
-			    unsigned int domid)
-{
-	int rc, err = 0;
-	xen_pfn_t gpfn = lpfn;
-	xen_ulong_t idx = fgfn;
-
-	struct xen_add_to_physmap_range xatp = {
-		.domid = DOMID_SELF,
-		.foreign_domid = domid,
-		.size = 1,
-		.space = XENMAPSPACE_gmfn_foreign,
-	};
-	set_xen_guest_handle(xatp.idxs, &idx);
-	set_xen_guest_handle(xatp.gpfns, &gpfn);
-	set_xen_guest_handle(xatp.errs, &err);
-
-	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
-	if (rc < 0)
-		return rc;
-	return err;
-}
-
-static int xlate_remove_from_p2m(unsigned long spfn, int count)
-{
-	struct xen_remove_from_physmap xrp;
-	int i, rc;
-
-	for (i = 0; i < count; i++) {
-		xrp.domid = DOMID_SELF;
-		xrp.gpfn = spfn+i;
-		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
-		if (rc)
-			break;
-	}
-	return rc;
-}
-
-struct xlate_remap_data {
-	unsigned long fgfn; /* foreign domain's gfn */
-	pgprot_t prot;
-	domid_t domid;
-	int index;
-	struct page **pages;
-};
-
-static int xlate_map_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
-			    void *data)
-{
-	int rc;
-	struct xlate_remap_data *remap = data;
-	unsigned long pfn = page_to_pfn(remap->pages[remap->index++]);
-	pte_t pteval = pte_mkspecial(pfn_pte(pfn, remap->prot));
-
-	rc = xlate_add_to_p2m(pfn, remap->fgfn, remap->domid);
-	if (rc)
-		return rc;
-	native_set_pte(ptep, pteval);
-
-	return 0;
-}
-
-static int xlate_remap_gfn_range(struct vm_area_struct *vma,
-				 unsigned long addr, unsigned long mfn,
-				 int nr, pgprot_t prot, unsigned domid,
-				 struct page **pages)
-{
-	int err;
-	struct xlate_remap_data pvhdata;
-
-	BUG_ON(!pages);
-
-	pvhdata.fgfn = mfn;
-	pvhdata.prot = prot;
-	pvhdata.domid = domid;
-	pvhdata.index = 0;
-	pvhdata.pages = pages;
-	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
-				  xlate_map_pte_fn, &pvhdata);
-	flush_tlb_all();
-	return err;
-}
-#endif
-
 #define REMAP_BATCH_SIZE 16
 
 struct remap_data {
-	unsigned long mfn;
+	xen_pfn_t *mfn;
+	bool contiguous;
 	pgprot_t prot;
 	struct mmu_update *mmu_update;
 };
@@ -2537,7 +2449,14 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 				 unsigned long addr, void *data)
 {
 	struct remap_data *rmd = data;
-	pte_t pte = pte_mkspecial(mfn_pte(rmd->mfn++, rmd->prot));
+	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));
+
+	/* If we have a contiguous range, just update the mfn itself;
+	   else advance the pointer to the next mfn. */
+	if (rmd->contiguous)
+		(*rmd->mfn)++;
+	else
+		rmd->mfn++;
 
 	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr;
 	rmd->mmu_update->val = pte_val_ma(pte);
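
To see the two cursor modes of remap_area_mfn_pte_fn in isolation, here is a minimal user-space sketch; struct cursor and next_frame are hypothetical names invented for this illustration, not part of the patch:

#include <stdio.h>

typedef unsigned long xen_pfn_t;

struct cursor {
	xen_pfn_t *mfn;		/* current frame, or start of a contiguous run */
	int contiguous;		/* 1: one start frame, bump its value;
				   0: array of frames, bump the pointer */
};

static xen_pfn_t next_frame(struct cursor *c)
{
	xen_pfn_t frame = *c->mfn;

	if (c->contiguous)
		(*c->mfn)++;	/* same slot, successive frame numbers */
	else
		c->mfn++;	/* successive slots of the caller's array */
	return frame;
}

int main(void)
{
	xen_pfn_t start = 100;
	xen_pfn_t array[3] = { 7, 42, 13 };
	struct cursor a = { &start, 1 }, b = { array, 0 };

	for (int i = 0; i < 3; i++)
		printf("contiguous: %lu  array: %lu\n",
		       next_frame(&a), next_frame(&b));
	/* prints 100/7, then 101/42, then 102/13 */
	return 0;
}
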
@@ -2546,26 +2465,26 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
 	return 0;
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
-			       unsigned long addr,
-			       xen_pfn_t mfn, int nr,
-			       pgprot_t prot, unsigned domid,
-			       struct page **pages)
-
+static int do_remap_mfn(struct vm_area_struct *vma,
+			unsigned long addr,
+			xen_pfn_t *mfn, int nr,
+			int *err_ptr, pgprot_t prot,
+			unsigned domid,
+			struct page **pages)
 {
+	int err = 0;
 	struct remap_data rmd;
 	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
-	int batch;
 	unsigned long range;
-	int err = 0;
+	int mapped = 0;
 
 	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
 	if (xen_feature(XENFEAT_auto_translated_physmap)) {
 #ifdef CONFIG_XEN_PVH
 		/* We need to update the local page tables and the xen HAP */
-		return xlate_remap_gfn_range(vma, addr, mfn, nr, prot,
-					     domid, pages);
+		return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+						 prot, domid, pages);
 #else
 		return -EINVAL;
 #endif
@@ -2573,9 +2492,15 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 
 	rmd.mfn = mfn;
 	rmd.prot = prot;
+	/* We use err_ptr to indicate whether we are doing a contiguous
+	 * mapping or a discontiguous mapping. */
+	rmd.contiguous = !err_ptr;
 
 	while (nr) {
-		batch = min(REMAP_BATCH_SIZE, nr);
+		int index = 0;
+		int done = 0;
+		int batch = min(REMAP_BATCH_SIZE, nr);
+		int batch_left = batch;
 		range = (unsigned long)batch << PAGE_SHIFT;
 
 		rmd.mmu_update = mmu_update;
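
The index/done/batch_left bookkeeping that the next hunk wraps around HYPERVISOR_mmu_update can be exercised in a small stand-alone sketch; fake_mmu_update is a made-up stand-in for the hypercall that reports partial progress through *done and fails on one frame:

#include <stdio.h>

/* Stand-in for HYPERVISOR_mmu_update: pretends to map frames
 * [first, first + left), but fails on frame 5; *done reports how many
 * frames succeeded before the failure, like the real hypercall. */
static int fake_mmu_update(int first, int left, int *done)
{
	for (*done = 0; *done < left; (*done)++)
		if (first + *done == 5)
			return -1;
	return 0;
}

int main(void)
{
	int batch = 8, batch_left = batch, index = 0, mapped = 0;
	int err_ptr[8];

	do {
		int i, done = 0;
		int err = fake_mmu_update(index, batch_left, &done);

		for (i = index; i < index + done; i++)
			err_ptr[i] = 0;		/* these frames went in */
		if (err < 0) {
			err_ptr[i] = err;	/* i == index + done */
			done++;			/* skip the failed frame */
		} else
			mapped += done;
		batch_left -= done;
		index += done;
	} while (batch_left);

	for (index = 0; index < batch; index++)
		printf("frame %d: %d\n", index, err_ptr[index]);
	printf("mapped (counted) = %d\n", mapped);	/* prints 2 */
	return 0;
}

Note, as the simulation shows, that mapped only counts frames from sub-calls that completed without error; frames that succeeded ahead of a failure are reported as 0 in err_ptr[] but are not added to mapped, so the per-frame status array is the authoritative record for the array variant.
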
@@ -2584,23 +2509,72 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
 		if (err)
 			goto out;
 
-		err = HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid);
-		if (err < 0)
-			goto out;
+		/* We record the error for each page that fails, but
+		 * continue mapping until the whole set is done */
+		do {
+			int i;
+
+			err = HYPERVISOR_mmu_update(&mmu_update[index],
+						    batch_left, &done, domid);
+
+			/*
+			 * @err_ptr may be the same buffer as @mfn, so
+			 * only clear it after each chunk of @mfn is
+			 * used.
+			 */
+			if (err_ptr) {
+				for (i = index; i < index + done; i++)
+					err_ptr[i] = 0;
+			}
+			if (err < 0) {
+				if (!err_ptr)
+					goto out;
+				err_ptr[i] = err;
+				done++; /* Skip failed frame. */
+			} else
+				mapped += done;
+			batch_left -= done;
+			index += done;
+		} while (batch_left);
 
 		nr -= batch;
 		addr += range;
+		if (err_ptr)
+			err_ptr += batch;
 	}
-
-	err = 0;
 out:
 
 	xen_flush_tlb_all();
 
-	return err;
+	return err < 0 ? err : mapped;
+}
+
+int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t mfn, int nr,
+			       pgprot_t prot, unsigned domid,
+			       struct page **pages)
+{
+	return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
 }
 EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
 
+int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+			       unsigned long addr,
+			       xen_pfn_t *mfn, int nr,
+			       int *err_ptr, pgprot_t prot,
+			       unsigned domid, struct page **pages)
+{
+	/* We BUG_ON because it's a programmer error to pass a NULL err_ptr;
+	 * the consequence later is that the wrong memory is silently mapped
+	 * in, which is very hard to trace back to its actual cause.
+	 */
+	BUG_ON(err_ptr == NULL);
+	return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+}
+EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+
+
 /* Returns: 0 success */
 int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 			       int numpgs, struct page **pages)
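
Before the final hunk, a hedged sketch of how a driver might call the two exported entry points added above; the function and its scaffolding here are illustrative assumptions, not code from this patch:

#include <xen/xen-ops.h>	/* xen_remap_domain_mfn_array() etc. */

/* Illustrative only: map @nr foreign frames listed in @mfns into @vma,
 * recording a per-frame status in @errs (0 or a negative errno). */
static int example_map_foreign(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *mfns, int *errs, int nr,
			       domid_t domid, struct page **pages)
{
	int mapped;

	/* The array variant requires a non-NULL err array (see the
	 * BUG_ON above); the range variant would take a single start
	 * frame and no err array. */
	mapped = xen_remap_domain_mfn_array(vma, addr, mfns, nr, errs,
					    vma->vm_page_prot, domid, pages);
	if (mapped < 0)
		return mapped;	/* wholesale failure */

	/* mapped may be less than nr; per-frame results are in errs[]. */
	return 0;
}
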
@@ -2609,22 +2583,7 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 		return 0;
 
 #ifdef CONFIG_XEN_PVH
-	while (numpgs--) {
-		/*
-		 * The mmu has already cleaned up the process mmu
-		 * resources at this point (lookup_address will return
-		 * NULL).
-		 */
-		unsigned long pfn = page_to_pfn(pages[numpgs]);
-
-		xlate_remove_from_p2m(pfn, 1);
-	}
-	/*
-	 * We don't need to flush tlbs because as part of
-	 * xlate_remove_from_p2m, the hypervisor will do tlb flushes
-	 * after removing the p2m entries from the EPT/NPT
-	 */
-	return 0;
+	return xen_xlate_unmap_gfn_range(vma, numpgs, pages);
 #else
 	return -EINVAL;
 #endif
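
Finally, a matching teardown sketch for the unmap side; the wrapper is again a hypothetical example, with free_xenballooned_pages mentioned only as the usual way such caller-provided pages are returned:

#include <xen/xen-ops.h>

/* Illustrative teardown: on auto-translated (PVH) guests the pages
 * backing the mapping were allocated by the caller, so unmap first,
 * then release them. Error handling is elided for brevity. */
static void example_unmap_foreign(struct vm_area_struct *vma,
				  int numpgs, struct page **pages)
{
	xen_unmap_domain_mfn_range(vma, numpgs, pages);
	/* pages[] can now be returned, e.g. via free_xenballooned_pages(). */
}
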