path: root/arch/x86/xen/enlighten.c
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--  arch/x86/xen/enlighten.c | 40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 	pte_t pte;
 	unsigned long pfn;
 	struct page *page;
+	unsigned char dummy;
 
 	ptep = lookup_address((unsigned long)v, &level);
 	BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 
 	pte = pfn_pte(pfn, prot);
 
+	/*
+	 * Careful: update_va_mapping() will fail if the virtual address
+	 * we're poking isn't populated in the page tables. We don't
+	 * need to worry about the direct map (that's always in the page
+	 * tables), but we need to be careful about vmap space. In
+	 * particular, the top level page table can lazily propagate
+	 * entries between processes, so if we've switched mms since we
+	 * vmapped the target in the first place, we might not have the
+	 * top-level page table entry populated.
+	 *
+	 * We disable preemption because we want the same mm active when
+	 * we probe the target and when we issue the hypercall. We'll
+	 * have the same nominal mm, but if we're a kernel thread, lazy
+	 * mm dropping could change our pgd.
+	 *
+	 * Out of an abundance of caution, this uses __get_user() to fault
+	 * in the target address just in case there's some obscure case
+	 * in which the target address isn't readable.
+	 */
+
+	preempt_disable();
+
+	pagefault_disable();	/* Avoid warnings due to being atomic. */
+	__get_user(dummy, (unsigned char __user __force *)v);
+	pagefault_enable();
+
 	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
 		BUG();
 
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
 				BUG();
 	} else
 		kmap_flush_unused();
+
+	preempt_enable();
 }
 
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
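Read together, the three set_aliased_prot() hunks above wrap the existing PTE update in a preempt-off probe of the target address. The fragment below is a condensed sketch of the patched flow, not a copy of the full function: the pfn/page setup and the direct-map/HighMem alias handling that fall between the hunks are elided or assumed, and only the interfaces the diff itself uses (lookup_address(), pfn_pte(), __get_user(), HYPERVISOR_update_va_mapping()) appear.

/* Condensed sketch of set_aliased_prot() after this patch. */
static void set_aliased_prot_sketch(void *v, pgprot_t prot)
{
	pte_t *ptep;
	pte_t pte;
	unsigned long pfn;
	unsigned int level;
	unsigned char dummy;

	ptep = lookup_address((unsigned long)v, &level);
	BUG_ON(ptep == NULL);

	pfn = pte_pfn(*ptep);	/* assumed: the elided setup derives the pfn from *ptep */
	pte = pfn_pte(pfn, prot);

	preempt_disable();	/* keep one mm active across probe and hypercall */

	pagefault_disable();	/* avoid warnings due to being atomic (per the diff's comment) */
	__get_user(dummy, (unsigned char __user __force *)v);	/* fault in v's top-level entry */
	pagefault_enable();

	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
		BUG();

	/* ... direct-map / HighMem alias update elided (see the third hunk's context) ... */

	preempt_enable();
}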
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
+	/*
+	 * We need to mark the all aliases of the LDT pages RO. We
+	 * don't need to call vm_flush_aliases(), though, since that's
+	 * only responsible for flushing aliases out the TLBs, not the
+	 * page tables, and Xen will flush the TLB for us if needed.
+	 *
+	 * To avoid confusing future readers: none of this is necessary
+	 * to load the LDT. The hypervisor only checks this when the
+	 * LDT is faulted in due to subsequent descriptor access.
+	 */
+
 	for(i = 0; i < entries; i += entries_per_page)
 		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
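The xen_alloc_ldt() change walks the vmalloc'ed LDT a page at a time: with 4 KiB pages and 8-byte descriptors, entries_per_page = 4096 / 8 = 512, so a maximal 8192-entry LDT (64 KiB) results in 16 set_aliased_prot(..., PAGE_KERNEL_RO) calls. For symmetry, the teardown path flips the aliases back to writable with the same stride; the sketch below shows the shape of that counterpart (the name xen_free_ldt_sketch and its body are illustrative, not taken from this diff).

/* Illustrative counterpart: make every page-sized chunk of the LDT writable again. */
static void xen_free_ldt_sketch(struct desc_struct *ldt, unsigned entries)
{
	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
	int i;

	for (i = 0; i < entries; i += entries_per_page)
		set_aliased_prot(ldt + i, PAGE_KERNEL);
}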