author    Jeremy Fitzhardinge <jeremy@goop.org>    2008-07-27 11:45:02 -0400
committer Ingo Molnar <mingo@elte.hu>              2008-07-28 08:26:27 -0400
commit    a05d2ebab28011c2f3f520833f4bfdd2fd1b9c02 (patch)
tree      e1e3d5b61ee717d8e044c6a46af8d7f2ac06f5e5
parent    64f53a0492b4bc11868307990bb8f7c1e0764f89 (diff)
xen: fix allocation and use of large ldts
When the ldt gets to more than 1 page in size, the kernel uses vmalloc
to allocate it.  This means that:

 - when making the ldt RO, we must update the pages in both the vmalloc
   mapping and the linear mapping to make sure there are no RW aliases.

 - we need to use arbitrary_virt_to_machine to compute the machine addr
   for each update

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   arch/x86/xen/enlighten.c   51
1 file changed, 41 insertions(+), 10 deletions(-)
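For context on the commit message's first premise (that an LDT larger than one
page is vmalloc-allocated), the sketch below illustrates the size arithmetic;
it is not part of this patch.  LDT_ENTRY_SIZE and LDT_ENTRIES are the kernel's
own constants (8 bytes per descriptor, up to 8192 descriptors, i.e. a 64 KiB
table), while alloc_ldt_storage() is a hypothetical helper named here only for
illustration, approximating the decision the generic x86 LDT code makes.

/*
 * Illustrative sketch, not the patched code: show why a large LDT ends
 * up in vmalloc space and therefore has a second mapping to fix up.
 */
#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <asm/desc.h>		/* for LDT_ENTRY_SIZE; header placement assumed */

static void *alloc_ldt_storage(unsigned entries)	/* hypothetical helper */
{
	unsigned size = entries * LDT_ENTRY_SIZE;

	if (size > PAGE_SIZE)
		/* More than 512 entries: vmalloc'd, so the pages are also
		 * reachable through the linear (lowmem) mapping -- the RW
		 * alias that set_aliased_prot() below takes care of. */
		return vmalloc(size);

	return (void *)__get_free_page(GFP_KERNEL);	/* fits in one page */
}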
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index e2767c28dac7..b011e4a5dbbe 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -325,24 +325,55 @@ static unsigned long xen_store_tr(void)
 	return 0;
 }
 
+/*
+ * If 'v' is a vmalloc mapping, then find the linear mapping of the
+ * page (if any) and also set its protections to match:
+ */
+static void set_aliased_prot(void *v, pgprot_t prot)
+{
+	int level;
+	pte_t *ptep;
+	pte_t pte;
+	unsigned long pfn;
+	struct page *page;
+
+	ptep = lookup_address((unsigned long)v, &level);
+	BUG_ON(ptep == NULL);
+
+	pfn = pte_pfn(*ptep);
+	page = pfn_to_page(pfn);
+
+	pte = pfn_pte(pfn, prot);
+
+	if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
+		BUG();
+
+	if (!PageHighMem(page)) {
+		void *av = __va(PFN_PHYS(pfn));
+
+		if (av != v)
+			if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
+				BUG();
+	} else
+		kmap_flush_unused();
+}
+
 static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
 {
-	unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
-	void *v = ldt;
+	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
-	for(i = 0; i < pages; i += PAGE_SIZE)
-		make_lowmem_page_readonly(v + i);
+	for(i = 0; i < entries; i += entries_per_page)
+		set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
 }
 
 static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
 {
-	unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
-	void *v = ldt;
+	const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
 	int i;
 
-	for(i = 0; i < pages; i += PAGE_SIZE)
-		make_lowmem_page_readwrite(v + i);
+	for(i = 0; i < entries; i += entries_per_page)
+		set_aliased_prot(ldt + i, PAGE_KERNEL);
 }
 
 static void xen_set_ldt(const void *addr, unsigned entries)
@@ -446,7 +477,7 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
 				const void *ptr)
 {
 	unsigned long lp = (unsigned long)&dt[entrynum];
-	xmaddr_t mach_lp = virt_to_machine(lp);
+	xmaddr_t mach_lp = arbitrary_virt_to_machine(lp);
 	u64 entry = *(u64 *)ptr;
 
 	preempt_disable();
@@ -579,7 +610,7 @@ static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
 }
 
 static void xen_load_sp0(struct tss_struct *tss,
-			  struct thread_struct *thread)
+			 struct thread_struct *thread)
 {
 	struct multicall_space mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
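On the commit message's second point: virt_to_machine() translates through
__pa(), which is only valid for the linear (lowmem) mapping, so it gives the
wrong answer for an LDT entry that lives in a vmalloc mapping.  The sketch
below is a conceptual illustration of what the page-table walk in
arbitrary_virt_to_machine() provides, not the actual Xen implementation;
sketch_virt_to_machine() is a made-up name.

#include <linux/bug.h>
#include <asm/pgtable.h>	/* lookup_address() */
#include <asm/xen/page.h>	/* xmaddr_t, XMADDR(), pte_mfn() */

/*
 * Conceptual sketch only -- not the real arbitrary_virt_to_machine().
 * Walking the page tables resolves any mapped virtual address (vmalloc'd
 * or linear) to the machine frame that actually backs it, instead of
 * assuming __va()/__pa() symmetry the way virt_to_machine() does.
 */
static xmaddr_t sketch_virt_to_machine(unsigned long vaddr)
{
	unsigned int level;
	pte_t *ptep = lookup_address(vaddr, &level);
	unsigned long offset = vaddr & ~PAGE_MASK;

	BUG_ON(ptep == NULL);

	/* pte_mfn() yields the machine frame number behind this mapping. */
	return XMADDR(((u64)pte_mfn(*ptep) << PAGE_SHIFT) + offset);
}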