author	Jeremy Fitzhardinge <jeremy@goop.org>	2010-10-13 19:02:24 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2010-10-13 19:07:13 -0400
commit	fef5ba797991f9335bcfc295942b684f9bf613a1 (patch)
tree	25aa49f656def959f5380d1320ebbf1f469e3c07 /arch
parent	c7fc2de0c83dbd2eaf759c5cd0e2b9cf1eb4df3a (diff)
xen: Cope with unmapped pages when initializing kernel pagetable
Xen requires that all pages containing pagetable entries be mapped read-only. If pages used for the initial pagetable are already mapped, we can change the mapping to RO. However, if they are initially unmapped, we need to make sure that when they are later mapped, they are also mapped RO.

We do this by knowing that the kernel pagetable memory is pre-allocated in the range e820_table_start - e820_table_end, so any pfn within this range should be mapped read-only. However, the pagetable setup code early_ioremaps the pages to write their entries, so we must make sure that mappings created in the early_ioremap fixmap area are mapped RW. (Those mappings are removed before the pages are presented to Xen as pagetable pages.)

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
LKML-Reference: <4CB63A80.8060702@goop.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
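As a condensed sketch of the rule described above (the actual change is the mask_rw_pte() hunk in arch/x86/xen/mmu.c below; the standalone function name used here is only illustrative):

/*
 * Illustrative sketch only -- the real logic lands in mask_rw_pte() below.
 * A pfn in the pre-allocated pagetable range [e820_table_start,
 * e820_table_end) must be mapped read-only, except when the pte being
 * written sits in the early_ioremap fixmap, which needs RW access so the
 * pagetable entries can be filled in before the pages go to Xen.
 */
static __init pte_t xen_pagetable_pte_fixup(pte_t *ptep, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!is_early_ioremap_ptep(ptep) &&
	    pfn >= e820_table_start && pfn < e820_table_end)
		pte = pte_wrprotect(pte);

	return pte;
}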
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/io.h	1
-rw-r--r--	arch/x86/mm/ioremap.c	5
-rw-r--r--	arch/x86/xen/mmu.c	26
3 files changed, 24 insertions, 8 deletions
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 30a3e9776123..66aee6c4123b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -348,6 +348,7 @@ extern void __iomem *early_memremap(resource_size_t phys_addr,
 				    unsigned long size);
 extern void early_iounmap(void __iomem *addr, unsigned long size);
 extern void fixup_early_ioremap(void);
+extern bool is_early_ioremap_ptep(pte_t *ptep);
 
 #define IO_SPACE_LIMIT 0xffff
 
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 3ba6e0608c55..0369843511dc 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -362,6 +362,11 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 	return &bm_pte[pte_index(addr)];
 }
 
+bool __init is_early_ioremap_ptep(pte_t *ptep)
+{
+	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
+}
+
 static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
 
 void __init early_ioremap_init(void)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 4fe04ac0bae0..7d55e9ee3a76 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -56,6 +56,7 @@
 #include <asm/e820.h>
 #include <asm/linkage.h>
 #include <asm/page.h>
+#include <asm/init.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -360,7 +361,8 @@ void make_lowmem_page_readonly(void *vaddr)
 	unsigned int level;
 
 	pte = lookup_address(address, &level);
-	BUG_ON(pte == NULL);
+	if (pte == NULL)
+		return;		/* vaddr missing */
 
 	ptev = pte_wrprotect(*pte);
 
@@ -375,7 +377,8 @@ void make_lowmem_page_readwrite(void *vaddr)
 	unsigned int level;
 
 	pte = lookup_address(address, &level);
-	BUG_ON(pte == NULL);
+	if (pte == NULL)
+		return;		/* vaddr missing */
 
 	ptev = pte_mkwrite(*pte);
 
@@ -1509,13 +1512,25 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
 static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 {
+	unsigned long pfn = pte_pfn(pte);
+
+#ifdef CONFIG_X86_32
 	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
 	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
 		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
 			       pte_val_ma(pte));
+#endif
+
+	/*
+	 * If the new pfn is within the range of the newly allocated
+	 * kernel pagetable, and it isn't being mapped into an
+	 * early_ioremap fixmap slot, make sure it is RO.
+	 */
+	if (!is_early_ioremap_ptep(ptep) &&
+	    pfn >= e820_table_start && pfn < e820_table_end)
+		pte = pte_wrprotect(pte);
 
 	return pte;
 }
@@ -1528,7 +1543,6 @@ static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
 
 	xen_set_pte(ptep, pte);
 }
-#endif
 
 static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 {
@@ -1973,11 +1987,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 	.alloc_pmd_clone = paravirt_nop,
 	.release_pmd = xen_release_pmd_init,
 
-#ifdef CONFIG_X86_64
-	.set_pte = xen_set_pte,
-#else
 	.set_pte = xen_set_pte_init,
-#endif
 	.set_pte_at = xen_set_pte_at,
 	.set_pmd = xen_set_pmd_hyper,
 