Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 53 ++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index aef7af92b28b..02d752460371 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1054,7 +1054,7 @@ void xen_mm_pin_all(void)
  * that's before we have page structures to store the bits.  So do all
  * the book-keeping now.
  */
-static __init int xen_mark_pinned(struct mm_struct *mm, struct page *page,
+static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
                                   enum pt_level level)
 {
         SetPagePinned(page);
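
This hunk, and a dozen like it below, make the same mechanical cleanup: the __init section annotation moves from before the return type to after it. Both placements compile, but the kernel's preferred ordering is storage class, then type, then attribute, and sticking to it keeps tools such as checkpatch and sparse quiet. Schematically (a hypothetical declaration, not from this file):

    static __init int f(void);   /* accepted by gcc, but unconventional */
    static int __init f(void);   /* preferred kernel ordering */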
@@ -1187,7 +1187,7 @@ static void drop_other_mm_ref(void *info)
 
         active_mm = percpu_read(cpu_tlbstate.active_mm);
 
-        if (active_mm == mm)
+        if (active_mm == mm && percpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
                 leave_mm(smp_processor_id());
 
         /* If this cpu still has a stale cr3 reference, then make sure
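
The added cpu_tlbstate.state test closes a race: leave_mm() may only be called for a CPU that is in lazy TLB mode, and kernels of this vintage BUG() if it is called while the state is still TLBSTATE_OK. Roughly, from arch/x86/mm/tlb.c of the same era (quoted from memory, so treat it as a sketch rather than the exact source):

    void leave_mm(int cpu)
    {
            if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
                    BUG();
            cpumask_clear_cpu(cpu,
                              mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
            load_cr3(swapper_pg_dir);
    }

With the extra check, drop_other_mm_ref() only enters leave_mm() when the CPU really is lazy, instead of tripping that BUG().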
@@ -1271,13 +1271,27 @@ void xen_exit_mmap(struct mm_struct *mm)
         spin_unlock(&mm->page_table_lock);
 }
 
-static __init void xen_pagetable_setup_start(pgd_t *base)
+static void __init xen_pagetable_setup_start(pgd_t *base)
 {
 }
 
+static void __init xen_mapping_pagetable_reserve(u64 start, u64 end)
+{
+        /* reserve the range used */
+        native_pagetable_reserve(start, end);
+
+        /* set as RW the rest */
+        printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
+                        PFN_PHYS(pgt_buf_top));
+        while (end < PFN_PHYS(pgt_buf_top)) {
+                make_lowmem_page_readwrite(__va(end));
+                end += PAGE_SIZE;
+        }
+}
+
 static void xen_post_allocator_init(void);
 
-static __init void xen_pagetable_setup_done(pgd_t *base)
+static void __init xen_pagetable_setup_done(pgd_t *base)
 {
         xen_setup_shared_info();
         xen_post_allocator_init();
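
The new xen_mapping_pagetable_reserve() implements the x86_init.mapping.pagetable_reserve hook that this series introduces (it is installed in xen_init_mmu_ops() in the last hunk of this diff). The native default only reserves the memblock range actually consumed while building the kernel mapping; Xen must additionally return the unused tail of the pagetable construction area, [end, PFN_PHYS(pgt_buf_top)), to read-write, because mask_rw_pte() below now write-protects the whole buffer up to pgt_buf_top. For comparison, the generic counterpart is believed to look like this (quoted from memory, an assumption rather than part of this diff):

    /* arch/x86/mm/init.c */
    void __init native_pagetable_reserve(u64 start, u64 end)
    {
            memblock_x86_reserve_range(start, end, "PGTABLE");
    }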
@@ -1474,7 +1488,7 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 }
 
 #ifdef CONFIG_X86_32
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
         /* If there's an existing pte, then don't allow _PAGE_RW to be set */
         if (pte_val_ma(*ptep) & _PAGE_PRESENT)
@@ -1484,7 +1498,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
         return pte;
 }
 #else /* CONFIG_X86_64 */
-static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
 {
         unsigned long pfn = pte_pfn(pte);
 
@@ -1495,7 +1509,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
          * it is RO.
          */
         if (((!is_early_ioremap_ptep(ptep) &&
-                        pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+                        pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
                         (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
                 pte = pte_wrprotect(pte);
 
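
The pgt_buf_end -> pgt_buf_top switch here is the counterpart of xen_mapping_pagetable_reserve() above. The buffer used while building the initial pagetables has three markers, roughly:

    /*
     *  pgt_buf_start             pgt_buf_end               pgt_buf_top
     *        |  pages in use as      |  allocated for           |
     *        |  pagetables (keep RO) |  pagetables, still unused|
     *        +-----------------------+--------------------------+
     */

Because pages past pgt_buf_end may still be consumed for pagetables after this check runs, mask_rw_pte() must conservatively write-protect everything up to pgt_buf_top; whatever remains unused in [end, pgt_buf_top) is flipped back to RW by the reserve hook once the final extent is known.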
@@ -1505,7 +1519,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 
 /* Init-time set_pte while constructing initial pagetables, which
    doesn't allow RO pagetable pages to be remapped RW */
-static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
 {
         pte = mask_rw_pte(ptep, pte);
 
@@ -1523,7 +1537,7 @@ static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
 
 /* Early in boot, while setting up the initial pagetable, assume
    everything is pinned. */
-static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
 {
 #ifdef CONFIG_FLATMEM
         BUG_ON(mem_map);        /* should only be used early */
@@ -1533,7 +1547,7 @@ static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
 }
 
 /* Used for pmd and pud */
-static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
+static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
 {
 #ifdef CONFIG_FLATMEM
         BUG_ON(mem_map);        /* should only be used early */
@@ -1543,13 +1557,13 @@ static __init void xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
 
 /* Early release_pte assumes that all pts are pinned, since there's
    only init_mm and anything attached to that is pinned. */
-static __init void xen_release_pte_init(unsigned long pfn)
+static void __init xen_release_pte_init(unsigned long pfn)
 {
         pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
 
-static __init void xen_release_pmd_init(unsigned long pfn)
+static void __init xen_release_pmd_init(unsigned long pfn)
 {
         make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
 }
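
For context, pin_pagetable_pfn(), named in a hunk header above and used by xen_release_pte_init() here, wraps a single MMUEXT hypercall. It is unchanged by this diff; roughly, as it appears elsewhere in this file (quoted from memory, a sketch):

    static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
    {
            struct mmuext_op op;

            op.cmd = cmd;
            op.arg1.mfn = pfn_to_mfn(pfn);
            if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                    BUG();
    }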
@@ -1675,7 +1689,7 @@ static void set_page_prot(void *addr, pgprot_t prot)
                 BUG();
 }
 
-static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 {
         unsigned pmdidx, pteidx;
         unsigned ident_pte;
@@ -1758,7 +1772,7 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                                          unsigned long max_pfn)
 {
         pud_t *l3;
@@ -1829,7 +1843,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
 static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
 
-static __init void xen_write_cr3_init(unsigned long cr3)
+static void __init xen_write_cr3_init(unsigned long cr3)
 {
         unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
 
@@ -1866,7 +1880,7 @@ static __init void xen_write_cr3_init(unsigned long cr3)
         pv_mmu_ops.write_cr3 = &xen_write_cr3;
 }
 
-__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
                                          unsigned long max_pfn)
 {
         pmd_t *kernel_pmd;
@@ -1972,7 +1986,7 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-__init void xen_ident_map_ISA(void)
+void __init xen_ident_map_ISA(void)
 {
         unsigned long pa;
 
@@ -1995,7 +2009,7 @@ __init void xen_ident_map_ISA(void)
         xen_flush_tlb();
 }
 
-static __init void xen_post_allocator_init(void)
+static void __init xen_post_allocator_init(void)
 {
 #ifdef CONFIG_XEN_DEBUG
         pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
@@ -2032,7 +2046,7 @@ static void xen_leave_lazy_mmu(void)
         preempt_enable();
 }
 
-static const struct pv_mmu_ops xen_mmu_ops __initdata = {
+static const struct pv_mmu_ops xen_mmu_ops __initconst = {
         .read_cr2 = xen_read_cr2,
         .write_cr2 = xen_write_cr2,
 
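
xen_mmu_ops is const, so it belongs in .init.rodata rather than .init.data; with some gcc versions, keeping the plain __initdata annotation on a const object provokes a "causes a section type conflict" error. The two annotations differ only in their target section; from include/linux/init.h of this era (quoted from memory):

    #define __initdata      __section(.init.data)
    #define __initconst     __section(.init.rodata)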
@@ -2105,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
 
 void __init xen_init_mmu_ops(void)
 {
+        x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
         x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
         x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
         pv_mmu_ops = xen_mmu_ops;