author	Jeremy Fitzhardinge <jeremy@goop.org>	2009-01-28 17:35:01 -0500
committer	H. Peter Anvin <hpa@linux.intel.com>	2009-01-30 17:51:14 -0500
commit	319f3ba52c71630865b10ac3b99dd020440d681d (patch)
tree	2445f2adc22165ad0b5ea3e6e3ed7af7db19eed0 /arch/x86/xen/mmu.c
parent	9b7ed8faa034fc2d350e2eff5c68680eb5c43a07 (diff)
xen: move remaining mmu-related stuff into mmu.c
Impact: Cleanup

Move remaining mmu-related stuff into mmu.c.  A general cleanup, and
lay the groundwork for later patches.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--	arch/x86/xen/mmu.c	700
1 file changed, 700 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 98cb9869eb24..94e452c0b00c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/paravirt.h>
 #include <asm/linkage.h>
 
@@ -55,6 +56,8 @@
 
 #include <xen/page.h>
 #include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+#include <xen/hvc-console.h>
 
 #include "multicalls.h"
 #include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
 
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+
+/*
+ * Identity map, in addition to plain kernel map.  This needs to be
+ * large enough to allocate page table pages to map the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Note about cr3 (pagetable base) values:
+ *
+ * xen_cr3 contains the current logical cr3 value; it contains the
+ * last set cr3.  This may not be the current effective cr3, because
+ * its update may be being lazily deferred.  However, a vcpu looking
+ * at its own cr3 can use this value knowing that everything will
+ * be self-consistent.
+ *
+ * xen_current_cr3 contains the actual vcpu cr3; it is set once the
+ * hypercall to set the vcpu cr3 is complete (so it may be a little
+ * out of date, but it will never be set early).  If one vcpu is
+ * looking at another vcpu's cr3 value, it should use this variable.
+ */
+DEFINE_PER_CPU(unsigned long, xen_cr3);		 /* cr3 stored as physaddr */
+DEFINE_PER_CPU(unsigned long, xen_current_cr3);	 /* actual vcpu cr3 */
+
+
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
  * redzone above it, so round it up to a PGD boundary.
@@ -1152,6 +1186,672 @@ void xen_exit_mmap(struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+	xen_setup_shared_info();
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+	percpu_read(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+	return percpu_read(xen_vcpu)->arch.cr2;
+}
+
+unsigned long xen_read_cr2_direct(void)
+{
+	return percpu_read(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_flush_tlb(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = MMUEXT_INVLPG_LOCAL;
+	op->arg1.linear_addr = addr & PAGE_MASK;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+				 struct mm_struct *mm, unsigned long va)
+{
+	struct {
+		struct mmuext_op op;
+		DECLARE_BITMAP(mask, NR_CPUS);
+	} *args;
+	struct multicall_space mcs;
+
+	BUG_ON(cpumask_empty(cpus));
+	BUG_ON(!mm);
+
+	mcs = xen_mc_entry(sizeof(*args));
+	args = mcs.args;
+	args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+	/* Remove us, and any offline CPUs. */
+	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+	if (unlikely(cpumask_empty(to_cpumask(args->mask))))
+		goto issue;
+
+	if (va == TLB_FLUSH_ALL) {
+		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+	} else {
+		args->op.cmd = MMUEXT_INVLPG_MULTI;
+		args->op.arg1.linear_addr = va;
+	}
+
+	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+issue:
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static unsigned long xen_read_cr3(void)
+{
+	return percpu_read(xen_cr3);
+}
+
+static void set_current_cr3(void *v)
+{
+	percpu_write(xen_current_cr3, (unsigned long)v);
+}
+
+static void __xen_write_cr3(bool kernel, unsigned long cr3)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+	unsigned long mfn;
+
+	if (cr3)
+		mfn = pfn_to_mfn(PFN_DOWN(cr3));
+	else
+		mfn = 0;
+
+	WARN_ON(mfn == 0 && kernel);
+
+	mcs = __xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+	op->arg1.mfn = mfn;
+
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	if (kernel) {
+		percpu_write(xen_cr3, cr3);
+
+		/* Update xen_current_cr3 once the batch has actually
+		   been submitted. */
+		xen_mc_callback(set_current_cr3, (void *)cr3);
+	}
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+	BUG_ON(preemptible());
+
+	xen_mc_batch();  /* disables interrupts */
+
+	/* Update while interrupts are disabled, so it's atomic with
+	   respect to IPIs */
+	percpu_write(xen_cr3, cr3);
+
+	__xen_write_cr3(true, cr3);
+
+#ifdef CONFIG_X86_64
+	{
+		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+		if (user_pgd)
+			__xen_write_cr3(false, __pa(user_pgd));
+		else
+			__xen_write_cr3(false, 0);
+	}
+#endif
+
+	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+}
+
+static int xen_pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = mm->pgd;
+	int ret = 0;
+
+	BUG_ON(PagePinned(virt_to_page(pgd)));
+
+#ifdef CONFIG_X86_64
+	{
+		struct page *page = virt_to_page(pgd);
+		pgd_t *user_pgd;
+
+		BUG_ON(page->private != 0);
+
+		ret = -ENOMEM;
+
+		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		page->private = (unsigned long)user_pgd;
+
+		if (user_pgd != NULL) {
+			user_pgd[pgd_index(VSYSCALL_START)] =
+				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+			ret = 0;
+		}
+
+		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+	}
+#endif
+
+	return ret;
+}
+
+static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_X86_64
+	pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+	if (user_pgd)
+		free_page((unsigned long)user_pgd);
+#endif
+}
+
+
+/* Early in boot, while setting up the initial pagetable, assume
+   everything is pinned. */
+static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+	BUG_ON(mem_map);	/* should only be used early */
+#endif
+	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* Early release_pte assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pte_init(unsigned long pfn)
+{
+	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct mmuext_op op;
+	op.cmd = cmd;
+	op.arg1.mfn = pfn_to_mfn(pfn);
+	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+		BUG();
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+   attached to a pinned pagetable. */
+static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(virt_to_page(mm->pgd))) {
+		SetPagePinned(page);
+
+		vm_unmap_aliases();
+		if (!PageHighMem(page)) {
+			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+		} else {
+			/* make sure there are no stray mappings of
+			   this page */
+			kmap_flush_unused();
+		}
+	}
+}
+
+static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_ptpage(unsigned long pfn, unsigned level)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(page)) {
+		if (!PageHighMem(page)) {
+			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+		}
+		ClearPagePinned(page);
+	}
+}
+
+static void xen_release_pte(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PMD);
+}
+
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
+void __init xen_reserve_top(void)
+{
+#ifdef CONFIG_X86_32
+	unsigned long top = HYPERVISOR_VIRT_START;
+	struct xen_platform_parameters pp;
+
+	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+		top = pp.virt_start;
+
+	reserve_top_address(-top);
+#endif	/* CONFIG_X86_32 */
+}
+
+/*
+ * Like __va(), but returns address in the kernel mapping (which is
+ * all we have until the physical memory mapping has been set up).
+ */
+static void *__ka(phys_addr_t paddr)
+{
+#ifdef CONFIG_X86_64
+	return (void *)(paddr + __START_KERNEL_map);
+#else
+	return __va(paddr);
+#endif
+}
+
+/* Convert a machine address to physical address */
+static unsigned long m2p(phys_addr_t maddr)
+{
+	phys_addr_t paddr;
+
+	maddr &= PTE_PFN_MASK;
+	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+	return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void *m2v(phys_addr_t maddr)
+{
+	return __ka(m2p(maddr));
+}
+
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+	pte_t pte = pfn_pte(pfn, prot);
+
+	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+		BUG();
+}
+
+static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+	unsigned pmdidx, pteidx;
+	unsigned ident_pte;
+	unsigned long pfn;
+
+	ident_pte = 0;
+	pfn = 0;
+	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+		pte_t *pte_page;
+
+		/* Reuse or allocate a page of ptes */
+		if (pmd_present(pmd[pmdidx]))
+			pte_page = m2v(pmd[pmdidx].pmd);
+		else {
+			/* Check for free pte pages */
+			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+				break;
+
+			pte_page = &level1_ident_pgt[ident_pte];
+			ident_pte += PTRS_PER_PTE;
+
+			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+		}
+
+		/* Install mappings */
+		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+			pte_t pte;
+
+			if (pfn > max_pfn_mapped)
+				max_pfn_mapped = pfn;
+
+			if (!pte_none(pte_page[pteidx]))
+				continue;
+
+			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+			pte_page[pteidx] = pte;
+		}
+	}
+
+	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+	set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+
+#ifdef CONFIG_X86_64
+static void convert_pfn_mfn(void *v)
+{
+	pte_t *pte = v;
+	int i;
+
+	/* All levels are converted the same way, so just treat them
+	   as ptes. */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		pte[i] = xen_make_pte(pte[i].pte);
+}
+
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
+ * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
+ * means that only the kernel has a physical mapping to start with -
+ * but that's enough to get __va working.  We need to fill in the rest
+ * of the physical mapping once some sort of allocator has been set
+ * up.
+ */
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+					 unsigned long max_pfn)
+{
+	pud_t *l3;
+	pmd_t *l2;
+
+	/* Zap identity mapping */
+	init_level4_pgt[0] = __pgd(0);
+
+	/* Pre-constructed entries are in pfn, so convert to mfn */
+	convert_pfn_mfn(init_level4_pgt);
+	convert_pfn_mfn(level3_ident_pgt);
+	convert_pfn_mfn(level3_kernel_pgt);
+
+	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	/* Set up identity map */
+	xen_map_identity_early(level2_ident_pgt, max_pfn);
+
+	/* Make pagetable pieces RO */
+	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+	/* Pin down new L4 */
+	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+	/* Unpin Xen-provided one */
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+	/* Switch over */
+	pgd = init_level4_pgt;
+
+	/*
+	 * At this stage there can be no user pgd, and no page
+	 * structure to attach it to, so make sure we just set the
+	 * kernel pgd.
+	 */
+	xen_mc_batch();
+	__xen_write_cr3(true, __pa(pgd));
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+	reserve_early(__pa(xen_start_info->pt_base),
+		      __pa(xen_start_info->pt_base +
+			   xen_start_info->nr_pt_frames * PAGE_SIZE),
+		      "XEN PAGETABLES");
+
+	return pgd;
+}
+#else	/* !CONFIG_X86_64 */
+static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+					 unsigned long max_pfn)
+{
+	pmd_t *kernel_pmd;
+
+	init_pg_tables_start = __pa(pgd);
+	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+	max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
+
+	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	xen_map_identity_early(level2_kernel_pgt, max_pfn);
+
+	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
+			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+
+	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+	xen_write_cr3(__pa(swapper_pg_dir));
+
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+
+	return swapper_pg_dir;
+}
+#endif	/* CONFIG_X86_64 */
+
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+	pte_t pte;
+
+	phys >>= PAGE_SHIFT;
+
+	switch (idx) {
+	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+	case FIX_F00F_IDT:
+#endif
+#ifdef CONFIG_X86_32
+	case FIX_WP_TEST:
+	case FIX_VDSO:
+# ifdef CONFIG_HIGHMEM
+	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+# endif
+#else
+	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	case FIX_APIC_BASE:	/* maps dummy local APIC */
+#endif
+		pte = pfn_pte(phys, prot);
+		break;
+
+	default:
+		pte = mfn_pte(phys, prot);
+		break;
+	}
+
+	__native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_64
+	/* Replicate changes to map the vsyscall page into the user
+	   pagetable vsyscall mapping. */
+	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+		unsigned long vaddr = __fix_to_virt(idx);
+		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+	}
+#endif
+}
+
+__init void xen_post_allocator_init(void)
+{
+	pv_mmu_ops.set_pte = xen_set_pte;
+	pv_mmu_ops.set_pmd = xen_set_pmd;
+	pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
+
+	/* This will work as long as patching hasn't happened yet
+	   (which it hasn't) */
+	pv_mmu_ops.alloc_pte = xen_alloc_pte;
+	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+	pv_mmu_ops.release_pte = xen_release_pte;
+	pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.alloc_pud = xen_alloc_pud;
+	pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
+#ifdef CONFIG_X86_64
+	SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
+	xen_mark_init_mm_pinned();
+}
+
+
+const struct pv_mmu_ops xen_mmu_ops __initdata = {
+	.pagetable_setup_start = xen_pagetable_setup_start,
+	.pagetable_setup_done = xen_pagetable_setup_done,
+
+	.read_cr2 = xen_read_cr2,
+	.write_cr2 = xen_write_cr2,
+
+	.read_cr3 = xen_read_cr3,
+	.write_cr3 = xen_write_cr3,
+
+	.flush_tlb_user = xen_flush_tlb,
+	.flush_tlb_kernel = xen_flush_tlb,
+	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_others = xen_flush_tlb_others,
+
+	.pte_update = paravirt_nop,
+	.pte_update_defer = paravirt_nop,
+
+	.pgd_alloc = xen_pgd_alloc,
+	.pgd_free = xen_pgd_free,
+
+	.alloc_pte = xen_alloc_pte_init,
+	.release_pte = xen_release_pte_init,
+	.alloc_pmd = xen_alloc_pte_init,
+	.alloc_pmd_clone = paravirt_nop,
+	.release_pmd = xen_release_pte_init,
+
+#ifdef CONFIG_HIGHPTE
+	.kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+#ifdef CONFIG_X86_64
+	.set_pte = xen_set_pte,
+#else
+	.set_pte = xen_set_pte_init,
+#endif
+	.set_pte_at = xen_set_pte_at,
+	.set_pmd = xen_set_pmd_hyper,
+
+	.ptep_modify_prot_start = __ptep_modify_prot_start,
+	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
+
+	.pte_val = xen_pte_val,
+	.pgd_val = xen_pgd_val,
+
+	.make_pte = xen_make_pte,
+	.make_pgd = xen_make_pgd,
+
+#ifdef CONFIG_X86_PAE
+	.set_pte_atomic = xen_set_pte_atomic,
+	.set_pte_present = xen_set_pte_at,
+	.pte_clear = xen_pte_clear,
+	.pmd_clear = xen_pmd_clear,
+#endif	/* CONFIG_X86_PAE */
+	.set_pud = xen_set_pud_hyper,
+
+	.make_pmd = xen_make_pmd,
+	.pmd_val = xen_pmd_val,
+
+#if PAGETABLE_LEVELS == 4
+	.pud_val = xen_pud_val,
+	.make_pud = xen_make_pud,
+	.set_pgd = xen_set_pgd_hyper,
+
+	.alloc_pud = xen_alloc_pte_init,
+	.release_pud = xen_release_pte_init,
+#endif	/* PAGETABLE_LEVELS == 4 */
+
+	.activate_mm = xen_activate_mm,
+	.dup_mmap = xen_dup_mmap,
+	.exit_mmap = xen_exit_mmap,
+
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_mmu,
+		.leave = xen_leave_lazy,
+	},
+
+	.set_fixmap = xen_set_fixmap,
+};
+
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;