Diffstat (limited to 'arch/x86/xen/mmu.c')
 arch/x86/xen/mmu.c | 757 ++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 751 insertions(+), 6 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 503c240e26c..cb6afa4ec95 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -47,6 +47,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/paravirt.h>
 #include <asm/linkage.h>
 
@@ -55,6 +56,8 @@
 
 #include <xen/page.h>
 #include <xen/interface/xen.h>
+#include <xen/interface/version.h>
+#include <xen/hvc-console.h>
 
 #include "multicalls.h"
 #include "mmu.h"
@@ -114,6 +117,37 @@ static inline void check_zero(void)
 
 #endif /* CONFIG_XEN_DEBUG_FS */
 
+
+/*
+ * Identity map, in addition to plain kernel map.  This needs to be
+ * large enough to allocate page table pages to allocate the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+#ifdef CONFIG_X86_64
+/* l3 pud for userspace vsyscall mapping */
+static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
+#endif /* CONFIG_X86_64 */
+
+/*
+ * Note about cr3 (pagetable base) values:
+ *
+ * xen_cr3 contains the current logical cr3 value; it contains the
+ * last set cr3.  This may not be the current effective cr3, because
+ * its update may be being lazily deferred.  However, a vcpu looking
+ * at its own cr3 can use this value knowing that everything will
+ * be self-consistent.
+ *
+ * xen_current_cr3 contains the actual vcpu cr3; it is set once the
+ * hypercall to set the vcpu cr3 is complete (so it may be a little
+ * out of date, but it will never be set early).  If one vcpu is
+ * looking at another vcpu's cr3 value, it should use this variable.
+ */
+DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
+DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */
+
+
 /*
  * Just beyond the highest usermode address.  STACK_TOP_MAX has a
  * redzone above it, so round it up to a PGD boundary.
@@ -242,6 +276,13 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	p2m_top[topidx][idx] = mfn;
 }
 
+unsigned long arbitrary_virt_to_mfn(void *vaddr)
+{
+	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);
+
+	return PFN_DOWN(maddr.maddr);
+}
+
 xmaddr_t arbitrary_virt_to_machine(void *vaddr)
 {
 	unsigned long address = (unsigned long)vaddr;
@@ -458,28 +499,33 @@ pteval_t xen_pte_val(pte_t pte)
 {
 	return pte_mfn_to_pfn(pte.pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
 
 pgdval_t xen_pgd_val(pgd_t pgd)
 {
 	return pte_mfn_to_pfn(pgd.pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
 pte_t xen_make_pte(pteval_t pte)
 {
 	pte = pte_pfn_to_mfn(pte);
 	return native_make_pte(pte);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
 
 pgd_t xen_make_pgd(pgdval_t pgd)
 {
 	pgd = pte_pfn_to_mfn(pgd);
 	return native_make_pgd(pgd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
 
 pmdval_t xen_pmd_val(pmd_t pmd)
 {
 	return pte_mfn_to_pfn(pmd.pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);
 
 void xen_set_pud_hyper(pud_t *ptr, pud_t val)
 {
@@ -556,12 +602,14 @@ pmd_t xen_make_pmd(pmdval_t pmd)
 	pmd = pte_pfn_to_mfn(pmd);
 	return native_make_pmd(pmd);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
 
 #if PAGETABLE_LEVELS == 4
 pudval_t xen_pud_val(pud_t pud)
 {
 	return pte_mfn_to_pfn(pud.pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
 
 pud_t xen_make_pud(pudval_t pud)
 {
@@ -569,6 +617,7 @@ pud_t xen_make_pud(pudval_t pud)
 
 	return native_make_pud(pud);
 }
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);
 
 pgd_t *xen_get_user_pgd(pgd_t *pgd)
 {
@@ -1063,18 +1112,14 @@ static void drop_other_mm_ref(void *info)
 	struct mm_struct *mm = info;
 	struct mm_struct *active_mm;
 
-#ifdef CONFIG_X86_64
-	active_mm = read_pda(active_mm);
-#else
-	active_mm = __get_cpu_var(cpu_tlbstate).active_mm;
-#endif
+	active_mm = percpu_read(cpu_tlbstate.active_mm);
 
 	if (active_mm == mm)
 		leave_mm(smp_processor_id());
 
 	/* If this cpu still has a stale cr3 reference, then make sure
 	   it has been flushed. */
-	if (x86_read_percpu(xen_current_cr3) == __pa(mm->pgd)) {
+	if (percpu_read(xen_current_cr3) == __pa(mm->pgd)) {
 		load_cr3(swapper_pg_dir);
 		arch_flush_lazy_cpu_mode();
 	}
@@ -1156,6 +1201,706 @@ void xen_exit_mmap(struct mm_struct *mm)
 	spin_unlock(&mm->page_table_lock);
 }
 
+static __init void xen_pagetable_setup_start(pgd_t *base)
+{
+}
+
+static __init void xen_pagetable_setup_done(pgd_t *base)
+{
+	xen_setup_shared_info();
+}
+
+static void xen_write_cr2(unsigned long cr2)
+{
+	percpu_read(xen_vcpu)->arch.cr2 = cr2;
+}
+
+static unsigned long xen_read_cr2(void)
+{
+	return percpu_read(xen_vcpu)->arch.cr2;
+}
+
+unsigned long xen_read_cr2_direct(void)
+{
+	return percpu_read(xen_vcpu_info.arch.cr2);
+}
+
+static void xen_flush_tlb(void)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
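+
+/* xen_mc_issue() flushes the multicall batch immediately unless we
+   are inside a lazy MMU section, in which case the flush is deferred
+   until the section ends; that is what lets the TLB and pte
+   operations in this file be batched cheaply. */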
+
+static void xen_flush_tlb_single(unsigned long addr)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+
+	preempt_disable();
+
+	mcs = xen_mc_entry(sizeof(*op));
+	op = mcs.args;
+	op->cmd = MMUEXT_INVLPG_LOCAL;
+	op->arg1.linear_addr = addr & PAGE_MASK;
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+	preempt_enable();
+}
+
+static void xen_flush_tlb_others(const struct cpumask *cpus,
+				 struct mm_struct *mm, unsigned long va)
+{
+	struct {
+		struct mmuext_op op;
+		DECLARE_BITMAP(mask, NR_CPUS);
+	} *args;
+	struct multicall_space mcs;
+
+	BUG_ON(cpumask_empty(cpus));
+	BUG_ON(!mm);
+
+	mcs = xen_mc_entry(sizeof(*args));
+	args = mcs.args;
+	args->op.arg2.vcpumask = to_cpumask(args->mask);
+
+	/* Remove us, and any offline CPUS. */
+	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
+	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));
+
+	if (va == TLB_FLUSH_ALL) {
+		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
+	} else {
+		args->op.cmd = MMUEXT_INVLPG_MULTI;
+		args->op.arg1.linear_addr = va;
+	}
+
+	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
+static unsigned long xen_read_cr3(void)
+{
+	return percpu_read(xen_cr3);
+}
+
+static void set_current_cr3(void *v)
+{
+	percpu_write(xen_current_cr3, (unsigned long)v);
+}
+
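+/*
+ * Queue a MMUEXT_NEW_BASEPTR (kernel) or MMUEXT_NEW_USER_BASEPTR
+ * (64-bit user) op for the pagetable at 'cr3'.  This only adds the
+ * op to the currently open multicall batch; the caller is
+ * responsible for issuing the batch.
+ */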
+static void __xen_write_cr3(bool kernel, unsigned long cr3)
+{
+	struct mmuext_op *op;
+	struct multicall_space mcs;
+	unsigned long mfn;
+
+	if (cr3)
+		mfn = pfn_to_mfn(PFN_DOWN(cr3));
+	else
+		mfn = 0;
+
+	WARN_ON(mfn == 0 && kernel);
+
+	mcs = __xen_mc_entry(sizeof(*op));
+
+	op = mcs.args;
+	op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
+	op->arg1.mfn = mfn;
+
+	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
+
+	if (kernel) {
+		percpu_write(xen_cr3, cr3);
+
+		/* Update xen_current_cr3 once the batch has actually
+		   been submitted. */
+		xen_mc_callback(set_current_cr3, (void *)cr3);
+	}
+}
+
+static void xen_write_cr3(unsigned long cr3)
+{
+	BUG_ON(preemptible());
+
+	xen_mc_batch();	/* disables interrupts */
+
+	/* Update while interrupts are disabled, so it's atomic with
+	   respect to ipis */
+	percpu_write(xen_cr3, cr3);
+
+	__xen_write_cr3(true, cr3);
+
+#ifdef CONFIG_X86_64
+	{
+		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
+		if (user_pgd)
+			__xen_write_cr3(false, __pa(user_pgd));
+		else
+			__xen_write_cr3(false, 0);
+	}
+#endif
+
+	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
+}
+
+static int xen_pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *pgd = mm->pgd;
+	int ret = 0;
+
+	BUG_ON(PagePinned(virt_to_page(pgd)));
+
+#ifdef CONFIG_X86_64
+	{
+		struct page *page = virt_to_page(pgd);
+		pgd_t *user_pgd;
+
+		BUG_ON(page->private != 0);
+
+		ret = -ENOMEM;
+
+		user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		page->private = (unsigned long)user_pgd;
+
+		if (user_pgd != NULL) {
+			user_pgd[pgd_index(VSYSCALL_START)] =
+				__pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
+			ret = 0;
+		}
+
+		BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
+	}
+#endif
+
+	return ret;
+}
+
+static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
+{
+#ifdef CONFIG_X86_64
+	pgd_t *user_pgd = xen_get_user_pgd(pgd);
+
+	if (user_pgd)
+		free_page((unsigned long)user_pgd);
+#endif
+}
+
+#ifdef CONFIG_HIGHPTE
+static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
+{
+	pgprot_t prot = PAGE_KERNEL;
+
+	if (PagePinned(page))
+		prot = PAGE_KERNEL_RO;
+
+	if (0 && PageHighMem(page))
+		printk("mapping highpte %lx type %d prot %s\n",
+		       page_to_pfn(page), type,
+		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");
+
+	return kmap_atomic_prot(page, type, prot);
+}
+#endif
+
+#ifdef CONFIG_X86_32
+static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
+{
+	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
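+	/* (the expression below keeps _PAGE_RW in the new pte only if
+	   the existing pte also had it set) */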
+	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
+		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+			       pte_val_ma(pte));
+
+	return pte;
+}
+
+/* Init-time set_pte while constructing initial pagetables, which
+   doesn't allow RO pagetable pages to be remapped RW */
+static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+	pte = mask_rw_pte(ptep, pte);
+
+	xen_set_pte(ptep, pte);
+}
+#endif
+
+/* Early in boot, while setting up the initial pagetable, assume
+   everything is pinned. */
+static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
+{
+#ifdef CONFIG_FLATMEM
+	BUG_ON(mem_map);	/* should only be used early */
+#endif
+	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+}
+
+/* Early release_pte assumes that all pts are pinned, since there's
+   only init_mm and anything attached to that is pinned. */
+static void xen_release_pte_init(unsigned long pfn)
+{
+	make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+}
+
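+/* Issue a single mmuext hypercall to pin or unpin the pagetable page
+   at 'pfn'; 'cmd' is one of the MMUEXT_PIN_L*_TABLE or
+   MMUEXT_UNPIN_TABLE ops. */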
+static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
+{
+	struct mmuext_op op;
+	op.cmd = cmd;
+	op.arg1.mfn = pfn_to_mfn(pfn);
+	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
+		BUG();
+}
+
+/* This needs to make sure the new pte page is pinned iff it's being
+   attached to a pinned pagetable. */
+static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(virt_to_page(mm->pgd))) {
+		SetPagePinned(page);
+
+		vm_unmap_aliases();
+		if (!PageHighMem(page)) {
+			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
+			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+				pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+		} else {
+			/* make sure there are no stray mappings of
+			   this page */
+			kmap_flush_unused();
+		}
+	}
+}
+
+static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PTE);
+}
+
+static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PMD);
+}
+
+/* This should never happen until we're OK to use struct page */
+static void xen_release_ptpage(unsigned long pfn, unsigned level)
+{
+	struct page *page = pfn_to_page(pfn);
+
+	if (PagePinned(page)) {
+		if (!PageHighMem(page)) {
+			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
+				pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
+		}
+		ClearPagePinned(page);
+	}
+}
+
+static void xen_release_pte(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PTE);
+}
+
+static void xen_release_pmd(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PMD);
+}
+
+#if PAGETABLE_LEVELS == 4
+static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
+{
+	xen_alloc_ptpage(mm, pfn, PT_PUD);
+}
+
+static void xen_release_pud(unsigned long pfn)
+{
+	xen_release_ptpage(pfn, PT_PUD);
+}
+#endif
+
+void __init xen_reserve_top(void)
+{
+#ifdef CONFIG_X86_32
+	unsigned long top = HYPERVISOR_VIRT_START;
+	struct xen_platform_parameters pp;
+
+	if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
+		top = pp.virt_start;
+
+	reserve_top_address(-top);
+#endif	/* CONFIG_X86_32 */
+}
+
+/*
+ * Like __va(), but returns address in the kernel mapping (which is
+ * all we have until the physical memory mapping has been set up).
+ */
+static void *__ka(phys_addr_t paddr)
+{
+#ifdef CONFIG_X86_64
+	return (void *)(paddr + __START_KERNEL_map);
+#else
+	return __va(paddr);
+#endif
+}
+
+/* Convert a machine address to physical address */
+static unsigned long m2p(phys_addr_t maddr)
+{
+	phys_addr_t paddr;
+
+	maddr &= PTE_PFN_MASK;
+	paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+	return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void *m2v(phys_addr_t maddr)
+{
+	return __ka(m2p(maddr));
+}
+
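+/* Set the pte for a kernel virtual address via
+   HYPERVISOR_update_va_mapping; used below to flip pagetable pages
+   between read-only and read-write. */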
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+	unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+	pte_t pte = pfn_pte(pfn, prot);
+
+	if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+		BUG();
+}
+
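+/* Populate 'pmd' with ptes mapping pfn N to the machine frame backing
+   pfn N, for pfns up to max_pfn.  Pte pages already provided by Xen
+   are reused; the rest are carved out of the static level1_ident_pgt
+   area, so the mapping may stop early if that runs out. */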
+static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
+{
+	unsigned pmdidx, pteidx;
+	unsigned ident_pte;
+	unsigned long pfn;
+
+	ident_pte = 0;
+	pfn = 0;
+	for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+		pte_t *pte_page;
+
+		/* Reuse or allocate a page of ptes */
+		if (pmd_present(pmd[pmdidx]))
+			pte_page = m2v(pmd[pmdidx].pmd);
+		else {
+			/* Check for free pte pages */
+			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+				break;
+
+			pte_page = &level1_ident_pgt[ident_pte];
+			ident_pte += PTRS_PER_PTE;
+
+			pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+		}
+
+		/* Install mappings */
+		for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+			pte_t pte;
+
+			if (pfn > max_pfn_mapped)
+				max_pfn_mapped = pfn;
+
+			if (!pte_none(pte_page[pteidx]))
+				continue;
+
+			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+			pte_page[pteidx] = pte;
+		}
+	}
+
+	for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+
+	set_page_prot(pmd, PAGE_KERNEL_RO);
+}
+
+#ifdef CONFIG_X86_64
+static void convert_pfn_mfn(void *v)
+{
+	pte_t *pte = v;
+	int i;
+
+	/* All levels are converted the same way, so just treat them
+	   as ptes. */
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		pte[i] = xen_make_pte(pte[i].pte);
+}
+
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
+ * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
+ * means that only the kernel has a physical mapping to start with -
+ * but that's enough to get __va working.  We need to fill in the rest
+ * of the physical mapping once some sort of allocator has been set
+ * up.
+ */
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+					 unsigned long max_pfn)
+{
+	pud_t *l3;
+	pmd_t *l2;
+
+	/* Zap identity mapping */
+	init_level4_pgt[0] = __pgd(0);
+
+	/* Pre-constructed entries are in pfn, so convert to mfn */
+	convert_pfn_mfn(init_level4_pgt);
+	convert_pfn_mfn(level3_ident_pgt);
+	convert_pfn_mfn(level3_kernel_pgt);
+
+	l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+	l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+	memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+	memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	/* Set up identity map */
+	xen_map_identity_early(level2_ident_pgt, max_pfn);
+
+	/* Make pagetable pieces RO */
+	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
+	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+	/* Pin down new L4 */
+	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
+			  PFN_DOWN(__pa_symbol(init_level4_pgt)));
+
+	/* Unpin Xen-provided one */
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+	/* Switch over */
+	pgd = init_level4_pgt;
+
+	/*
+	 * At this stage there can be no user pgd, and no page
+	 * structure to attach it to, so make sure we just set kernel
+	 * pgd.
+	 */
+	xen_mc_batch();
+	__xen_write_cr3(true, __pa(pgd));
+	xen_mc_issue(PARAVIRT_LAZY_CPU);
+
+	reserve_early(__pa(xen_start_info->pt_base),
+		      __pa(xen_start_info->pt_base +
+			   xen_start_info->nr_pt_frames * PAGE_SIZE),
+		      "XEN PAGETABLES");
+
+	return pgd;
+}
+#else	/* !CONFIG_X86_64 */
+static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+
+__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
+					 unsigned long max_pfn)
+{
+	pmd_t *kernel_pmd;
+
+	init_pg_tables_start = __pa(pgd);
+	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+	max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
+
+	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
+	memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
+
+	xen_map_identity_early(level2_kernel_pgt, max_pfn);
+
+	memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
+	set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
+			__pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));
+
+	set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
+	set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
+
+	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+	xen_write_cr3(__pa(swapper_pg_dir));
+
+	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));
+
+	return swapper_pg_dir;
+}
+#endif	/* CONFIG_X86_64 */
+
+static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
+{
+	pte_t pte;
+
+	phys >>= PAGE_SHIFT;
+
+	switch (idx) {
+	case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
+#ifdef CONFIG_X86_F00F_BUG
+	case FIX_F00F_IDT:
+#endif
+#ifdef CONFIG_X86_32
+	case FIX_WP_TEST:
+	case FIX_VDSO:
+# ifdef CONFIG_HIGHMEM
+	case FIX_KMAP_BEGIN ... FIX_KMAP_END:
+# endif
+#else
+	case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
+#endif
+#ifdef CONFIG_X86_LOCAL_APIC
+	case FIX_APIC_BASE:	/* maps dummy local APIC */
+#endif
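+		/* these fixmap slots hold ordinary pseudo-physical
+		   frames, so pfn_pte() translates pfn -> mfn; the
+		   default case below takes a raw machine frame and
+		   uses mfn_pte() directly */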
+		pte = pfn_pte(phys, prot);
+		break;
+
+	default:
+		pte = mfn_pte(phys, prot);
+		break;
+	}
+
+	__native_set_fixmap(idx, pte);
+
+#ifdef CONFIG_X86_64
+	/* Replicate changes to map the vsyscall page into the user
+	   pagetable vsyscall mapping. */
+	if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
+		unsigned long vaddr = __fix_to_virt(idx);
+		set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
+	}
+#endif
+}
+
+__init void xen_post_allocator_init(void)
+{
+	pv_mmu_ops.set_pte = xen_set_pte;
+	pv_mmu_ops.set_pmd = xen_set_pmd;
+	pv_mmu_ops.set_pud = xen_set_pud;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.set_pgd = xen_set_pgd;
+#endif
+
+	/* This will work as long as patching hasn't happened yet
+	   (which it hasn't) */
+	pv_mmu_ops.alloc_pte = xen_alloc_pte;
+	pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
+	pv_mmu_ops.release_pte = xen_release_pte;
+	pv_mmu_ops.release_pmd = xen_release_pmd;
+#if PAGETABLE_LEVELS == 4
+	pv_mmu_ops.alloc_pud = xen_alloc_pud;
+	pv_mmu_ops.release_pud = xen_release_pud;
+#endif
+
+#ifdef CONFIG_X86_64
+	SetPagePinned(virt_to_page(level3_user_vsyscall));
+#endif
+	xen_mark_init_mm_pinned();
+}
+
+
+const struct pv_mmu_ops xen_mmu_ops __initdata = {
+	.pagetable_setup_start = xen_pagetable_setup_start,
+	.pagetable_setup_done = xen_pagetable_setup_done,
+
+	.read_cr2 = xen_read_cr2,
+	.write_cr2 = xen_write_cr2,
+
+	.read_cr3 = xen_read_cr3,
+	.write_cr3 = xen_write_cr3,
+
+	.flush_tlb_user = xen_flush_tlb,
+	.flush_tlb_kernel = xen_flush_tlb,
+	.flush_tlb_single = xen_flush_tlb_single,
+	.flush_tlb_others = xen_flush_tlb_others,
+
+	.pte_update = paravirt_nop,
+	.pte_update_defer = paravirt_nop,
+
+	.pgd_alloc = xen_pgd_alloc,
+	.pgd_free = xen_pgd_free,
+
+	.alloc_pte = xen_alloc_pte_init,
+	.release_pte = xen_release_pte_init,
+	.alloc_pmd = xen_alloc_pte_init,
+	.alloc_pmd_clone = paravirt_nop,
+	.release_pmd = xen_release_pte_init,
+
+#ifdef CONFIG_HIGHPTE
+	.kmap_atomic_pte = xen_kmap_atomic_pte,
+#endif
+
+#ifdef CONFIG_X86_64
+	.set_pte = xen_set_pte,
+#else
+	.set_pte = xen_set_pte_init,
+#endif
+	.set_pte_at = xen_set_pte_at,
+	.set_pmd = xen_set_pmd_hyper,
+
+	.ptep_modify_prot_start = __ptep_modify_prot_start,
+	.ptep_modify_prot_commit = __ptep_modify_prot_commit,
+
+	.pte_val = PV_CALLEE_SAVE(xen_pte_val),
+	.pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
+
+	.make_pte = PV_CALLEE_SAVE(xen_make_pte),
+	.make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
+
+#ifdef CONFIG_X86_PAE
+	.set_pte_atomic = xen_set_pte_atomic,
+	.set_pte_present = xen_set_pte_at,
+	.pte_clear = xen_pte_clear,
+	.pmd_clear = xen_pmd_clear,
+#endif	/* CONFIG_X86_PAE */
+	.set_pud = xen_set_pud_hyper,
+
+	.make_pmd = PV_CALLEE_SAVE(xen_make_pmd),
+	.pmd_val = PV_CALLEE_SAVE(xen_pmd_val),
+
+#if PAGETABLE_LEVELS == 4
+	.pud_val = PV_CALLEE_SAVE(xen_pud_val),
+	.make_pud = PV_CALLEE_SAVE(xen_make_pud),
+	.set_pgd = xen_set_pgd_hyper,
+
+	.alloc_pud = xen_alloc_pte_init,
+	.release_pud = xen_release_pte_init,
+#endif	/* PAGETABLE_LEVELS == 4 */
+
+	.activate_mm = xen_activate_mm,
+	.dup_mmap = xen_dup_mmap,
+	.exit_mmap = xen_exit_mmap,
+
+	.lazy_mode = {
+		.enter = paravirt_enter_lazy_mmu,
+		.leave = xen_leave_lazy,
+	},
+
+	.set_fixmap = xen_set_fixmap,
+};
+
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;