author    Jeremy Fitzhardinge <jeremy@goop.org>  2008-07-08 18:06:50 -0400
committer Ingo Molnar <mingo@elte.hu>            2008-07-16 05:00:07 -0400
commit    084a2a4e7656209ea93aac9778defa03213ca31d
tree      4e3e661aa64121389e867e34a893754c90f7ee78
parent    3d75e1b8ef1567348ceba93d4666a1c7c2333583
xen64: early mapping setup
Set up the initial pagetables to map the kernel mapping into the
physical mapping space. This makes __va() usable, since it requires
physical mappings.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
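[Editor's note] The distinction the message draws between the kernel mapping and the physical mapping can be summarized in a standalone sketch. This is user-space C, not kernel code; the constants are illustrative (the direct-map base in particular varies across kernel versions), and the helper names ka/va are local to the example, not kernel API.

/* Sketch: the same physical address seen through the kernel text
 * mapping (what __ka() in this patch returns) versus the direct
 * "physical" mapping (what __va() returns).  Until this patch runs,
 * only the first view exists on xen64. */
#include <stdio.h>

#define START_KERNEL_MAP 0xffffffff80000000UL  /* kernel text mapping base */
#define PAGE_OFFSET_DEMO 0xffff880000000000UL  /* assumed direct-map base */

static void *ka(unsigned long paddr)  /* models __ka(): always usable */
{
        return (void *)(paddr + START_KERNEL_MAP);
}

static void *va(unsigned long paddr)  /* models __va(): needs the phys map */
{
        return (void *)(paddr + PAGE_OFFSET_DEMO);
}

int main(void)
{
        unsigned long paddr = 0x200000;  /* hypothetical kernel load address */

        printf("paddr %#lx via kernel map: %p\n", paddr, ka(paddr));
        printf("paddr %#lx via direct map: %p\n", paddr, va(paddr));
        return 0;
}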
Diffstat (limited to 'arch/x86/xen/enlighten.c')
-rw-r--r--  arch/x86/xen/enlighten.c   192
1 file changed, 176 insertions(+), 16 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 2b7bea3bb6f3..a991ee7ade9e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -33,6 +33,7 @@
 #include <xen/interface/sched.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/hvc-console.h>
 
 #include <asm/paravirt.h>
 #include <asm/page.h>
@@ -1294,6 +1295,157 @@ static void __init xen_reserve_top(void)
 #endif /* CONFIG_X86_32 */
 }
 
+#ifdef CONFIG_X86_64
+/*
+ * Like __va(), but returns address in the kernel mapping (which is
+ * all we have until the physical memory mapping has been set up).
+ */
+static void *__ka(phys_addr_t paddr)
+{
+        return (void *)(paddr + __START_KERNEL_map);
+}
+
+/* Convert a machine address to physical address */
+static unsigned long m2p(phys_addr_t maddr)
+{
+        phys_addr_t paddr;
+
+        maddr &= PTE_MASK;
+        paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;
+
+        return paddr;
+}
+
+/* Convert a machine address to kernel virtual */
+static void *m2v(phys_addr_t maddr)
+{
+        return __ka(m2p(maddr));
+}
+
+static void walk(pgd_t *pgd, unsigned long addr)
+{
+        unsigned l4idx = pgd_index(addr);
+        unsigned l3idx = pud_index(addr);
+        unsigned l2idx = pmd_index(addr);
+        unsigned l1idx = pte_index(addr);
+        pgd_t l4;
+        pud_t l3;
+        pmd_t l2;
+        pte_t l1;
+
+        xen_raw_printk("walk %p, %lx -> %d %d %d %d\n",
+                       pgd, addr, l4idx, l3idx, l2idx, l1idx);
+
+        l4 = pgd[l4idx];
+        xen_raw_printk("  l4: %016lx\n", l4.pgd);
+        xen_raw_printk("      %016lx\n", pgd_val(l4));
+
+        l3 = ((pud_t *)(m2v(l4.pgd)))[l3idx];
+        xen_raw_printk("  l3: %016lx\n", l3.pud);
+        xen_raw_printk("      %016lx\n", pud_val(l3));
+
+        l2 = ((pmd_t *)(m2v(l3.pud)))[l2idx];
+        xen_raw_printk("  l2: %016lx\n", l2.pmd);
+        xen_raw_printk("      %016lx\n", pmd_val(l2));
+
+        l1 = ((pte_t *)(m2v(l2.pmd)))[l1idx];
+        xen_raw_printk("  l1: %016lx\n", l1.pte);
+        xen_raw_printk("      %016lx\n", pte_val(l1));
+}
+
+static void set_page_prot(void *addr, pgprot_t prot)
+{
+        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
+        pte_t pte = pfn_pte(pfn, prot);
+
+        xen_raw_printk("addr=%p pfn=%lx mfn=%lx prot=%016x pte=%016x\n",
+                       addr, pfn, get_phys_to_machine(pfn),
+                       pgprot_val(prot), pte.pte);
+
+        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
+                BUG();
+}
+
+static void convert_pfn_mfn(void *v)
+{
+        pte_t *pte = v;
+        int i;
+
+        /* All levels are converted the same way, so just treat them
+           as ptes. */
+        for(i = 0; i < PTRS_PER_PTE; i++)
+                pte[i] = xen_make_pte(pte[i].pte);
+}
+
+/*
+ * Set up the initial kernel pagetable.
+ *
+ * We can construct this by grafting the Xen provided pagetable into
+ * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
+ * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
+ * means that only the kernel has a physical mapping to start with -
+ * but that's enough to get __va working.  We need to fill in the rest
+ * of the physical mapping once some sort of allocator has been set
+ * up.
+ */
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+{
+        pud_t *l3;
+        pmd_t *l2;
+
+        /* Zap identity mapping */
+        init_level4_pgt[0] = __pgd(0);
+
+        /* Pre-constructed entries are in pfn, so convert to mfn */
+        convert_pfn_mfn(init_level4_pgt);
+        convert_pfn_mfn(level3_ident_pgt);
+        convert_pfn_mfn(level3_kernel_pgt);
+
+        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
+        l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
+
+        memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+        memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+        l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
+        l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
+        memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
+
+        /* Make pagetable pieces RO */
+        set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
+        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
+        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
+        set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
+        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
+        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
+
+        /* Pin down new L4 */
+        pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(init_level4_pgt)));
+
+        /* Unpin Xen-provided one */
+        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
+
+        /* Switch over */
+        pgd = init_level4_pgt;
+        xen_write_cr3(__pa(pgd));
+
+        max_pfn_mapped = PFN_DOWN(__pa(pgd) +
+                                  xen_start_info->nr_pt_frames*PAGE_SIZE +
+                                  512*1024);
+
+        return pgd;
+}
+#else
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+{
+        init_pg_tables_start = __pa(pgd);
+        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
+        max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
+
+        return pgd;
+}
+#endif  /* CONFIG_X86_64 */
+
 /* First C function to be called on Xen boot */
 asmlinkage void __init xen_start_kernel(void)
 {
@@ -1336,32 +1488,29 @@ asmlinkage void __init xen_start_kernel(void)
 
 	pgd = (pgd_t *)xen_start_info->pt_base;
 
-#ifdef CONFIG_X86_32
-	init_pg_tables_start = __pa(pgd);
-	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
-	max_pfn_mapped = (init_pg_tables_end + 512*1024) >> PAGE_SHIFT;
-#endif
+	/* Prevent unwanted bits from being set in PTEs. */
+	__supported_pte_mask &= ~_PAGE_GLOBAL;
+	if (!is_initial_xendomain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
+	/* Don't do the full vcpu_info placement stuff until we have a
+	   possible map and a non-dummy shared_info. */
+	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
+
+	xen_raw_console_write("mapping kernel into physical memory\n");
+	pgd = xen_setup_kernel_pagetable(pgd);
 
-	init_mm.pgd = pgd; /* use the Xen pagetables to start */
+	init_mm.pgd = pgd;
 
 	/* keep using Xen gdt for now; no urgent need to change it */
 
 	x86_write_percpu(xen_cr3, __pa(pgd));
 	x86_write_percpu(xen_current_cr3, __pa(pgd));
 
-	/* Don't do the full vcpu_info placement stuff until we have a
-	   possible map and a non-dummy shared_info. */
-	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
-
 	pv_info.kernel_rpl = 1;
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
 
-	/* Prevent unwanted bits from being set in PTEs. */
-	__supported_pte_mask &= ~_PAGE_GLOBAL;
-	if (!is_initial_xendomain())
-		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
-
 	/* set the limit of our address space */
 	xen_reserve_top();
 
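[Editor's note] The hunk above moves the __supported_pte_mask clamp (and the temporary xen_vcpu pointer) ahead of xen_setup_kernel_pagetable(), presumably so that PTEs built while grafting the pagetables already respect the clamped mask. Below is a simplified user-space model of that clamping, not the kernel's actual pfn_pte() path; the mk_pte helper is local to the example, though the bit positions do match x86.

#include <stdio.h>

#define _PAGE_PWT    (1UL << 3)
#define _PAGE_PCD    (1UL << 4)
#define _PAGE_GLOBAL (1UL << 8)

static unsigned long supported_pte_mask = ~0UL;

/* simplified PTE construction: combine frame address and flags,
 * then drop any bits not in the supported mask */
static unsigned long mk_pte(unsigned long pfn, unsigned long flags)
{
        return ((pfn << 12) | flags) & supported_pte_mask;
}

int main(void)
{
        supported_pte_mask &= ~_PAGE_GLOBAL;             /* always, under Xen */
        supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);  /* domU only */

        /* ask for a global, cache-disabled page; the clamp strips both */
        unsigned long pte = mk_pte(0x1234, _PAGE_GLOBAL | _PAGE_PCD | 0x63);
        printf("pte = %#lx (GLOBAL and PCD bits gone)\n", pte);
        return 0;
}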
@@ -1384,10 +1533,21 @@ asmlinkage void __init xen_start_kernel(void)
 		add_preferred_console("hvc", 0, NULL);
 	}
 
+	xen_raw_console_write("about to get started...\n");
+
+#if 0
+	xen_raw_printk("&boot_params=%p __pa(&boot_params)=%lx __va(__pa(&boot_params))=%lx\n",
+		       &boot_params, __pa_symbol(&boot_params),
+		       __va(__pa_symbol(&boot_params)));
+
+	walk(pgd, &boot_params);
+	walk(pgd, __va(__pa(&boot_params)));
+#endif
+
 	/* Start the world */
 #ifdef CONFIG_X86_32
 	i386_start_kernel();
 #else
-	x86_64_start_kernel((char *)&boot_params);
+	x86_64_start_reservations((char *)__pa_symbol(&boot_params));
 #endif
 }