author		Jeremy Fitzhardinge <jeremy@goop.org>	2008-07-08 18:06:52 -0400
committer	Ingo Molnar <mingo@elte.hu>		2008-07-16 05:00:35 -0400
commit		d114e1981cc1a51131230993a082c27c79ab370a (patch)
tree		bd2330936843721d69a89f5ccd507a400015fbe0 /arch
parent		22911b3f1cf5431058e56b1727e8ef77be5e0ac9 (diff)
xen64: map an initial chunk of physical memory
Early in boot, map a chunk of extra physical memory for use later on.
We need a pool of mapped pages to allocate further pages to construct
pagetables mapping all physical memory.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Cc: Stephen Tweedie <sct@redhat.com>
Cc: Eduardo Habkost <ehabkost@redhat.com>
Cc: Mark McLoughlin <markmc@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/xen/enlighten.c	79
1 file changed, 69 insertions, 10 deletions
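
A note on the sizing in the patch below: each page of ptes covers
PTRS_PER_PTE * PAGE_SIZE = 512 * 4KB = 2MB, and level1_ident_pgt reserves
PTRS_PER_PTE * 4 entries, i.e. four such pages, so the static pool alone can
identity-map up to 8MB on top of whatever pte pages Xen has already installed.
A minimal standalone C sketch of that arithmetic, assuming the usual x86-64
constants (illustration only, not kernel code):

    /* Sizing check for the static pte pool; the constants mirror x86-64
     * kernel values, but this is a plain userspace program. */
    #include <stdio.h>

    #define PTRS_PER_PTE 512     /* pte entries per pte page */
    #define PAGE_SIZE    4096UL  /* bytes mapped per pte */

    int main(void)
    {
        /* one pte page maps PTRS_PER_PTE * PAGE_SIZE bytes = 2MB */
        unsigned long per_pte_page = PTRS_PER_PTE * PAGE_SIZE;
        /* level1_ident_pgt holds PTRS_PER_PTE * 4 entries = 4 pte pages */
        unsigned long pool_bytes = 4 * per_pte_page;

        printf("one pte page maps %lu MB\n", per_pte_page >> 20);
        printf("static pool maps up to %lu MB\n", pool_bytes >> 20);
        return 0;
    }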
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 392450787aa9..e9e3bafe48cf 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1382,6 +1382,61 @@ static void convert_pfn_mfn(void *v)
 }
 
 /*
+ * Identity map, in addition to plain kernel map.  This needs to be
+ * large enough to allocate page table pages to allocate the rest.
+ * Each page can map 2MB.
+ */
+static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+
+static __init void xen_map_identity_early(unsigned long max_pfn)
+{
+	unsigned pmdidx, pteidx;
+	unsigned ident_pte;
+	unsigned long pfn;
+
+	ident_pte = 0;
+	pfn = 0;
+	for(pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
+		pte_t *pte_page;
+
+		BUG_ON(level2_ident_pgt[pmdidx].pmd != level2_kernel_pgt[pmdidx].pmd);
+
+		/* Reuse or allocate a page of ptes */
+		if (pmd_present(level2_ident_pgt[pmdidx]))
+			pte_page = m2v(level2_ident_pgt[pmdidx].pmd);
+		else {
+			/* Check for free pte pages */
+			if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+				break;
+
+			pte_page = &level1_ident_pgt[ident_pte];
+			ident_pte += PTRS_PER_PTE;
+
+			/* Install new l1 in l2(s) */
+			level2_ident_pgt[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
+			level2_kernel_pgt[pmdidx] = level2_ident_pgt[pmdidx];
+		}
+
+		/* Install mappings */
+		for(pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
+			pte_t pte;
+
+			if (pfn > max_pfn_mapped)
+				max_pfn_mapped = pfn;
+
+			if (!pte_none(pte_page[pteidx]))
+				continue;
+
+			pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
+			pte_page[pteidx] = pte;
+		}
+	}
+
+	for(pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
+		set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);
+}
+
+/*
  * Set up the inital kernel pagetable.
  *
  * We can construct this by grafting the Xen provided pagetable into
@@ -1392,7 +1447,7 @@ static void convert_pfn_mfn(void *v)
  * of the physical mapping once some sort of allocator has been set
  * up.
  */
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
 	pud_t *l3;
 	pmd_t *l2;
@@ -1415,6 +1470,9 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
 	l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
 	memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
 
+	/* Set up identity map */
+	xen_map_identity_early(max_pfn);
+
 	/* Make pagetable pieces RO */
 	set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 	set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
@@ -1424,7 +1482,7 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
 	set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 
 	/* Pin down new L4 */
-	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(init_level4_pgt)));
+	pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa_symbol(init_level4_pgt)));
 
 	/* Unpin Xen-provided one */
 	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
@@ -1433,19 +1491,23 @@ static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
 	pgd = init_level4_pgt;
 	xen_write_cr3(__pa(pgd));
 
-	max_pfn_mapped = PFN_DOWN(__pa(pgd) +
-				  xen_start_info->nr_pt_frames*PAGE_SIZE +
-				  512*1024);
+	reserve_early(__pa(xen_start_info->pt_base),
+		      __pa(xen_start_info->pt_base +
+			   xen_start_info->nr_pt_frames * PAGE_SIZE),
+		      "XEN PAGETABLES");
 
 	return pgd;
 }
 #else
-static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd)
+static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
 {
 	init_pg_tables_start = __pa(pgd);
 	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
 	max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);
 
+	x86_write_percpu(xen_cr3, __pa(pgd));
+	x86_write_percpu(xen_current_cr3, __pa(pgd));
+
 	return pgd;
 }
 #endif /* CONFIG_X86_64 */
@@ -1502,15 +1564,12 @@ asmlinkage void __init xen_start_kernel(void)
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
-	pgd = xen_setup_kernel_pagetable(pgd);
+	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
 
 	init_mm.pgd = pgd;
 
 	/* keep using Xen gdt for now; no urgent need to change it */
 
-	x86_write_percpu(xen_cr3, __pa(pgd));
-	x86_write_percpu(xen_current_cr3, __pa(pgd));
-
 	pv_info.kernel_rpl = 1;
 	if (xen_feature(XENFEAT_supervisor_mode_kernel))
 		pv_info.kernel_rpl = 0;
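
The heart of the patch is the two-level fill in xen_map_identity_early():
walk the pmd slots until max_pfn is covered, reuse a pte page wherever Xen
already installed one, otherwise hand out the next page from the static
level1_ident_pgt pool and install it in both the ident and kernel pmds, then
populate the pte entries. A rough userspace model of that control flow, with
plain arrays standing in for pmd/pte pages (all names and types here are
illustrative stand-ins, not the kernel's):

    /* Toy model of xen_map_identity_early()'s loop structure. The pool,
     * pmd[] and the pfn-as-pte encoding are stand-ins for illustration. */
    #include <stdio.h>

    #define PTRS_PER_PMD 512
    #define PTRS_PER_PTE 512
    #define POOL_PAGES   4   /* mirrors level1_ident_pgt's four pte pages */

    static unsigned long pool[POOL_PAGES][PTRS_PER_PTE]; /* spare pte pages */
    static unsigned long *pmd[PTRS_PER_PMD];             /* pmd slot -> pte page */

    static unsigned long map_identity(unsigned long max_pfn)
    {
        unsigned pmdidx, pteidx, next_page = 0;
        unsigned long pfn = 0;

        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
            unsigned long *pte_page;

            if (pmd[pmdidx])                  /* reuse a present pte page */
                pte_page = pmd[pmdidx];
            else {
                if (next_page == POOL_PAGES)  /* pool exhausted: stop */
                    break;
                pte_page = pool[next_page++]; /* take a page from the pool */
                pmd[pmdidx] = pte_page;       /* install new l1 in l2 */
            }

            for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++)
                if (!pte_page[pteidx])        /* don't clobber existing entries */
                    pte_page[pteidx] = pfn;   /* identity "map" (pfn 0 stays 0) */
        }
        return pfn;                           /* pfns covered before stopping */
    }

    int main(void)
    {
        /* with an empty pmd, 4 pool pages cover 4 * 512 = 2048 pfns (8MB) */
        printf("mapped %lu pfns\n", map_identity(1UL << 20));
        return 0;
    }

The real function additionally bumps max_pfn_mapped as it fills entries and,
after the loop, marks every pool page it handed out read-only via
set_page_prot() so the resulting pagetable can be pinned.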