Diffstat (limited to 'arch')

 arch/i386/kernel/paravirt.c |  2 ++
 arch/i386/kernel/vmi.c      | 19 +++++++++++++++++++
 2 files changed, 21 insertions(+), 0 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 8352394d5e..12e3bc49b8 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -553,6 +553,8 @@ struct paravirt_ops paravirt_ops = {
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_single = native_flush_tlb_single,
 
+	.map_pt_hook = (void *)native_nop,
+
 	.alloc_pt = (void *)native_nop,
 	.alloc_pd = (void *)native_nop,
 	.alloc_pd_clone = (void *)native_nop,
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index acdfe69fb7..bd1037bd12 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -370,6 +370,24 @@ static void vmi_check_page_type(u32 pfn, int type)
 #define vmi_check_page_type(p,t) do { } while (0)
 #endif
 
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+	/*
+	 * Internally, the VMI ROM must map virtual addresses to physical
+	 * addresses for processing MMU updates.  By the time MMU updates
+	 * are issued, this information is typically already lost.
+	 * Fortunately, the VMI provides a cache of mapping slots for active
+	 * page tables.
+	 *
+	 * We use slot zero for the linear mapping of physical memory, and
+	 * in HIGHPTE kernels, slots 1 and 2 for KM_PTE0 and KM_PTE1.
+	 *
+	 * args: SLOT VA COUNT PFN
+	 */
+	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+	vmi_ops.set_linear_mapping((type - KM_PTE0) + 1, (u32)va, 1, pfn);
+}
+
 static void vmi_allocate_pt(u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -813,6 +831,7 @@ static inline int __init activate_vmi(void)
 	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
 	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 
+	paravirt_ops.map_pt_hook = vmi_map_pt_hook;
 	paravirt_ops.alloc_pt = vmi_allocate_pt;
 	paravirt_ops.alloc_pd = vmi_allocate_pd;
 	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
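
Note (not part of the patch above): the new map_pt_hook only matters on the HIGHPTE path, where the PTE page is mapped through kmap_atomic() into the KM_PTE0/KM_PTE1 slots. A minimal sketch of what a pte_offset_map()-style caller would be expected to look like follows; the helper name example_map_pte_page is made up for illustration, and only paravirt_ops.map_pt_hook, kmap_atomic(), pfn_to_page(), pmd_val() and pte_index() are taken from the interfaces the patch relies on.

/*
 * Illustrative sketch only -- not taken from this patch.  Map the PTE page
 * with kmap_atomic() into slot KM_PTE0, then tell the hypervisor which pfn
 * now backs that virtual address so the VMI ROM can translate later MMU
 * updates issued through this mapping.
 */
static pte_t *example_map_pte_page(pmd_t *pmd, unsigned long address)
{
	unsigned long pfn = pmd_val(*pmd) >> PAGE_SHIFT;
	pte_t *pte = (pte_t *)kmap_atomic(pfn_to_page(pfn), KM_PTE0);

	/* New paravirt hook: native kernels hit native_nop, VMI kernels
	 * reach vmi_map_pt_hook(), which programs a VMI mapping slot. */
	paravirt_ops.map_pt_hook(KM_PTE0, pte, pfn);

	return pte + pte_index(address);
}

On native hardware the hook costs only an indirect call to native_nop; under VMI it programs slot (type - KM_PTE0) + 1, i.e. slot 1 for KM_PTE0 and slot 2 for KM_PTE1, matching the comment in vmi_map_pt_hook() above.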