author    Zachary Amsden <zach@vmware.com>    2007-03-05 03:30:37 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-03-05 10:57:52 -0500
commit    9a1c13e91f100c12dcad3a1be1b12890bf32f6ff (patch)
tree      9c132900f5b331570d91df515167776586fb2960
parent    1182d8528b620c23d043bccbbef092b42062960a (diff)
[PATCH] vmi: fix highpte
Provide a PT map hook for HIGHPTE kernels to designate where they are mapping page tables. This information is required so the physical address of PTE updates can be determined; otherwise, the mm layer would have to carry the physical address all the way to each PTE modification callsite, which is even more hideous than the macros required to provide the proper hooks. So let's not mess up arch neutral code to achieve this, but keep the horror in an #ifdef HIGHPTE in include/asm-i386/pgtable.h. I had to use macros here because some types are not yet defined in all the include paths for this header.

This patch is absolutely required for HIGHPTE kernels to operate properly with VMI.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
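To illustrate the flow the hook enables, here is a minimal sketch of a caller (example_set_pte() is hypothetical, not code from this patch): under CONFIG_HIGHPTE the page-table page sits in highmem and is reachable only through a short-lived kmap_atomic() window, so by the time a PTE write is issued, the virtual address alone no longer identifies the physical page being modified.

	/* Hypothetical caller, for illustration only. */
	static void example_set_pte(struct mm_struct *mm, pmd_t *pmd,
				    unsigned long addr, pte_t entry)
	{
		/* kmap_atomic()s the PT page; with this patch the macro
		 * also fires paravirt_map_pt_hook(KM_PTE0, va, pfn), so a
		 * backend such as VMI learns which physical page the
		 * transient VA refers to. */
		pte_t *pte = pte_offset_map(pmd, addr);

		set_pte_at(mm, addr, pte, entry);  /* backend can now resolve
						      the VA to a PFN */
		pte_unmap(pte);                    /* kunmap_atomic() */
	}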
-rw-r--r--  arch/i386/kernel/paravirt.c |  2 ++
-rw-r--r--  arch/i386/kernel/vmi.c      | 19 +++++++++++++++++++
-rw-r--r--  include/asm-i386/paravirt.h |  4 ++++
-rw-r--r--  include/asm-i386/pgtable.h  | 23 +++++++++++++++++++----
4 files changed, 44 insertions(+), 4 deletions(-)
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 8352394d5efb..12e3bc49b83b 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -553,6 +553,8 @@ struct paravirt_ops paravirt_ops = {
 	.flush_tlb_kernel = native_flush_tlb_global,
 	.flush_tlb_single = native_flush_tlb_single,
 
+	.map_pt_hook = (void *)native_nop,
+
 	.alloc_pt = (void *)native_nop,
 	.alloc_pd = (void *)native_nop,
 	.alloc_pd_clone = (void *)native_nop,
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index acdfe69fb7ad..bd1037bd124b 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -370,6 +370,24 @@ static void vmi_check_page_type(u32 pfn, int type)
 #define vmi_check_page_type(p,t) do { } while (0)
 #endif
 
+static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
+{
+	/*
+	 * Internally, the VMI ROM must map virtual addresses to physical
+	 * addresses for processing MMU updates.  By the time MMU updates
+	 * are issued, this information is typically already lost.
+	 * Fortunately, the VMI provides a cache of mapping slots for active
+	 * page tables.
+	 *
+	 * We use slot zero for the linear mapping of physical memory, and
+	 * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+	 *
+	 * args:                 SLOT                 VA    COUNT PFN
+	 */
+	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+	vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
 static void vmi_allocate_pt(u32 pfn)
 {
 	vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -813,6 +831,7 @@ static inline int __init activate_vmi(void)
 	vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
 	vmi_ops.release_page = vmi_get_function(VMI_CALL_ReleasePage);
 
+	paravirt_ops.map_pt_hook = vmi_map_pt_hook;
 	paravirt_ops.alloc_pt = vmi_allocate_pt;
 	paravirt_ops.alloc_pd = vmi_allocate_pd;
 	paravirt_ops.alloc_pd_clone = vmi_allocate_pd_clone;
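The slot arithmetic in vmi_map_pt_hook() above is worth spelling out; it relies on KM_PTE1 immediately following KM_PTE0 in enum km_type, which holds for the i386 kmap_types.h of this era:

	slot = (type - KM_PTE0) + 1
	type == KM_PTE0  ->  slot 1
	type == KM_PTE1  ->  slot 2
	(slot 0 is reserved for the linear mapping of physical memory)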
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index a35c81480654..e01d895d7379 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -131,6 +131,8 @@ struct paravirt_ops
 	void (*flush_tlb_kernel)(void);
 	void (*flush_tlb_single)(u32 addr);
 
+	void (fastcall *map_pt_hook)(int type, pte_t *va, u32 pfn);
+
 	void (*alloc_pt)(u32 pfn);
 	void (*alloc_pd)(u32 pfn);
 	void (*alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
@@ -354,6 +356,8 @@ static inline void startup_ipi_hook(int phys_apicid, unsigned long start_eip,
 #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
 #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
 
+#define paravirt_map_pt_hook(type, va, pfn) paravirt_ops.map_pt_hook(type, va, pfn)
+
 #define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
 #define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
 
diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h
index e6a4723f0eb1..c3b58d473a55 100644
--- a/include/asm-i386/pgtable.h
+++ b/include/asm-i386/pgtable.h
@@ -263,6 +263,7 @@ static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return p
  */
 #define pte_update(mm, addr, ptep)		do { } while (0)
 #define pte_update_defer(mm, addr, ptep)	do { } while (0)
+#define paravirt_map_pt_hook(slot, va, pfn)	do { } while (0)
 #endif
 
 /*
@@ -469,10 +470,24 @@ extern pte_t *lookup_address(unsigned long address);
 #endif
 
 #if defined(CONFIG_HIGHPTE)
-#define pte_offset_map(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
-#define pte_offset_map_nested(dir, address) \
-	((pte_t *)kmap_atomic(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
+#define pte_offset_map(dir, address) \
+({ \
+	pte_t *__ptep; \
+	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE0);\
+	paravirt_map_pt_hook(KM_PTE0,__ptep, pfn); \
+	__ptep = __ptep + pte_index(address); \
+	__ptep; \
+})
+#define pte_offset_map_nested(dir, address) \
+({ \
+	pte_t *__ptep; \
+	unsigned pfn = pmd_val(*(dir)) >> PAGE_SHIFT; \
+	__ptep = (pte_t *)kmap_atomic(pfn_to_page(pfn),KM_PTE1);\
+	paravirt_map_pt_hook(KM_PTE1,__ptep, pfn); \
+	__ptep = __ptep + pte_index(address); \
+	__ptep; \
+})
 #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
 #else
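For reference, the _nested variants exist so two page tables can be mapped at once, as in fork-time PTE copying; a minimal sketch of the canonical pairing (example_copy_one_pte() is illustrative, not from this patch):

	static void example_copy_one_pte(pmd_t *dst_pmd, pmd_t *src_pmd,
					 unsigned long addr)
	{
		pte_t *dst = pte_offset_map(dst_pmd, addr);        /* KM_PTE0 */
		pte_t *src = pte_offset_map_nested(src_pmd, addr); /* KM_PTE1 */

		/* Both hooks have fired, so a paravirt backend can map
		 * either transient VA back to its physical page. */
		set_pte(dst, *src);

		pte_unmap_nested(src);	/* release KM_PTE1 first... */
		pte_unmap(dst);		/* ...then KM_PTE0 */
	}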