aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorZachary Amsden <zach@vmware.com>2007-05-02 13:27:16 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:16 -0400
commiteeef9c68aae2f4f21ab810d0339e0f22d30b0cd8 (patch)
tree8f13bf2b617ac6c8cb3855fcc924e22bb4096197 /arch
parent9f53a729dbf0ba8abdc464f6eb828f485d3417f7 (diff)
[PATCH] i386: Implement vmi_kmap_atomic_pte
Implement vmi_kmap_atomic_pte in terms of the backend set_linear_mapping operation. The conversion is rather straightforward; call kmap_atomic and then inform the hypervisor of the page mapping. The _flush_tlb damage is due to macros being pulled in from highmem.h. Signed-off-by: Zachary Amsden <zach@vmware.com> Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch')
-rw-r--r--arch/i386/kernel/vmi.c34
1 files changed, 22 insertions, 12 deletions
diff --git a/arch/i386/kernel/vmi.c b/arch/i386/kernel/vmi.c
index 12312988c626..0df0b2cd3617 100644
--- a/arch/i386/kernel/vmi.c
+++ b/arch/i386/kernel/vmi.c
@@ -26,6 +26,7 @@
26#include <linux/cpu.h> 26#include <linux/cpu.h>
27#include <linux/bootmem.h> 27#include <linux/bootmem.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/highmem.h>
29#include <asm/vmi.h> 30#include <asm/vmi.h>
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/fixmap.h> 32#include <asm/fixmap.h>
@@ -65,8 +66,8 @@ static struct {
65 void (*release_page)(u32, u32); 66 void (*release_page)(u32, u32);
66 void (*set_pte)(pte_t, pte_t *, unsigned); 67 void (*set_pte)(pte_t, pte_t *, unsigned);
67 void (*update_pte)(pte_t *, unsigned); 68 void (*update_pte)(pte_t *, unsigned);
68 void (*set_linear_mapping)(int, u32, u32, u32); 69 void (*set_linear_mapping)(int, void *, u32, u32);
69 void (*flush_tlb)(int); 70 void (*_flush_tlb)(int);
70 void (*set_initial_ap_state)(int, int); 71 void (*set_initial_ap_state)(int, int);
71 void (*halt)(void); 72 void (*halt)(void);
72 void (*set_lazy_mode)(int mode); 73 void (*set_lazy_mode)(int mode);
@@ -221,12 +222,12 @@ static void vmi_load_esp0(struct tss_struct *tss,
221 222
222static void vmi_flush_tlb_user(void) 223static void vmi_flush_tlb_user(void)
223{ 224{
224 vmi_ops.flush_tlb(VMI_FLUSH_TLB); 225 vmi_ops._flush_tlb(VMI_FLUSH_TLB);
225} 226}
226 227
227static void vmi_flush_tlb_kernel(void) 228static void vmi_flush_tlb_kernel(void)
228{ 229{
229 vmi_ops.flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL); 230 vmi_ops._flush_tlb(VMI_FLUSH_TLB | VMI_FLUSH_GLOBAL);
230} 231}
231 232
232/* Stub to do nothing at all; used for delays and unimplemented calls */ 233/* Stub to do nothing at all; used for delays and unimplemented calls */
@@ -349,8 +350,11 @@ static void vmi_check_page_type(u32 pfn, int type)
349#define vmi_check_page_type(p,t) do { } while (0) 350#define vmi_check_page_type(p,t) do { } while (0)
350#endif 351#endif
351 352
352static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn) 353#ifdef CONFIG_HIGHPTE
354static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
353{ 355{
356 void *va = kmap_atomic(page, type);
357
354 /* 358 /*
355 * Internally, the VMI ROM must map virtual addresses to physical 359 * Internally, the VMI ROM must map virtual addresses to physical
356 * addresses for processing MMU updates. By the time MMU updates 360 * addresses for processing MMU updates. By the time MMU updates
@@ -364,8 +368,11 @@ static void vmi_map_pt_hook(int type, pte_t *va, u32 pfn)
364 * args: SLOT VA COUNT PFN 368 * args: SLOT VA COUNT PFN
365 */ 369 */
366 BUG_ON(type != KM_PTE0 && type != KM_PTE1); 370 BUG_ON(type != KM_PTE0 && type != KM_PTE1);
367 vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn); 371 vmi_ops.set_linear_mapping((type - KM_PTE0)+1, va, 1, page_to_pfn(page));
372
373 return va;
368} 374}
375#endif
369 376
370static void vmi_allocate_pt(u32 pfn) 377static void vmi_allocate_pt(u32 pfn)
371{ 378{
@@ -660,7 +667,7 @@ void vmi_bringup(void)
660{ 667{
661 /* We must establish the lowmem mapping for MMU ops to work */ 668 /* We must establish the lowmem mapping for MMU ops to work */
662 if (vmi_ops.set_linear_mapping) 669 if (vmi_ops.set_linear_mapping)
663 vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0); 670 vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
664} 671}
665 672
666/* 673/*
@@ -800,8 +807,8 @@ static inline int __init activate_vmi(void)
800 para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode); 807 para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
801 808
802 /* user and kernel flush are just handled with different flags to FlushTLB */ 809 /* user and kernel flush are just handled with different flags to FlushTLB */
803 para_wrap(flush_tlb_user, vmi_flush_tlb_user, flush_tlb, FlushTLB); 810 para_wrap(flush_tlb_user, vmi_flush_tlb_user, _flush_tlb, FlushTLB);
804 para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, flush_tlb, FlushTLB); 811 para_wrap(flush_tlb_kernel, vmi_flush_tlb_kernel, _flush_tlb, FlushTLB);
805 para_fill(flush_tlb_single, InvalPage); 812 para_fill(flush_tlb_single, InvalPage);
806 813
807 /* 814 /*
@@ -847,9 +854,12 @@ static inline int __init activate_vmi(void)
847 paravirt_ops.release_pt = vmi_release_pt; 854 paravirt_ops.release_pt = vmi_release_pt;
848 paravirt_ops.release_pd = vmi_release_pd; 855 paravirt_ops.release_pd = vmi_release_pd;
849 } 856 }
850#if 0 857
851 para_wrap(map_pt_hook, vmi_map_pt_hook, set_linear_mapping, 858 /* Set linear is needed in all cases */
852 SetLinearMapping); 859 vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
860#ifdef CONFIG_HIGHPTE
861 if (vmi_ops.set_linear_mapping)
862 paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
853#endif 863#endif
854 864
855 /* 865 /*