author     Zachary Amsden <zach@vmware.com>      2007-02-13 07:26:21 -0500
committer  Andi Kleen <andi@basil.nowhere.org>   2007-02-13 07:26:21 -0500
commit     c119ecce894120790903ef535dac3e105f3d6cde (patch)
tree       b9a60fe46b03d396ba396912c237e6ee2e9ef873 /include/asm-i386/paravirt.h
parent     90611fe923aa3ac7ffb9e5df45c83860b0f00227 (diff)
[PATCH] MM: page allocation hooks for VMI backend
The VMI backend uses explicit page type notification to track shadow page
tables. The allocation of page table roots is especially tricky. We need to
clone the root for non-PAE mode while it is protected under the pgd lock to
correctly copy the shadow.
We don't need to allocate pgds in PAE mode (PDPs in Intel terminology), as
they have only 4 entries and are cached entirely by the processor, which
makes shadowing them rather simple.
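
To make the locking requirement concrete, here is a minimal sketch of a
non-PAE pgd constructor invoking the clone hook under the pgd lock. The
pgd_ctor shape and the swapper_pg_dir/USER_PTRS_PER_PGD usage are
assumptions modeled on the i386 mm code of this era, not part of this
header change; only paravirt_alloc_pd_clone comes from this patch.

/*
 * Sketch only: the clone hook runs while pgd_lock is held, so the
 * hypervisor copies the shadow atomically with the kernel-entry copy.
 */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	spin_lock_irqsave(&pgd_lock, flags);

	/* Copy the kernel mappings from the reference page directory. */
	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	/* Tell the hypervisor to clone the shadow while still locked. */
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				PTRS_PER_PGD - USER_PTRS_PER_PGD);

	spin_unlock_irqrestore(&pgd_lock, flags);
}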
For allocation at the base page table level, pmd_populate provides exactly
the hook point we need. We also need the allocation notification when
splitting a large page, and the release notification must happen before a
page table page is returned to any free pool.
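
For instance, the pmd_populate hook point could be wired up along these
lines. This is a sketch of what the companion pgalloc.h change might look
like; the exact macro body is assumed, and only paravirt_alloc_pt and
set_pmd come from the interface added below.

/*
 * Sketch: report the new pte page to the hypervisor *before* it is
 * linked into the pmd, so the shadow exists by the time it is visible.
 */
#define pmd_populate(mm, pmd, pte)					\
do {									\
	paravirt_alloc_pt(page_to_pfn(pte));				\
	set_pmd(pmd, __pmd(_PAGE_TABLE +				\
		((unsigned long long)page_to_pfn(pte) <<	\
			(unsigned long)PAGE_SHIFT)));			\
} while (0)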
These slightly odd semantics are required by VMI, but Xen also uses the
hooks to determine the exact moment when page tables are created or
released.
AK: All nops for other architectures
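
On builds without CONFIG_PARAVIRT (and, in spirit, on other architectures)
the hooks can compile to nothing. A sketch of such no-op stubs, with the
macro spellings taken from the interface below; the fallback site is an
assumption, not part of this header change.

/* Sketch: no-op fallbacks so native builds pay no cost for the hooks. */
#define paravirt_alloc_pt(pfn)					do { } while (0)
#define paravirt_alloc_pd(pfn)					do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count)	do { } while (0)
#define paravirt_release_pt(pfn)				do { } while (0)
#define paravirt_release_pd(pfn)				do { } while (0)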
Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Andi Kleen <ak@suse.de>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Diffstat (limited to 'include/asm-i386/paravirt.h')
-rw-r--r--  include/asm-i386/paravirt.h  14
1 file changed, 14 insertions(+), 0 deletions(-)
diff --git a/include/asm-i386/paravirt.h b/include/asm-i386/paravirt.h
index 9f06265065f4..53da276a2ec2 100644
--- a/include/asm-i386/paravirt.h
+++ b/include/asm-i386/paravirt.h
@@ -127,6 +127,12 @@ struct paravirt_ops
 	void (fastcall *flush_tlb_kernel)(void);
 	void (fastcall *flush_tlb_single)(u32 addr);
 
+	void (fastcall *alloc_pt)(u32 pfn);
+	void (fastcall *alloc_pd)(u32 pfn);
+	void (fastcall *alloc_pd_clone)(u32 pfn, u32 clonepfn, u32 start, u32 count);
+	void (fastcall *release_pt)(u32 pfn);
+	void (fastcall *release_pd)(u32 pfn);
+
 	void (fastcall *set_pte)(pte_t *ptep, pte_t pteval);
 	void (fastcall *set_pte_at)(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval);
 	void (fastcall *set_pmd)(pmd_t *pmdp, pmd_t pmdval);
@@ -320,6 +326,14 @@ static inline unsigned long apic_read(unsigned long reg)
 #define __flush_tlb_global() paravirt_ops.flush_tlb_kernel()
 #define __flush_tlb_single(addr) paravirt_ops.flush_tlb_single(addr)
 
+#define paravirt_alloc_pt(pfn) paravirt_ops.alloc_pt(pfn)
+#define paravirt_release_pt(pfn) paravirt_ops.release_pt(pfn)
+
+#define paravirt_alloc_pd(pfn) paravirt_ops.alloc_pd(pfn)
+#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) \
+	paravirt_ops.alloc_pd_clone(pfn, clonepfn, start, count)
+#define paravirt_release_pd(pfn) paravirt_ops.release_pd(pfn)
+
 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
 	paravirt_ops.set_pte(ptep, pteval);
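
For context, a hedged sketch of a caller honoring the release-before-free
rule from the commit message. The pte_free shape is illustrative of the
i386 pgalloc path; only paravirt_release_pt comes from this patch.

/*
 * Sketch: the hypervisor must be told the page is no longer a page
 * table before the page can reach any free pool.
 */
static inline void pte_free(struct page *pte)
{
	paravirt_release_pt(page_to_pfn(pte));
	__free_page(pte);
}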