aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/xen/mmu.c
diff options
context:
space:
mode:
authorJeremy Fitzhardinge <jeremy@goop.org>2008-05-26 18:31:18 -0400
committerThomas Gleixner <tglx@linutronix.de>2008-05-27 04:11:37 -0400
commitd451bb7aa852627bdf7be7937dc3d9d9f261b235 (patch)
tree2a92b5e271fb2ae7a869f0f2b4f5bb390cac99cc /arch/x86/xen/mmu.c
parent955d6f1778da5a9795f2dfb07f760006f194609a (diff)
xen: make phys_to_machine structure dynamic
We now support the use of memory hotplug, so the physical to machine page mapping structure must be dynamic. This is implemented as a two-level radix tree structure, which allows us to efficiently incrementally allocate memory for the p2m table as new pages are added. Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r--arch/x86/xen/mmu.c85
1 file changed, 85 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 07c2653ec335..c3b27dec6f03 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -56,6 +56,91 @@
56#include "multicalls.h" 56#include "multicalls.h"
57#include "mmu.h" 57#include "mmu.h"
58 58
59/*
60 * This should probably be a config option. On 32-bit, it costs 1
61 * page/gig of memory; on 64-bit its 2 pages/gig. If we want it to be
62 * completely unbounded we can add another level to the p2m structure.
63 */
64#define MAX_GUEST_PAGES (16ull * 1024*1024*1024 / PAGE_SIZE)
65#define P2M_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
66
67static unsigned long *p2m_top[MAX_GUEST_PAGES / P2M_ENTRIES_PER_PAGE];
68
69static inline unsigned p2m_top_index(unsigned long pfn)
70{
71 BUG_ON(pfn >= MAX_GUEST_PAGES);
72 return pfn / P2M_ENTRIES_PER_PAGE;
73}
74
75static inline unsigned p2m_index(unsigned long pfn)
76{
77 return pfn % P2M_ENTRIES_PER_PAGE;
78}
79
80void __init xen_build_dynamic_phys_to_machine(void)
81{
82 unsigned pfn;
83 unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
84
85 BUG_ON(xen_start_info->nr_pages >= MAX_GUEST_PAGES);
86
87 for(pfn = 0;
88 pfn < xen_start_info->nr_pages;
89 pfn += P2M_ENTRIES_PER_PAGE) {
90 unsigned topidx = p2m_top_index(pfn);
91
92 p2m_top[topidx] = &mfn_list[pfn];
93 }
94}
95
96unsigned long get_phys_to_machine(unsigned long pfn)
97{
98 unsigned topidx, idx;
99
100 topidx = p2m_top_index(pfn);
101 if (p2m_top[topidx] == NULL)
102 return INVALID_P2M_ENTRY;
103
104 idx = p2m_index(pfn);
105 return p2m_top[topidx][idx];
106}
107
108static void alloc_p2m(unsigned long **pp)
109{
110 unsigned long *p;
111 unsigned i;
112
113 p = (void *)__get_free_page(GFP_KERNEL | __GFP_NOFAIL);
114 BUG_ON(p == NULL);
115
116 for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
117 p[i] = INVALID_P2M_ENTRY;
118
119 if (cmpxchg(pp, NULL, p) != NULL)
120 free_page((unsigned long)p);
121}
122
123void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
124{
125 unsigned topidx, idx;
126
127 if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
128 BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
129 return;
130 }
131
132 topidx = p2m_top_index(pfn);
133 if (p2m_top[topidx] == NULL) {
134 /* no need to allocate a page to store an invalid entry */
135 if (mfn == INVALID_P2M_ENTRY)
136 return;
137 alloc_p2m(&p2m_top[topidx]);
138 }
139
140 idx = p2m_index(pfn);
141 p2m_top[topidx][idx] = mfn;
142}
143
59xmaddr_t arbitrary_virt_to_machine(unsigned long address) 144xmaddr_t arbitrary_virt_to_machine(unsigned long address)
60{ 145{
61 unsigned int level; 146 unsigned int level;