diff options
author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-05-26 18:31:22 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-27 04:11:37 -0400 |
commit | d5edbc1f75420935b1ec7e65df10c8f81cea82de (patch) | |
tree | b0ad78cd90a7184a5fb35ad33708067e14c00605 /arch/x86/xen/mmu.c | |
parent | a0d695c821544947342a2d372ec4108bc813b979 (diff) |
xen: add p2m mfn_list_list
When saving a domain, the Xen tools need to remap all our mfns to
portable pfns. In order to remap our p2m table, it needs to know
where all its pages are, so maintain the references to the p2m table
for it to use.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen/mmu.c')
-rw-r--r-- | arch/x86/xen/mmu.c | 39 |
1 file changed, 36 insertions, 3 deletions
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index da7b45b05066..4740cda36563 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -69,6 +69,13 @@ static unsigned long *p2m_top[TOP_ENTRIES] | |||
69 | __attribute__((section(".data.page_aligned"))) = | 69 | __attribute__((section(".data.page_aligned"))) = |
70 | { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] }; | 70 | { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] }; |
71 | 71 | ||
72 | /* Arrays of p2m arrays expressed in mfns used for save/restore */ | ||
73 | static unsigned long p2m_top_mfn[TOP_ENTRIES] | ||
74 | __attribute__((section(".bss.page_aligned"))); | ||
75 | |||
76 | static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE] | ||
77 | __attribute__((section(".bss.page_aligned"))); | ||
78 | |||
72 | static inline unsigned p2m_top_index(unsigned long pfn) | 79 | static inline unsigned p2m_top_index(unsigned long pfn) |
73 | { | 80 | { |
74 | BUG_ON(pfn >= MAX_DOMAIN_PAGES); | 81 | BUG_ON(pfn >= MAX_DOMAIN_PAGES); |
@@ -80,11 +87,35 @@ static inline unsigned p2m_index(unsigned long pfn) | |||
80 | return pfn % P2M_ENTRIES_PER_PAGE; | 87 | return pfn % P2M_ENTRIES_PER_PAGE; |
81 | } | 88 | } |
82 | 89 | ||
90 | /* Build the parallel p2m_top_mfn structures */ | ||
91 | void xen_setup_mfn_list_list(void) | ||
92 | { | ||
93 | unsigned pfn, idx; | ||
94 | |||
95 | for(pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) { | ||
96 | unsigned topidx = p2m_top_index(pfn); | ||
97 | |||
98 | p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]); | ||
99 | } | ||
100 | |||
101 | for(idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) { | ||
102 | unsigned topidx = idx * P2M_ENTRIES_PER_PAGE; | ||
103 | p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]); | ||
104 | } | ||
105 | |||
106 | BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); | ||
107 | |||
108 | HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = | ||
109 | virt_to_mfn(p2m_top_mfn_list); | ||
110 | HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages; | ||
111 | } | ||
112 | |||
113 | /* Set up p2m_top to point to the domain-builder provided p2m pages */ | ||
83 | void __init xen_build_dynamic_phys_to_machine(void) | 114 | void __init xen_build_dynamic_phys_to_machine(void) |
84 | { | 115 | { |
85 | unsigned pfn; | ||
86 | unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; | 116 | unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list; |
87 | unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); | 117 | unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages); |
118 | unsigned pfn; | ||
88 | 119 | ||
89 | for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) { | 120 | for(pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) { |
90 | unsigned topidx = p2m_top_index(pfn); | 121 | unsigned topidx = p2m_top_index(pfn); |
@@ -105,7 +136,7 @@ unsigned long get_phys_to_machine(unsigned long pfn) | |||
105 | return p2m_top[topidx][idx]; | 136 | return p2m_top[topidx][idx]; |
106 | } | 137 | } |
107 | 138 | ||
108 | static void alloc_p2m(unsigned long **pp) | 139 | static void alloc_p2m(unsigned long **pp, unsigned long *mfnp) |
109 | { | 140 | { |
110 | unsigned long *p; | 141 | unsigned long *p; |
111 | unsigned i; | 142 | unsigned i; |
@@ -118,6 +149,8 @@ static void alloc_p2m(unsigned long **pp) | |||
118 | 149 | ||
119 | if (cmpxchg(pp, p2m_missing, p) != p2m_missing) | 150 | if (cmpxchg(pp, p2m_missing, p) != p2m_missing) |
120 | free_page((unsigned long)p); | 151 | free_page((unsigned long)p); |
152 | else | ||
153 | *mfnp = virt_to_mfn(p); | ||
121 | } | 154 | } |
122 | 155 | ||
123 | void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 156 | void set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
@@ -139,7 +172,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
139 | /* no need to allocate a page to store an invalid entry */ | 172 | /* no need to allocate a page to store an invalid entry */ |
140 | if (mfn == INVALID_P2M_ENTRY) | 173 | if (mfn == INVALID_P2M_ENTRY) |
141 | return; | 174 | return; |
142 | alloc_p2m(&p2m_top[topidx]); | 175 | alloc_p2m(&p2m_top[topidx], &p2m_top_mfn[topidx]); |
143 | } | 176 | } |
144 | 177 | ||
145 | idx = p2m_index(pfn); | 178 | idx = p2m_index(pfn); |