-rw-r--r--  arch/x86/include/asm/xen/page.h |   1 -
-rw-r--r--  arch/x86/xen/mmu.c              |   4 +
-rw-r--r--  arch/x86/xen/p2m.c              |  94 -------
-rw-r--r--  arch/x86/xen/setup.c            | 369 +++++-------
-rw-r--r--  arch/x86/xen/xen-ops.h          |   1 +
5 files changed, 172 insertions(+), 297 deletions(-)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 5a65a7551698..eb89c7ddae16 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,7 +44,6 @@ extern unsigned long machine_to_phys_nr;
 
 extern unsigned long get_phys_to_machine(unsigned long pfn);
 extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
 extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b995b871da02..601914d2b0a8 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1225,6 +1225,10 @@ static void __init xen_pagetable_init(void)
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
 
+	/* Remap memory freed due to conflicts with E820 map */
+	if (!xen_feature(XENFEAT_auto_translated_physmap))
+		xen_remap_memory();
+
 	xen_setup_shared_info();
 	xen_post_allocator_init();
 }
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index fa53dc2bc589..24cd9d17f57f 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -662,100 +662,6 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
 	return true;
 }
 
-/*
- * Skim over the P2M tree looking at pages that are either filled with
- * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
- * replace the P2M leaf with a p2m_missing or p2m_identity.
- * Stick the old page in the new P2M tree location.
- */
-static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
-{
-	unsigned topidx;
-	unsigned mididx;
-	unsigned ident_pfns;
-	unsigned inv_pfns;
-	unsigned long *p2m;
-	unsigned idx;
-	unsigned long pfn;
-
-	/* We only look when this entails a P2M middle layer */
-	if (p2m_index(set_pfn))
-		return false;
-
-	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
-		topidx = p2m_top_index(pfn);
-
-		if (!p2m_top[topidx])
-			continue;
-
-		if (p2m_top[topidx] == p2m_mid_missing)
-			continue;
-
-		mididx = p2m_mid_index(pfn);
-		p2m = p2m_top[topidx][mididx];
-		if (!p2m)
-			continue;
-
-		if ((p2m == p2m_missing) || (p2m == p2m_identity))
-			continue;
-
-		if ((unsigned long)p2m == INVALID_P2M_ENTRY)
-			continue;
-
-		ident_pfns = 0;
-		inv_pfns = 0;
-		for (idx = 0; idx < P2M_PER_PAGE; idx++) {
-			/* IDENTITY_PFNs are 1:1 */
-			if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
-				ident_pfns++;
-			else if (p2m[idx] == INVALID_P2M_ENTRY)
-				inv_pfns++;
-			else
-				break;
-		}
-		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
-			goto found;
-	}
-	return false;
-found:
-	/* Found one, replace old with p2m_identity or p2m_missing */
-	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
-
-	/* Reset where we want to stick the old page in. */
-	topidx = p2m_top_index(set_pfn);
-	mididx = p2m_mid_index(set_pfn);
-
-	/* This shouldn't happen */
-	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
-		early_alloc_p2m_middle(set_pfn);
-
-	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
-		return false;
-
-	p2m_init(p2m);
-	p2m_top[topidx][mididx] = p2m;
-
-	return true;
-}
-bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
-		if (!early_alloc_p2m_middle(pfn))
-			return false;
-
-		if (early_can_reuse_p2m_middle(pfn))
-			return __set_phys_to_machine(pfn, mfn);
-
-		if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
-			return false;
-
-		if (!__set_phys_to_machine(pfn, mfn))
-			return false;
-	}
-
-	return true;
-}
-
 static void __init early_split_p2m(unsigned long pfn)
 {
 	unsigned long mididx, idx;
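The helpers deleted above index a three-level p2m tree: p2m_top holds pointers to mid pages, each mid page holds pointers to leaf pages of P2M_PER_PAGE entries, and a pfn is split into top/mid/leaf indices. What follows is a minimal userspace sketch of that pfn-to-mfn lookup, assuming 4 KiB pages; the index helpers follow the split used in p2m.c, but everything here is an illustrative stand-in rather than the kernel's definitions.

#include <stdio.h>

/* Illustrative constants: 4 KiB pages, pointer-sized entries. */
#define PAGE_SIZE	 4096UL
#define P2M_PER_PAGE	 (PAGE_SIZE / sizeof(unsigned long))	/* leaf entries */
#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *))	/* mid pointers */

/* Index helpers mirroring p2m_top_index()/p2m_mid_index()/p2m_index(). */
static unsigned p2m_top_index(unsigned long pfn)
{
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}

/* pfn -> mfn lookup through the three levels. */
static unsigned long p2m_lookup(unsigned long ***p2m_top, unsigned long pfn)
{
	return p2m_top[p2m_top_index(pfn)][p2m_mid_index(pfn)][p2m_index(pfn)];
}

int main(void)
{
	/* One top slot, one mid page, one leaf page covering pfn 0..P2M_PER_PAGE-1. */
	static unsigned long leaf[P2M_PER_PAGE];
	static unsigned long *mid[P2M_MID_PER_PAGE] = { leaf };
	static unsigned long **top[1] = { mid };

	for (unsigned long pfn = 0; pfn < P2M_PER_PAGE; pfn++)
		leaf[pfn] = pfn;	/* 1:1; the kernel also tags such entries with a flag bit */

	printf("pfn 42 -> mfn %lu\n", p2m_lookup(top, 42));
	return 0;
}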
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 29834b3fd87f..e0b6912f9cad 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -30,6 +30,7 @@
 #include "xen-ops.h"
 #include "vdso.h"
 #include "p2m.h"
+#include "mmu.h"
 
 /* These are code, but not functions. Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -47,8 +48,19 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;
 
-/* Buffer used to remap identity mapped pages */
-unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+/*
+ * Buffer used to remap identity mapped pages. We only need the virtual space.
+ * The physical page behind this address is remapped as needed to different
+ * buffer pages.
+ */
+#define REMAP_SIZE	(P2M_PER_PAGE - 3)
+static struct {
+	unsigned long next_area_mfn;
+	unsigned long target_pfn;
+	unsigned long size;
+	unsigned long mfns[REMAP_SIZE];
+} xen_remap_buf __initdata __aligned(PAGE_SIZE);
+static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
 /*
  * The maximum amount of extra memory compared to the base size. The
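One detail worth spelling out: REMAP_SIZE is P2M_PER_PAGE - 3 so that the three bookkeeping members plus the mfns[] array fill exactly one page, which is what lets set_pte_mfn() alias the single page-aligned buffer onto any saved area. Below is a standalone sketch with a compile-time check, assuming 4 KiB pages and C11 static_assert; "struct remap_area" is a hypothetical name for what the patch declares anonymously.

#include <assert.h>	/* static_assert (C11) */

#define PAGE_SIZE	4096UL
#define P2M_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
#define REMAP_SIZE	(P2M_PER_PAGE - 3)	/* 3 slots go to bookkeeping */

struct remap_area {
	unsigned long next_area_mfn;	/* MFN of the next saved area (linked list) */
	unsigned long target_pfn;	/* first PFN this chunk is remapped to */
	unsigned long size;		/* number of valid entries in mfns[] */
	unsigned long mfns[REMAP_SIZE];	/* the saved machine frame numbers */
};

/* The whole struct must fill exactly one page for the PTE-aliasing trick. */
static_assert(sizeof(struct remap_area) == PAGE_SIZE,
	      "remap area must be exactly one page");

int main(void) { return 0; }

The check holds on both 32-bit and 64-bit, since P2M_PER_PAGE already scales with sizeof(unsigned long).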
@@ -98,63 +110,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 	}
 }
 
-static unsigned long __init xen_do_chunk(unsigned long start,
-					 unsigned long end, bool release)
-{
-	struct xen_memory_reservation reservation = {
-		.address_bits = 0,
-		.extent_order = 0,
-		.domid        = DOMID_SELF
-	};
-	unsigned long len = 0;
-	unsigned long pfn;
-	int ret;
-
-	for (pfn = start; pfn < end; pfn++) {
-		unsigned long frame;
-		unsigned long mfn = pfn_to_mfn(pfn);
-
-		if (release) {
-			/* Make sure pfn exists to start with */
-			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-				continue;
-			frame = mfn;
-		} else {
-			if (mfn != INVALID_P2M_ENTRY)
-				continue;
-			frame = pfn;
-		}
-		set_xen_guest_handle(reservation.extent_start, &frame);
-		reservation.nr_extents = 1;
-
-		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
-					   &reservation);
-		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
-		     release ? "release" : "populate", pfn, ret);
-
-		if (ret == 1) {
-			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
-				if (release)
-					break;
-				set_xen_guest_handle(reservation.extent_start, &frame);
-				reservation.nr_extents = 1;
-				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
-							   &reservation);
-				break;
-			}
-			len++;
-		} else
-			break;
-	}
-	if (len)
-		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
-		       release ? "Freeing" : "Populating",
-		       start, end, len,
-		       release ? "freed" : "added");
-
-	return len;
-}
-
 /*
  * Finds the next RAM pfn available in the E820 map after min_pfn.
  * This function updates min_pfn with the pfn found and returns
@@ -198,26 +153,62 @@ static unsigned long __init xen_find_pfn_range(
 	return done;
 }
 
+static int __init xen_free_mfn(unsigned long mfn)
+{
+	struct xen_memory_reservation reservation = {
+		.address_bits = 0,
+		.extent_order = 0,
+		.domid        = DOMID_SELF
+	};
+
+	set_xen_guest_handle(reservation.extent_start, &mfn);
+	reservation.nr_extents = 1;
+
+	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
+}
+
 /*
- * This releases a chunk of memory and then does the identity map. It's used as
+ * This releases a chunk of memory and then does the identity map. It's used
  * as a fallback if the remapping fails.
  */
 static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
 	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
 	unsigned long *released)
 {
+	unsigned long len = 0;
+	unsigned long pfn, end;
+	int ret;
+
 	WARN_ON(start_pfn > end_pfn);
 
+	end = min(end_pfn, nr_pages);
+	for (pfn = start_pfn; pfn < end; pfn++) {
+		unsigned long mfn = pfn_to_mfn(pfn);
+
+		/* Make sure pfn exists to start with */
+		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+			continue;
+
+		ret = xen_free_mfn(mfn);
+		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+
+		if (ret == 1) {
+			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
+				break;
+			len++;
+		} else
+			break;
+	}
+
 	/* Need to release pages first */
-	*released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+	*released += len;
 	*identity += set_phys_range_identity(start_pfn, end_pfn);
 }
 
 /*
- * Helper function to update both the p2m and m2p tables.
+ * Helper function to update the p2m and m2p tables and kernel mapping.
  */
-static unsigned long __init xen_update_mem_tables(unsigned long pfn,
-						  unsigned long mfn)
+static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
 {
 	struct mmu_update update = {
 		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
@@ -225,161 +216,91 @@ static unsigned long __init xen_update_mem_tables(unsigned long pfn,
 	};
 
 	/* Update p2m */
-	if (!early_set_phys_to_machine(pfn, mfn)) {
+	if (!set_phys_to_machine(pfn, mfn)) {
 		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
 		     pfn, mfn);
-		return false;
+		BUG();
 	}
 
 	/* Update m2p */
 	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
 		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
 		     mfn, pfn);
-		return false;
+		BUG();
 	}
 
-	return true;
+	/* Update kernel mapping, but not for highmem. */
+	if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+		return;
+
+	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
+					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
+		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
+		     mfn, pfn);
+		BUG();
+	}
 }
 
 /*
  * This function updates the p2m and m2p tables with an identity map from
- * start_pfn to start_pfn+size and remaps the underlying RAM of the original
- * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
- * to not exhaust the reserved brk space. Doing it in properly aligned blocks
- * ensures we only allocate the minimum required leaf pages in the p2m table. It
- * copies the existing mfns from the p2m table under the 1:1 map, overwrites
- * them with the identity map and then updates the p2m and m2p tables with the
- * remapped memory.
+ * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
+ * original allocation at remap_pfn. The information needed for remapping is
+ * saved in the memory itself to avoid the need for allocating buffers. The
+ * complete remap information is contained in a list of MFNs each containing
+ * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
+ * This enables us to preserve the original mfn sequence while doing the
+ * remapping at a time when the memory management is capable of allocating
+ * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
+ * its callers.
  */
-static unsigned long __init xen_do_set_identity_and_remap_chunk(
+static void __init xen_do_set_identity_and_remap_chunk(
 	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
 {
+	unsigned long buf = (unsigned long)&xen_remap_buf;
+	unsigned long mfn_save, mfn;
 	unsigned long ident_pfn_iter, remap_pfn_iter;
-	unsigned long ident_start_pfn_align, remap_start_pfn_align;
-	unsigned long ident_end_pfn_align, remap_end_pfn_align;
-	unsigned long ident_boundary_pfn, remap_boundary_pfn;
-	unsigned long ident_cnt = 0;
-	unsigned long remap_cnt = 0;
+	unsigned long ident_end_pfn = start_pfn + size;
 	unsigned long left = size;
-	unsigned long mod;
-	int i;
+	unsigned long ident_cnt = 0;
+	unsigned int i, chunk;
 
 	WARN_ON(size == 0);
 
 	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 
-	/*
-	 * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
-	 * blocks. We need to keep track of both the existing pfn mapping and
-	 * the new pfn remapping.
-	 */
-	mod = start_pfn % P2M_PER_PAGE;
-	ident_start_pfn_align =
-		mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
-	mod = remap_pfn % P2M_PER_PAGE;
-	remap_start_pfn_align =
-		mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
-	mod = (start_pfn + size) % P2M_PER_PAGE;
-	ident_end_pfn_align = start_pfn + size - mod;
-	mod = (remap_pfn + size) % P2M_PER_PAGE;
-	remap_end_pfn_align = remap_pfn + size - mod;
-
-	/* Iterate over each p2m leaf node in each range */
-	for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
-	     ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
-	     ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
-		/* Check we aren't past the end */
-		BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
-		BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
-
-		/* Save p2m mappings */
-		for (i = 0; i < P2M_PER_PAGE; i++)
-			xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
-
-		/* Set identity map which will free a p2m leaf */
-		ident_cnt += set_phys_range_identity(ident_pfn_iter,
-			ident_pfn_iter + P2M_PER_PAGE);
-
-#ifdef DEBUG
-		/* Helps verify a p2m leaf has been freed */
-		for (i = 0; i < P2M_PER_PAGE; i++) {
-			unsigned int pfn = ident_pfn_iter + i;
-			BUG_ON(pfn_to_mfn(pfn) != pfn);
-		}
-#endif
-		/* Now remap memory */
-		for (i = 0; i < P2M_PER_PAGE; i++) {
-			unsigned long mfn = xen_remap_buf[i];
-
-			/* This will use the p2m leaf freed above */
-			if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
-				WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
-					remap_pfn_iter + i, mfn);
-				return 0;
-			}
-
-			remap_cnt++;
-		}
+	/* Don't use memory until remapped */
+	memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));
 
-		left -= P2M_PER_PAGE;
-	}
-
-	/* Max boundary space possible */
-	BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+	mfn_save = virt_to_mfn(buf);
 
-	/* Now handle the boundary conditions */
-	ident_boundary_pfn = start_pfn;
-	remap_boundary_pfn = remap_pfn;
-	for (i = 0; i < left; i++) {
-		unsigned long mfn;
+	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
+	     ident_pfn_iter < ident_end_pfn;
+	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
+		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
 
-		/* These two checks move from the start to end boundaries */
-		if (ident_boundary_pfn == ident_start_pfn_align)
-			ident_boundary_pfn = ident_pfn_iter;
-		if (remap_boundary_pfn == remap_start_pfn_align)
-			remap_boundary_pfn = remap_pfn_iter;
+		/* Map first pfn to xen_remap_buf */
+		mfn = pfn_to_mfn(ident_pfn_iter);
+		set_pte_mfn(buf, mfn, PAGE_KERNEL);
 
-		/* Check we aren't past the end */
-		BUG_ON(ident_boundary_pfn >= start_pfn + size);
-		BUG_ON(remap_boundary_pfn >= remap_pfn + size);
+		/* Save mapping information in page */
+		xen_remap_buf.next_area_mfn = xen_remap_mfn;
+		xen_remap_buf.target_pfn = remap_pfn_iter;
+		xen_remap_buf.size = chunk;
+		for (i = 0; i < chunk; i++)
+			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
 
-		mfn = pfn_to_mfn(ident_boundary_pfn);
+		/* Put remap buf into list. */
+		xen_remap_mfn = mfn;
 
-		if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
-			WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
-				remap_pfn_iter + i, mfn);
-			return 0;
-		}
-		remap_cnt++;
-
-		ident_boundary_pfn++;
-		remap_boundary_pfn++;
-	}
+		/* Set identity map */
+		ident_cnt += set_phys_range_identity(ident_pfn_iter,
+			ident_pfn_iter + chunk);
 
-	/* Finish up the identity map */
-	if (ident_start_pfn_align >= ident_end_pfn_align) {
-		/*
-		 * In this case we have an identity range which does not span an
-		 * aligned block so everything needs to be identity mapped here.
-		 * If we didn't check this we might remap too many pages since
-		 * the align boundaries are not meaningful in this case.
-		 */
-		ident_cnt += set_phys_range_identity(start_pfn,
-			start_pfn + size);
-	} else {
-		/* Remapped above so check each end of the chunk */
-		if (start_pfn < ident_start_pfn_align)
-			ident_cnt += set_phys_range_identity(start_pfn,
-				ident_start_pfn_align);
-		if (start_pfn + size > ident_pfn_iter)
-			ident_cnt += set_phys_range_identity(ident_pfn_iter,
-				start_pfn + size);
+		left -= chunk;
 	}
 
-	BUG_ON(ident_cnt != size);
-	BUG_ON(remap_cnt != size);
-
-	return size;
+	/* Restore old xen_remap_buf mapping */
+	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 }
 
 /*
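The rewritten loop above threads the remap information through the memory being remapped: each chunk's first frame is temporarily mapped at xen_remap_buf, filled with the link, target pfn, size and saved mfns, and then becomes the new head of the list anchored at xen_remap_mfn. Here is a userspace simulation of that list building, under the assumption that a plain array can stand in for machine frames and with a deliberately tiny REMAP_SIZE; the "stand-in" comments mark where the real code manipulates PTEs or the p2m.

#include <stdio.h>

#define REMAP_SIZE	4	/* tiny for the demo; really P2M_PER_PAGE - 3 */
#define INVALID_MFN	(~0UL)

struct area {			/* same layout idea as xen_remap_buf */
	unsigned long next_area_mfn;	/* previous list head */
	unsigned long target_pfn;	/* where the chunk will be remapped */
	unsigned long size;		/* valid entries in mfns[] */
	unsigned long mfns[REMAP_SIZE];
};

static struct area frames[32];	/* fake machine memory, indexed by mfn */
static unsigned long remap_head = INVALID_MFN;	/* xen_remap_mfn stand-in */

/* Record one chunk; its first frame doubles as the metadata page. */
static void save_chunk(unsigned long first_mfn, unsigned long target_pfn,
		       unsigned long size)
{
	struct area *buf = &frames[first_mfn];	/* set_pte_mfn() stand-in */
	unsigned long i;

	buf->next_area_mfn = remap_head;
	buf->target_pfn = target_pfn;
	buf->size = size;
	for (i = 0; i < size; i++)
		buf->mfns[i] = first_mfn + i;	/* pfn_to_mfn() stand-in */

	remap_head = first_mfn;		/* chunk becomes the new head */
}

int main(void)
{
	unsigned long mfn;

	save_chunk(10, 1000, 4);
	save_chunk(20, 1004, 3);

	/* Walk the list the way xen_remap_memory() will later. */
	for (mfn = remap_head; mfn != INVALID_MFN;
	     mfn = frames[mfn].next_area_mfn)
		printf("area at mfn %lu: %lu frame(s) -> pfn %lu\n",
		       mfn, frames[mfn].size, frames[mfn].target_pfn);
	return 0;
}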
@@ -396,8 +317,7 @@ static unsigned long __init xen_do_set_identity_and_remap_chunk(
 static unsigned long __init xen_set_identity_and_remap_chunk(
 	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
 	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
-	unsigned long *identity, unsigned long *remapped,
-	unsigned long *released)
+	unsigned long *identity, unsigned long *released)
 {
 	unsigned long pfn;
 	unsigned long i = 0;
@@ -431,19 +351,12 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
 		if (size > remap_range_size)
 			size = remap_range_size;
 
-		if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
-			WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
-				cur_pfn, size, remap_pfn);
-			xen_set_identity_and_release_chunk(cur_pfn,
-				cur_pfn + left, nr_pages, identity, released);
-			break;
-		}
+		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
 
 		/* Update variables to reflect new mappings. */
 		i += size;
 		remap_pfn += size;
 		*identity += size;
-		*remapped += size;
 	}
 
 	/*
@@ -464,7 +377,6 @@ static unsigned long __init xen_set_identity_and_remap(
 {
 	phys_addr_t start = 0;
 	unsigned long identity = 0;
-	unsigned long remapped = 0;
 	unsigned long last_pfn = nr_pages;
 	const struct e820entry *entry;
 	unsigned long num_released = 0;
@@ -494,8 +406,7 @@ static unsigned long __init xen_set_identity_and_remap(
 			last_pfn = xen_set_identity_and_remap_chunk(
 						list, map_size, start_pfn,
 						end_pfn, nr_pages, last_pfn,
-						&identity, &remapped,
-						&num_released);
+						&identity, &num_released);
 			start = end;
 		}
 	}
@@ -503,12 +414,65 @@ static unsigned long __init xen_set_identity_and_remap(
 	*released = num_released;
 
 	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
-	pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
-		last_pfn);
 	pr_info("Released %ld page(s)\n", num_released);
 
 	return last_pfn;
 }
+
+/*
+ * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
+ * The remap information (which mfn is remapped to which pfn) is contained
+ * in the to-be-remapped memory itself, in a linked list anchored at
+ * xen_remap_mfn. This scheme allows remapping the different chunks in
+ * arbitrary order while the resulting mapping will be independent of the
+ * order.
+ */
+void __init xen_remap_memory(void)
+{
+	unsigned long buf = (unsigned long)&xen_remap_buf;
+	unsigned long mfn_save, mfn, pfn;
+	unsigned long remapped = 0;
+	unsigned int i;
+	unsigned long pfn_s = ~0UL;
+	unsigned long len = 0;
+
+	mfn_save = virt_to_mfn(buf);
+
+	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
+		/* Map the remap information */
+		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
+
+		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
+
+		pfn = xen_remap_buf.target_pfn;
+		for (i = 0; i < xen_remap_buf.size; i++) {
+			mfn = xen_remap_buf.mfns[i];
+			xen_update_mem_tables(pfn, mfn);
+			remapped++;
+			pfn++;
+		}
+		if (pfn_s == ~0UL || pfn == pfn_s) {
+			pfn_s = xen_remap_buf.target_pfn;
+			len += xen_remap_buf.size;
+		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
+			len += xen_remap_buf.size;
+		} else {
+			memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+			pfn_s = xen_remap_buf.target_pfn;
+			len = xen_remap_buf.size;
+		}
+
+		mfn = xen_remap_mfn;
+		xen_remap_mfn = xen_remap_buf.next_area_mfn;
+	}
+
+	if (pfn_s != ~0UL && len)
+		memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+
+	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
+
+	pr_info("Remapped %ld page(s)\n", remapped);
+}
+
 static unsigned long __init xen_get_max_pages(void)
 {
 	unsigned long max_pages = MAX_DOMAIN_PAGES;
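In xen_remap_memory() above, the pfn_s/len pair batches the deferred memblock_free() calls so that contiguous target ranges are returned to the allocator in one call. Below is a simplified, runnable sketch of just that coalescing step (the real loop also special-cases pfn == pfn_s and defers reading next_area_mfn until the chunk has been processed); the areas[] data is made up for the demo.

#include <stdio.h>

/* memblock_free() stand-in: hand a contiguous pfn range back. */
static void free_range(unsigned long pfn, unsigned long len)
{
	printf("free pfns %lu..%lu\n", pfn, pfn + len - 1);
}

int main(void)
{
	/* target_pfn/size pairs as they might come off the remap list */
	static const struct { unsigned long target_pfn, size; } areas[] = {
		{ 1000, 509 }, { 1509, 509 }, { 4000, 100 },
	};
	unsigned long pfn_s = ~0UL, len = 0;

	for (unsigned i = 0; i < sizeof(areas) / sizeof(areas[0]); i++) {
		if (pfn_s == ~0UL) {		/* first area starts a run */
			pfn_s = areas[i].target_pfn;
			len = areas[i].size;
		} else if (pfn_s + len == areas[i].target_pfn) {
			len += areas[i].size;	/* area extends the run */
		} else {
			free_range(pfn_s, len);	/* flush, start a new run */
			pfn_s = areas[i].target_pfn;
			len = areas[i].size;
		}
	}
	if (pfn_s != ~0UL && len)		/* flush the final run */
		free_range(pfn_s, len);
	return 0;
}

With the data above, the first two areas are freed as one range (pfns 1000..2017) and the third separately (pfns 4000..4099).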
@@ -616,7 +580,8 @@ char * __init xen_memory_setup(void)
 	extra_pages += max_pages - max_pfn;
 
 	/*
-	 * Set identity map on non-RAM pages and remap the underlying RAM.
+	 * Set identity map on non-RAM pages and prepare remapping the
+	 * underlying RAM.
 	 */
 	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
 					      &xen_released_pages);
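Taken together with the xen_pagetable_init() hunk in mmu.c, the comment change above reflects the new two-phase scheme. A rough call-order summary, reconstructed from this patch alone:

/*
 *   xen_memory_setup()                        early boot
 *     -> xen_set_identity_and_remap()
 *          -> xen_do_set_identity_and_remap_chunk()
 *               memblock_reserve() the target range,
 *               save mfns + target pfn into the chunk itself,
 *               link the chunk into the xen_remap_mfn list
 *
 *   xen_pagetable_init()                      after paging is up
 *     -> xen_remap_memory()
 *          walk the list, xen_update_mem_tables() each mfn,
 *          memblock_free() the now-usable ranges
 */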
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 28c7e0be56e4..5b72a06c5b85 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -35,6 +35,7 @@ void xen_mm_pin_all(void);
 void xen_mm_unpin_all(void);
 void xen_set_pat(u64);
 
+void __init xen_remap_memory(void);
 char * __init xen_memory_setup(void);
 char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);