author    Matt Rushton <mvrushton@gmail.com>    2014-08-11 14:57:57 -0400
committer Stefano Stabellini <stefano.stabellini@eu.citrix.com>    2014-09-23 09:36:18 -0400
commit    4fbb67e3c87b806ad54445a1b4a9c6bde2359c98 (patch)
tree      a9f8543998ce470a1c739ca5f1409664ec53f8d6
parent    0f33be009b89d2268e94194dc4fd01a7851b6d51 (diff)
xen/setup: Remap Xen Identity Mapped RAM
Instead of ballooning up and down dom0 memory this remaps the existing mfns
that were replaced by the identity map. The reason for this is that the
existing implementation ballooned memory up and down which caused dom0 to
have discontiguous pages. In some cases this resulted in the use of bounce
buffers which reduced network I/O performance significantly. This change
will honor the existing order of the pages with the exception of some
boundary conditions.

To do this we need to update both the Linux p2m table and the Xen m2p table.
Particular care must be taken when updating the p2m table since it's
important to limit table memory consumption and reuse the existing leaf
pages which get freed when an entire leaf page is set to the identity map.
To implement this, mapping updates are grouped into blocks with table
entries getting cached temporarily and then released.

On my test system before:
Total pages: 2105014
Total contiguous: 1640635

After:
Total pages: 2105014
Total contiguous: 2098904

Signed-off-by: Matthew Rushton <mrushton@amazon.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
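The per-block sequence described above (cache the existing mfns, flip the block to the 1:1 map, then re-attach the cached mfns at the new pfns by updating both the p2m and the m2p) can be sketched roughly as below. This is an illustration only, not the patch itself: the helper name remap_one_block and its error handling are hypothetical, while the real work is done by xen_do_set_identity_and_remap_chunk() and xen_update_mem_tables() in the setup.c hunk further down.

/*
 * Illustrative sketch only (not part of the patch): remap one
 * P2M_PER_PAGE-sized block from ident_pfn to remap_pfn.
 */
static int __init remap_one_block(unsigned long ident_pfn,
				  unsigned long remap_pfn)
{
	static unsigned long mfn_save[P2M_PER_PAGE] __initdata;
	int i;

	/* 1) Cache the machine frames currently backing the block. */
	for (i = 0; i < P2M_PER_PAGE; i++)
		mfn_save[i] = pfn_to_mfn(ident_pfn + i);

	/* 2) Point the whole block at the 1:1 map; this frees the p2m leaf. */
	set_phys_range_identity(ident_pfn, ident_pfn + P2M_PER_PAGE);

	/* 3) Re-attach the cached frames at remap_pfn: p2m (Linux) and m2p (Xen). */
	for (i = 0; i < P2M_PER_PAGE; i++) {
		struct mmu_update u = {
			.ptr = ((unsigned long long)mfn_save[i] << PAGE_SHIFT) |
			       MMU_MACHPHYS_UPDATE,
			.val = remap_pfn + i,
		};

		if (!early_set_phys_to_machine(remap_pfn + i, mfn_save[i]))
			return -1;	/* p2m update failed */
		if (HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0)
			return -1;	/* m2p update failed */
	}
	return 0;
}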
-rw-r--r--  arch/x86/xen/p2m.c   |  23
-rw-r--r--  arch/x86/xen/p2m.h   |  15
-rw-r--r--  arch/x86/xen/setup.c | 370
3 files changed, 314 insertions(+), 94 deletions(-)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 3172692381ae..9f5983b01ed9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -173,6 +173,7 @@
 #include <xen/balloon.h>
 #include <xen/grant_table.h>
 
+#include "p2m.h"
 #include "multicalls.h"
 #include "xen-ops.h"
 
@@ -180,12 +181,6 @@ static void __init m2p_override_init(void);
 
 unsigned long xen_max_p2m_pfn __read_mostly;
 
-#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
-#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
-#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
-
-#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
-
 /* Placeholders for holes in the address space */
 static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
 static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
@@ -202,16 +197,12 @@ static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
-/* We might hit two boundary violations at the start and end, at max each
- * boundary violation will require three middle nodes. */
-RESERVE_BRK(p2m_mid_extra, PAGE_SIZE * 2 * 3);
-
-/* When we populate back during bootup, the amount of pages can vary. The
- * max we have is seen is 395979, but that does not mean it can't be more.
- * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle
- * it can re-use Xen provided mfn_list array, so we only need to allocate at
- * most three P2M top nodes. */
-RESERVE_BRK(p2m_populated, PAGE_SIZE * 3);
+/* For each I/O range remapped we may lose up to two leaf pages for the boundary
+ * violations and three mid pages to cover up to 3GB. With
+ * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
+ * remapped region.
+ */
+RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
diff --git a/arch/x86/xen/p2m.h b/arch/x86/xen/p2m.h
new file mode 100644
index 000000000000..ad8aee24ab72
--- /dev/null
+++ b/arch/x86/xen/p2m.h
@@ -0,0 +1,15 @@
+#ifndef _XEN_P2M_H
+#define _XEN_P2M_H
+
+#define P2M_PER_PAGE		(PAGE_SIZE / sizeof(unsigned long))
+#define P2M_MID_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long *))
+#define P2M_TOP_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long **))
+
+#define MAX_P2M_PFN		(P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE)
+
+#define MAX_REMAP_RANGES	10
+
+extern unsigned long __init set_phys_range_identity(unsigned long pfn_s,
+				      unsigned long pfn_e);
+
+#endif /* _XEN_P2M_H */
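For a sense of scale, assuming a 64-bit dom0 with 4 KiB pages (an assumption about the build, not something the header states), the constants above work out as in this small standalone calculation; the last figure is the worst-case brk reservation that p2m_identity_remap makes in the p2m.c hunk above.

#include <stdio.h>

/* Standalone arithmetic check, assuming 4 KiB pages and 8-byte pointers. */
int main(void)
{
	unsigned long page_size = 4096;
	unsigned long p2m_per_page = page_size / sizeof(unsigned long);	/* 512 */
	unsigned long max_p2m_pfn = p2m_per_page * p2m_per_page * p2m_per_page;
	unsigned long max_remap_ranges = 10;

	printf("P2M_PER_PAGE = %lu entries per leaf page\n", p2m_per_page);
	printf("MAX_P2M_PFN  = %lu pfns (%lu GiB of pseudo-physical space)\n",
	       max_p2m_pfn, (max_p2m_pfn * page_size) >> 30);
	printf("p2m_identity_remap brk = %lu KiB\n",
	       (page_size * 2 * 3 * max_remap_ranges) >> 10);
	return 0;
}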
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 2e555163c2fe..af7216128d93 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -29,6 +29,7 @@
 #include <xen/features.h>
 #include "xen-ops.h"
 #include "vdso.h"
+#include "p2m.h"
 
 /* These are code, but not functions. Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -46,6 +47,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 /* Number of pages released from the initial allocation. */
 unsigned long xen_released_pages;
 
+/* Buffer used to remap identity mapped pages */
+unsigned long xen_remap_buf[P2M_PER_PAGE] __initdata;
+
 /*
  * The maximum amount of extra memory compared to the base size. The
  * main scaling factor is the size of struct page. At extreme ratios
@@ -151,107 +155,325 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 	return len;
 }
 
-static unsigned long __init xen_release_chunk(unsigned long start,
-					      unsigned long end)
-{
-	return xen_do_chunk(start, end, true);
-}
-
-static unsigned long __init xen_populate_chunk(
+/*
+ * Finds the next RAM pfn available in the E820 map after min_pfn.
+ * This function updates min_pfn with the pfn found and returns
+ * the size of that range or zero if not found.
+ */
+static unsigned long __init xen_find_pfn_range(
 	const struct e820entry *list, size_t map_size,
-	unsigned long max_pfn, unsigned long *last_pfn,
-	unsigned long credits_left)
+	unsigned long *min_pfn)
 {
 	const struct e820entry *entry;
 	unsigned int i;
 	unsigned long done = 0;
-	unsigned long dest_pfn;
 
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
 		unsigned long s_pfn;
 		unsigned long e_pfn;
-		unsigned long pfns;
-		long capacity;
-
-		if (credits_left <= 0)
-			break;
 
 		if (entry->type != E820_RAM)
 			continue;
 
 		e_pfn = PFN_DOWN(entry->addr + entry->size);
 
-		/* We only care about E820 after the xen_start_info->nr_pages */
-		if (e_pfn <= max_pfn)
+		/* We only care about E820 after this */
+		if (e_pfn < *min_pfn)
 			continue;
 
 		s_pfn = PFN_UP(entry->addr);
-		/* If the E820 falls within the nr_pages, we want to start
-		 * at the nr_pages PFN.
-		 * If that would mean going past the E820 entry, skip it
+
+		/* If min_pfn falls within the E820 entry, we want to start
+		 * at the min_pfn PFN.
 		 */
-		if (s_pfn <= max_pfn) {
-			capacity = e_pfn - max_pfn;
-			dest_pfn = max_pfn;
+		if (s_pfn <= *min_pfn) {
+			done = e_pfn - *min_pfn;
 		} else {
-			capacity = e_pfn - s_pfn;
-			dest_pfn = s_pfn;
+			done = e_pfn - s_pfn;
+			*min_pfn = s_pfn;
 		}
+		break;
+	}
 
-		if (credits_left < capacity)
-			capacity = credits_left;
+	return done;
+}
 
-		pfns = xen_do_chunk(dest_pfn, dest_pfn + capacity, false);
-		done += pfns;
-		*last_pfn = (dest_pfn + pfns);
-		if (pfns < capacity)
-			break;
-		credits_left -= pfns;
+/*
+ * This releases a chunk of memory and then does the identity map. It's used as
+ * as a fallback if the remapping fails.
+ */
+static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
+	unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
+	unsigned long *released)
+{
+	WARN_ON(start_pfn > end_pfn);
+
+	/* Need to release pages first */
+	*released += xen_do_chunk(start_pfn, min(end_pfn, nr_pages), true);
+	*identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
+/*
+ * Helper function to update both the p2m and m2p tables.
+ */
+static unsigned long __init xen_update_mem_tables(unsigned long pfn,
+						  unsigned long mfn)
+{
+	struct mmu_update update = {
+		.ptr = ((unsigned long long)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
+		.val = pfn
+	};
+
+	/* Update p2m */
+	if (!early_set_phys_to_machine(pfn, mfn)) {
+		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
+		     pfn, mfn);
+		return false;
 	}
-	return done;
+
+	/* Update m2p */
+	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
+		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
+		     mfn, pfn);
+		return false;
+	}
+
+	return true;
 }
 
-static void __init xen_set_identity_and_release_chunk(
-	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
-	unsigned long *released, unsigned long *identity)
+/*
+ * This function updates the p2m and m2p tables with an identity map from
+ * start_pfn to start_pfn+size and remaps the underlying RAM of the original
+ * allocation at remap_pfn. It must do so carefully in P2M_PER_PAGE sized blocks
+ * to not exhaust the reserved brk space. Doing it in properly aligned blocks
+ * ensures we only allocate the minimum required leaf pages in the p2m table. It
+ * copies the existing mfns from the p2m table under the 1:1 map, overwrites
+ * them with the identity map and then updates the p2m and m2p tables with the
+ * remapped memory.
+ */
+static unsigned long __init xen_do_set_identity_and_remap_chunk(
+	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
 {
-	unsigned long pfn;
+	unsigned long ident_pfn_iter, remap_pfn_iter;
+	unsigned long ident_start_pfn_align, remap_start_pfn_align;
+	unsigned long ident_end_pfn_align, remap_end_pfn_align;
+	unsigned long ident_boundary_pfn, remap_boundary_pfn;
+	unsigned long ident_cnt = 0;
+	unsigned long remap_cnt = 0;
+	unsigned long left = size;
+	unsigned long mod;
+	int i;
+
+	WARN_ON(size == 0);
+
+	BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 
 	/*
-	 * If the PFNs are currently mapped, clear the mappings
-	 * (except for the ISA region which must be 1:1 mapped) to
-	 * release the refcounts (in Xen) on the original frames.
+	 * Determine the proper alignment to remap memory in P2M_PER_PAGE sized
+	 * blocks. We need to keep track of both the existing pfn mapping and
+	 * the new pfn remapping.
 	 */
-	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
-		pte_t pte = __pte_ma(0);
+	mod = start_pfn % P2M_PER_PAGE;
+	ident_start_pfn_align =
+		mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
+	mod = remap_pfn % P2M_PER_PAGE;
+	remap_start_pfn_align =
+		mod ? (remap_pfn - mod + P2M_PER_PAGE) : remap_pfn;
+	mod = (start_pfn + size) % P2M_PER_PAGE;
+	ident_end_pfn_align = start_pfn + size - mod;
+	mod = (remap_pfn + size) % P2M_PER_PAGE;
+	remap_end_pfn_align = remap_pfn + size - mod;
+
+	/* Iterate over each p2m leaf node in each range */
+	for (ident_pfn_iter = ident_start_pfn_align, remap_pfn_iter = remap_start_pfn_align;
+	     ident_pfn_iter < ident_end_pfn_align && remap_pfn_iter < remap_end_pfn_align;
+	     ident_pfn_iter += P2M_PER_PAGE, remap_pfn_iter += P2M_PER_PAGE) {
+		/* Check we aren't past the end */
+		BUG_ON(ident_pfn_iter + P2M_PER_PAGE > start_pfn + size);
+		BUG_ON(remap_pfn_iter + P2M_PER_PAGE > remap_pfn + size);
+
+		/* Save p2m mappings */
+		for (i = 0; i < P2M_PER_PAGE; i++)
+			xen_remap_buf[i] = pfn_to_mfn(ident_pfn_iter + i);
+
+		/* Set identity map which will free a p2m leaf */
+		ident_cnt += set_phys_range_identity(ident_pfn_iter,
+			ident_pfn_iter + P2M_PER_PAGE);
+
+#ifdef DEBUG
+		/* Helps verify a p2m leaf has been freed */
+		for (i = 0; i < P2M_PER_PAGE; i++) {
+			unsigned int pfn = ident_pfn_iter + i;
+			BUG_ON(pfn_to_mfn(pfn) != pfn);
+		}
+#endif
+		/* Now remap memory */
+		for (i = 0; i < P2M_PER_PAGE; i++) {
+			unsigned long mfn = xen_remap_buf[i];
+
+			/* This will use the p2m leaf freed above */
+			if (!xen_update_mem_tables(remap_pfn_iter + i, mfn)) {
+				WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+					remap_pfn_iter + i, mfn);
+				return 0;
+			}
+
+			remap_cnt++;
+		}
 
-		if (pfn < PFN_UP(ISA_END_ADDRESS))
-			pte = mfn_pte(pfn, PAGE_KERNEL_IO);
+		left -= P2M_PER_PAGE;
+	}
 
-		(void)HYPERVISOR_update_va_mapping(
-			(unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
+	/* Max boundary space possible */
+	BUG_ON(left > (P2M_PER_PAGE - 1) * 2);
+
+	/* Now handle the boundary conditions */
+	ident_boundary_pfn = start_pfn;
+	remap_boundary_pfn = remap_pfn;
+	for (i = 0; i < left; i++) {
+		unsigned long mfn;
+
+		/* These two checks move from the start to end boundaries */
+		if (ident_boundary_pfn == ident_start_pfn_align)
+			ident_boundary_pfn = ident_pfn_iter;
+		if (remap_boundary_pfn == remap_start_pfn_align)
+			remap_boundary_pfn = remap_pfn_iter;
+
+		/* Check we aren't past the end */
+		BUG_ON(ident_boundary_pfn >= start_pfn + size);
+		BUG_ON(remap_boundary_pfn >= remap_pfn + size);
+
+		mfn = pfn_to_mfn(ident_boundary_pfn);
+
+		if (!xen_update_mem_tables(remap_boundary_pfn, mfn)) {
+			WARN(1, "Failed to update mem mapping for pfn=%ld mfn=%ld\n",
+				remap_pfn_iter + i, mfn);
+			return 0;
+		}
+		remap_cnt++;
+
+		ident_boundary_pfn++;
+		remap_boundary_pfn++;
 	}
 
-	if (start_pfn < nr_pages)
-		*released += xen_release_chunk(
-			start_pfn, min(end_pfn, nr_pages));
+	/* Finish up the identity map */
+	if (ident_start_pfn_align >= ident_end_pfn_align) {
+		/*
+		 * In this case we have an identity range which does not span an
+		 * aligned block so everything needs to be identity mapped here.
+		 * If we didn't check this we might remap too many pages since
+		 * the align boundaries are not meaningful in this case.
+		 */
+		ident_cnt += set_phys_range_identity(start_pfn,
+			start_pfn + size);
+	} else {
+		/* Remapped above so check each end of the chunk */
+		if (start_pfn < ident_start_pfn_align)
+			ident_cnt += set_phys_range_identity(start_pfn,
+				ident_start_pfn_align);
+		if (start_pfn + size > ident_pfn_iter)
+			ident_cnt += set_phys_range_identity(ident_pfn_iter,
+				start_pfn + size);
+	}
 
-	*identity += set_phys_range_identity(start_pfn, end_pfn);
+	BUG_ON(ident_cnt != size);
+	BUG_ON(remap_cnt != size);
+
+	return size;
 }
 
-static unsigned long __init xen_set_identity_and_release(
-	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
+/*
+ * This function takes a contiguous pfn range that needs to be identity mapped
+ * and:
+ *
+ * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
+ * 2) Calls the do_ function to actually do the mapping/remapping work.
+ *
+ * The goal is to not allocate additional memory but to remap the existing
+ * pages. In the case of an error the underlying memory is simply released back
+ * to Xen and not remapped.
+ */
+static unsigned long __init xen_set_identity_and_remap_chunk(
+	const struct e820entry *list, size_t map_size, unsigned long start_pfn,
+	unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
+	unsigned long *identity, unsigned long *remapped,
+	unsigned long *released)
+{
+	unsigned long pfn;
+	unsigned long i = 0;
+	unsigned long n = end_pfn - start_pfn;
+
+	while (i < n) {
+		unsigned long cur_pfn = start_pfn + i;
+		unsigned long left = n - i;
+		unsigned long size = left;
+		unsigned long remap_range_size;
+
+		/* Do not remap pages beyond the current allocation */
+		if (cur_pfn >= nr_pages) {
+			/* Identity map remaining pages */
+			*identity += set_phys_range_identity(cur_pfn,
+				cur_pfn + size);
+			break;
+		}
+		if (cur_pfn + size > nr_pages)
+			size = nr_pages - cur_pfn;
+
+		remap_range_size = xen_find_pfn_range(list, map_size,
+						      &remap_pfn);
+		if (!remap_range_size) {
+			pr_warning("Unable to find available pfn range, not remapping identity pages\n");
+			xen_set_identity_and_release_chunk(cur_pfn,
+				cur_pfn + left, nr_pages, identity, released);
+			break;
+		}
+		/* Adjust size to fit in current e820 RAM region */
+		if (size > remap_range_size)
+			size = remap_range_size;
+
+		if (!xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn)) {
+			WARN(1, "Failed to remap 1:1 memory cur_pfn=%ld size=%ld remap_pfn=%ld\n",
+				cur_pfn, size, remap_pfn);
+			xen_set_identity_and_release_chunk(cur_pfn,
+				cur_pfn + left, nr_pages, identity, released);
+			break;
+		}
+
+		/* Update variables to reflect new mappings. */
+		i += size;
+		remap_pfn += size;
+		*identity += size;
+		*remapped += size;
+	}
+
+	/*
+	 * If the PFNs are currently mapped, the VA mapping also needs
+	 * to be updated to be 1:1.
+	 */
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+		(void)HYPERVISOR_update_va_mapping(
+			(unsigned long)__va(pfn << PAGE_SHIFT),
+			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+	return remap_pfn;
+}
+
+static unsigned long __init xen_set_identity_and_remap(
+	const struct e820entry *list, size_t map_size, unsigned long nr_pages,
+	unsigned long *released)
 {
 	phys_addr_t start = 0;
-	unsigned long released = 0;
 	unsigned long identity = 0;
+	unsigned long remapped = 0;
+	unsigned long last_pfn = nr_pages;
 	const struct e820entry *entry;
+	unsigned long num_released = 0;
 	int i;
 
 	/*
 	 * Combine non-RAM regions and gaps until a RAM region (or the
 	 * end of the map) is reached, then set the 1:1 map and
-	 * release the pages (if available) in those non-RAM regions.
+	 * remap the memory in those non-RAM regions.
 	 *
 	 * The combined non-RAM regions are rounded to a whole number
 	 * of pages so any partial pages are accessible via the 1:1
@@ -269,22 +491,24 @@ static unsigned long __init xen_set_identity_and_release(
 			end_pfn = PFN_UP(entry->addr);
 
 			if (start_pfn < end_pfn)
-				xen_set_identity_and_release_chunk(
-					start_pfn, end_pfn, nr_pages,
-					&released, &identity);
-
+				last_pfn = xen_set_identity_and_remap_chunk(
+						list, map_size, start_pfn,
+						end_pfn, nr_pages, last_pfn,
+						&identity, &remapped,
+						&num_released);
 			start = end;
 		}
 	}
 
-	if (released)
-		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-	if (identity)
-		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+	*released = num_released;
 
-	return released;
-}
+	pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
+	pr_info("Remapped %ld page(s), last_pfn=%ld\n", remapped,
+		last_pfn);
+	pr_info("Released %ld page(s)\n", num_released);
 
+	return last_pfn;
+}
 static unsigned long __init xen_get_max_pages(void)
 {
 	unsigned long max_pages = MAX_DOMAIN_PAGES;
@@ -347,7 +571,6 @@ char * __init xen_memory_setup(void)
 	unsigned long max_pages;
 	unsigned long last_pfn = 0;
 	unsigned long extra_pages = 0;
-	unsigned long populated;
 	int i;
 	int op;
 
@@ -392,20 +615,11 @@ char * __init xen_memory_setup(void)
 		extra_pages += max_pages - max_pfn;
 
 	/*
-	 * Set P2M for all non-RAM pages and E820 gaps to be identity
-	 * type PFNs. Any RAM pages that would be made inaccesible by
-	 * this are first released.
+	 * Set identity map on non-RAM pages and remap the underlying RAM.
 	 */
-	xen_released_pages = xen_set_identity_and_release(
-					map, memmap.nr_entries, max_pfn);
-
-	/*
-	 * Populate back the non-RAM pages and E820 gaps that had been
-	 * released. */
-	populated = xen_populate_chunk(map, memmap.nr_entries,
-					max_pfn, &last_pfn, xen_released_pages);
+	last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+			&xen_released_pages);
 
-	xen_released_pages -= populated;
 	extra_pages += xen_released_pages;
 
 	if (last_pfn > max_pfn) {
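As a closing note on the alignment scheme used by xen_do_set_identity_and_remap_chunk(): only the pfns between the first and last P2M_PER_PAGE-aligned boundaries inside a chunk are handled as whole p2m leaves, and whatever is left over is handled pfn by pfn, which is why the code can assert left <= (P2M_PER_PAGE - 1) * 2. A standalone sketch with made-up pfn values follows (it tracks only the identity-side alignment; the real code also aligns the remap side and special-cases chunks that do not span a full leaf).

#include <stdio.h>

#define P2M_PER_PAGE 512UL	/* 4 KiB page / 8-byte entry, as in p2m.h */

int main(void)
{
	/* Hypothetical chunk: identity map 2000 pfns starting at pfn 1000. */
	unsigned long start_pfn = 1000, size = 2000;
	unsigned long mod, start_align, end_align, leaves, left;

	mod = start_pfn % P2M_PER_PAGE;
	start_align = mod ? (start_pfn - mod + P2M_PER_PAGE) : start_pfn;
	mod = (start_pfn + size) % P2M_PER_PAGE;
	end_align = start_pfn + size - mod;

	/* Assumes the chunk spans at least one aligned leaf (true here). */
	leaves = (end_align - start_align) / P2M_PER_PAGE;
	left = size - (end_align - start_align);

	printf("aligned span [%lu, %lu): %lu whole p2m leaves\n",
	       start_align, end_align, leaves);
	printf("boundary pfns handled one by one: %lu (max %lu)\n",
	       left, (P2M_PER_PAGE - 1) * 2);
	return 0;
}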