path: root/arch/x86/xen/setup.c
author    Juergen Gross <jgross@suse.com>  2014-11-28 05:53:55 -0500
committer David Vrabel <david.vrabel@citrix.com>  2014-12-04 09:08:59 -0500
commit    5b8e7d80542487ff1bf17b4cf2922a01dee13d3a (patch)
tree      e027791839cd32076d172c16700e010974f70ea4 /arch/x86/xen/setup.c
parent    97f4533a60ce5d0cb35ff44a190111f81a987620 (diff)
xen: Delay invalidating extra memory
When the physical memory configuration is initialized, the p2m entries for
not yet populated memory pages are set to "invalid". As those pages lie
beyond the hypervisor-built p2m list, the p2m tree has to be extended.

This patch delays processing the extra-memory-related p2m entries during
the boot process until some more basic memory management functions are
callable. This removes the need to create new p2m entries before virtual
memory management is available.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
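The idea can be modeled outside the kernel. Below is a minimal,
self-contained C sketch (illustrative only, not kernel code; the names
chk_extra_mem, inv_extra_mem, extra_mem and the toy p2m array are invented
stand-ins for xen_extra_mem[], xen_chk_extra_mem() and xen_inv_extra_mem()
added by this patch): early lookups are answered from the recorded region
table alone, so nothing has to be written into the p2m tree until the
delayed invalidation pass runs.

#include <stdio.h>

#define MAX_REGIONS   4
#define INVALID_ENTRY (~0UL)

/* Toy model: recorded extra-mem regions, not yet entered into the p2m. */
struct region { unsigned long start_pfn, n_pfns; };
static struct region extra_mem[MAX_REGIONS];

/* Early boot: answer lookups from the recorded ranges alone, writing
 * nothing (models xen_chk_extra_mem()). */
static unsigned long chk_extra_mem(unsigned long pfn)
{
        for (int i = 0; i < MAX_REGIONS; i++)
                if (pfn >= extra_mem[i].start_pfn &&
                    pfn < extra_mem[i].start_pfn + extra_mem[i].n_pfns)
                        return INVALID_ENTRY;
        return pfn;     /* identity-mapped outside extra mem */
}

/* Later, once basic memory management works: write the real "invalid"
 * entries in one pass (models xen_inv_extra_mem()). */
static unsigned long p2m[0x2000];       /* toy p2m list, index == pfn */
static void inv_extra_mem(void)
{
        for (int i = 0; i < MAX_REGIONS; i++)
                for (unsigned long pfn = extra_mem[i].start_pfn;
                     pfn < extra_mem[i].start_pfn + extra_mem[i].n_pfns; pfn++)
                        p2m[pfn] = INVALID_ENTRY;
}

int main(void)
{
        extra_mem[0] = (struct region){ .start_pfn = 0x1000, .n_pfns = 0x100 };

        printf("early: pfn 0x1080 -> %lx\n", chk_extra_mem(0x1080)); /* invalid */
        printf("early: pfn 0x0800 -> %lx\n", chk_extra_mem(0x0800)); /* identity */

        inv_extra_mem();        /* the delayed invalidation pass */
        printf("late:  p2m[0x1080] = %lx\n", p2m[0x1080]);
        return 0;
}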
Diffstat (limited to 'arch/x86/xen/setup.c')
 arch/x86/xen/setup.c | 98 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 73 insertions(+), 25 deletions(-)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e0b6912f9cad..12cd199e4c9a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -76,7 +76,6 @@ static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 
 static void __init xen_add_extra_mem(u64 start, u64 size)
 {
-        unsigned long pfn;
         int i;
 
         for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
@@ -96,17 +95,75 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
                 printk(KERN_WARNING "Warning: not enough extra memory regions\n");
 
         memblock_reserve(start, size);
+}
 
-        xen_max_p2m_pfn = PFN_DOWN(start + size);
-        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
-                unsigned long mfn = pfn_to_mfn(pfn);
+static void __init xen_del_extra_mem(u64 start, u64 size)
+{
+        int i;
+        u64 start_r, size_r;
 
-                if (WARN_ONCE(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn))
-                        continue;
-                WARN_ONCE(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n",
-                          pfn, mfn);
+        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+                start_r = xen_extra_mem[i].start;
+                size_r = xen_extra_mem[i].size;
+
+                /* Start of region. */
+                if (start_r == start) {
+                        BUG_ON(size > size_r);
+                        xen_extra_mem[i].start += size;
+                        xen_extra_mem[i].size -= size;
+                        break;
+                }
+                /* End of region. */
+                if (start_r + size_r == start + size) {
+                        BUG_ON(size > size_r);
+                        xen_extra_mem[i].size -= size;
+                        break;
+                }
+                /* Mid of region. */
+                if (start > start_r && start < start_r + size_r) {
+                        BUG_ON(start + size > start_r + size_r);
+                        xen_extra_mem[i].size = start - start_r;
+                        /* Calling memblock_reserve() again is okay. */
+                        xen_add_extra_mem(start + size, start_r + size_r -
+                                          (start + size));
+                        break;
+                }
+        }
+        memblock_free(start, size);
+}
+
+/*
+ * Called during boot before the p2m list can take entries beyond the
+ * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
+ * invalid.
+ */
+unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
+{
+        int i;
+        unsigned long addr = PFN_PHYS(pfn);
 
-                __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+                if (addr >= xen_extra_mem[i].start &&
+                    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
+                        return INVALID_P2M_ENTRY;
+        }
+
+        return IDENTITY_FRAME(pfn);
+}
+
+/*
+ * Mark all pfns of extra mem as invalid in p2m list.
+ */
+void __init xen_inv_extra_mem(void)
+{
+        unsigned long pfn, pfn_s, pfn_e;
+        int i;
+
+        for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+                pfn_s = PFN_DOWN(xen_extra_mem[i].start);
+                pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
+                for (pfn = pfn_s; pfn < pfn_e; pfn++)
+                        set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
         }
 }
 
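The new xen_del_extra_mem() handles three cases when carving a range out
of a recorded region: deletion at the start shrinks the region from the
front, deletion at the end shrinks it from the back, and deletion in the
middle keeps the front part and re-adds the tail (which is why the kernel
version calls xen_add_extra_mem() again and notes that the repeated
memblock_reserve() is okay). A self-contained C sketch of the same
arithmetic (illustrative only; the single-region model and the names
del_range and tail are invented for this example):

#include <assert.h>
#include <stdio.h>

struct range { unsigned long long start, size; };

static struct range region = { .start = 0x1000, .size = 0x4000 };
static struct range tail;   /* stands in for the re-added remainder */

static void del_range(unsigned long long start, unsigned long long size)
{
        unsigned long long start_r = region.start, size_r = region.size;

        if (start_r == start) {                         /* case 1: front */
                assert(size <= size_r);
                region.start += size;
                region.size  -= size;
        } else if (start_r + size_r == start + size) {  /* case 2: back */
                assert(size <= size_r);
                region.size -= size;
        } else if (start > start_r && start < start_r + size_r) {
                assert(start + size <= start_r + size_r); /* case 3: middle */
                region.size = start - start_r;            /* keep the front */
                tail.start  = start + size;               /* re-add the tail */
                tail.size   = start_r + size_r - (start + size);
        }
}

int main(void)
{
        /* Carve 0x1000 bytes out of the middle of [0x1000, 0x5000). */
        del_range(0x2000, 0x1000);
        printf("front: [%#llx, +%#llx)\n", region.start, region.size);
        printf("tail:  [%#llx, +%#llx)\n", tail.start, tail.size);
        /* front: [0x1000, +0x1000), tail: [0x3000, +0x2000) */
        return 0;
}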
@@ -268,9 +325,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
 
         BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
 
-        /* Don't use memory until remapped */
-        memblock_reserve(PFN_PHYS(remap_pfn), PFN_PHYS(size));
-
         mfn_save = virt_to_mfn(buf);
 
         for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
@@ -314,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
  * pages. In the case of an error the underlying memory is simply released back
  * to Xen and not remapped.
  */
-static unsigned long __init xen_set_identity_and_remap_chunk(
+static unsigned long xen_set_identity_and_remap_chunk(
         const struct e820entry *list, size_t map_size, unsigned long start_pfn,
         unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
         unsigned long *identity, unsigned long *released)
@@ -371,7 +425,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
         return remap_pfn;
 }
 
-static unsigned long __init xen_set_identity_and_remap(
+static void __init xen_set_identity_and_remap(
         const struct e820entry *list, size_t map_size, unsigned long nr_pages,
         unsigned long *released)
 {
@@ -415,8 +469,6 @@ static unsigned long __init xen_set_identity_and_remap(
 
         pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
         pr_info("Released %ld page(s)\n", num_released);
-
-        return last_pfn;
 }
 
 /*
@@ -456,7 +508,7 @@ void __init xen_remap_memory(void)
                 } else if (pfn_s + len == xen_remap_buf.target_pfn) {
                         len += xen_remap_buf.size;
                 } else {
-                        memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+                        xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
                         pfn_s = xen_remap_buf.target_pfn;
                         len = xen_remap_buf.size;
                 }
@@ -466,7 +518,7 @@ void __init xen_remap_memory(void)
         }
 
         if (pfn_s != ~0UL && len)
-                memblock_free(PFN_PHYS(pfn_s), PFN_PHYS(len));
+                xen_del_extra_mem(PFN_PHYS(pfn_s), PFN_PHYS(len));
 
         set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
 
@@ -533,7 +585,6 @@ char * __init xen_memory_setup(void)
         int rc;
         struct xen_memory_map memmap;
         unsigned long max_pages;
-        unsigned long last_pfn = 0;
         unsigned long extra_pages = 0;
         int i;
         int op;
@@ -583,15 +634,11 @@ char * __init xen_memory_setup(void)
          * Set identity map on non-RAM pages and prepare remapping the
          * underlying RAM.
          */
-        last_pfn = xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
-                                              &xen_released_pages);
+        xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
+                                   &xen_released_pages);
 
         extra_pages += xen_released_pages;
 
-        if (last_pfn > max_pfn) {
-                max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
-                mem_end = PFN_PHYS(max_pfn);
-        }
         /*
          * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
          * factor the base size.  On non-highmem systems, the base
@@ -618,6 +665,7 @@ char * __init xen_memory_setup(void)
                         size = min(size, (u64)extra_pages * PAGE_SIZE);
                         extra_pages -= size / PAGE_SIZE;
                         xen_add_extra_mem(addr, size);
+                        xen_max_p2m_pfn = PFN_DOWN(addr + size);
                 } else
                         type = E820_UNUSABLE;
         }