path: root/arch/x86/xen/p2m.c
author		Juergen Gross <jgross@suse.com>		2014-11-28 05:53:55 -0500
committer	David Vrabel <david.vrabel@citrix.com>	2014-12-04 09:08:59 -0500
commit		5b8e7d80542487ff1bf17b4cf2922a01dee13d3a (patch)
tree		e027791839cd32076d172c16700e010974f70ea4 /arch/x86/xen/p2m.c
parent		97f4533a60ce5d0cb35ff44a190111f81a987620 (diff)
xen: Delay invalidating extra memory
When the physical memory configuration is initialized, the p2m entries for unpopulated memory pages are set to "invalid". As those pages are beyond the p2m list built by the hypervisor, the p2m tree has to be extended.

This patch delays processing the extra-memory-related p2m entries during the boot process until some more basic memory management functions are callable. This removes the need to create new p2m entries until virtual memory management is available.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
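The delayed invalidation itself is done by xen_inv_extra_mem(), which this patch calls from both variants of xen_revector_p2m_tree() in the hunks below. Its body is not part of this diff (it lives outside arch/x86/xen/p2m.c, in the setup code); the following is only a rough sketch of what such a delayed invalidation loop looks like, assuming the existing xen_extra_mem[] region table with start/size fields, and is not the exact code of the companion change:

static void __init xen_inv_extra_mem(void)	/* sketch only, real body in setup code */
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].size)
			continue;
		/* Mark every page of this extra memory region as invalid,
		 * now that set_phys_to_machine() can extend the p2m tree. */
		pfn_s = PFN_DOWN(xen_extra_mem[i].start);
		pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}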
Diffstat (limited to 'arch/x86/xen/p2m.c')
-rw-r--r--	arch/x86/xen/p2m.c	128
1 file changed, 25 insertions(+), 103 deletions(-)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8676f3566fe3..eddec40a4c20 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -181,7 +181,12 @@
 
 static void __init m2p_override_init(void);
 
+unsigned long *xen_p2m_addr __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_addr);
+unsigned long xen_p2m_size __read_mostly;
+EXPORT_SYMBOL_GPL(xen_p2m_size);
 unsigned long xen_max_p2m_pfn __read_mostly;
+EXPORT_SYMBOL_GPL(xen_max_p2m_pfn);
 
 static unsigned long *p2m_mid_missing_mfn;
 static unsigned long *p2m_top_mfn;
@@ -198,13 +203,6 @@ static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
 
 RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
 
-/* For each I/O range remapped we may lose up to two leaf pages for the boundary
- * violations and three mid pages to cover up to 3GB. With
- * early_can_reuse_p2m_middle() most of the leaf pages will be reused by the
- * remapped region.
- */
-RESERVE_BRK(p2m_identity_remap, PAGE_SIZE * 2 * 3 * MAX_REMAP_RANGES);
-
 static int use_brk = 1;
 
 static inline unsigned p2m_top_index(unsigned long pfn)
@@ -381,9 +379,11 @@ void __init xen_build_dynamic_phys_to_machine(void)
 	if (xen_feature(XENFEAT_auto_translated_physmap))
 		return;
 
+	xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list;
 	mfn_list = (unsigned long *)xen_start_info->mfn_list;
 	max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
 	xen_max_p2m_pfn = max_pfn;
+	xen_p2m_size = max_pfn;
 
 	p2m_missing = alloc_p2m_page();
 	p2m_init(p2m_missing);
@@ -499,6 +499,11 @@ unsigned long __init xen_revector_p2m_tree(void)
 		/* This should be the leafs allocated for identity from _brk. */
 	}
 
+	xen_p2m_size = xen_max_p2m_pfn;
+	xen_p2m_addr = mfn_list;
+
+	xen_inv_extra_mem();
+
 	m2p_override_init();
 	return (unsigned long)mfn_list;
 }
@@ -506,6 +511,8 @@ unsigned long __init xen_revector_p2m_tree(void)
 unsigned long __init xen_revector_p2m_tree(void)
 {
 	use_brk = 0;
+	xen_p2m_size = xen_max_p2m_pfn;
+	xen_inv_extra_mem();
 	m2p_override_init();
 	return 0;
 }
@@ -514,8 +521,12 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 {
 	unsigned topidx, mididx, idx;
 
-	if (unlikely(pfn >= MAX_P2M_PFN))
+	if (unlikely(pfn >= xen_p2m_size)) {
+		if (pfn < xen_max_p2m_pfn)
+			return xen_chk_extra_mem(pfn);
+
 		return IDENTITY_FRAME(pfn);
+	}
 
 	topidx = p2m_top_index(pfn);
 	mididx = p2m_mid_index(pfn);
@@ -613,78 +624,12 @@ static bool alloc_p2m(unsigned long pfn)
 	return true;
 }
 
-static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
-{
-	unsigned topidx, mididx, idx;
-	unsigned long *p2m;
-
-	topidx = p2m_top_index(pfn);
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	/* Pfff.. No boundary cross-over, lets get out. */
-	if (!idx && check_boundary)
-		return false;
-
-	WARN(p2m_top[topidx][mididx] == p2m_identity,
-		"P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
-		topidx, mididx);
-
-	/*
-	 * Could be done by xen_build_dynamic_phys_to_machine..
-	 */
-	if (p2m_top[topidx][mididx] != p2m_missing)
-		return false;
-
-	/* Boundary cross-over for the edges: */
-	p2m = alloc_p2m_page();
-
-	p2m_init(p2m);
-
-	p2m_top[topidx][mididx] = p2m;
-
-	return true;
-}
-
-static bool __init early_alloc_p2m_middle(unsigned long pfn)
-{
-	unsigned topidx = p2m_top_index(pfn);
-	unsigned long **mid;
-
-	mid = p2m_top[topidx];
-	if (mid == p2m_mid_missing) {
-		mid = alloc_p2m_page();
-
-		p2m_mid_init(mid, p2m_missing);
-
-		p2m_top[topidx] = mid;
-	}
-	return true;
-}
-
-static void __init early_split_p2m(unsigned long pfn)
-{
-	unsigned long mididx, idx;
-
-	mididx = p2m_mid_index(pfn);
-	idx = p2m_index(pfn);
-
-	/*
-	 * Allocate new middle and leaf pages if this pfn lies in the
-	 * middle of one.
-	 */
-	if (mididx || idx)
-		early_alloc_p2m_middle(pfn);
-	if (idx)
-		early_alloc_p2m(pfn, false);
-}
-
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 					     unsigned long pfn_e)
 {
 	unsigned long pfn;
 
-	if (unlikely(pfn_s >= MAX_P2M_PFN))
+	if (unlikely(pfn_s >= xen_p2m_size))
 		return 0;
 
 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
@@ -693,34 +638,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 	if (pfn_s > pfn_e)
 		return 0;
 
-	if (pfn_e > MAX_P2M_PFN)
-		pfn_e = MAX_P2M_PFN;
-
-	early_split_p2m(pfn_s);
-	early_split_p2m(pfn_e);
-
-	for (pfn = pfn_s; pfn < pfn_e;) {
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned mididx = p2m_mid_index(pfn);
-
-		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
-			break;
-		pfn++;
-
-		/*
-		 * If the PFN was set to a middle or leaf identity
-		 * page the remainder must also be identity, so skip
-		 * ahead to the next middle or leaf entry.
-		 */
-		if (p2m_top[topidx] == p2m_mid_identity)
-			pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE);
-		else if (p2m_top[topidx][mididx] == p2m_identity)
-			pfn = ALIGN(pfn, P2M_PER_PAGE);
-	}
+	if (pfn_e > xen_p2m_size)
+		pfn_e = xen_p2m_size;
 
-	WARN((pfn - pfn_s) != (pfn_e - pfn_s),
-		"Identity mapping failed. We are %ld short of 1-1 mappings!\n",
-		(pfn_e - pfn_s) - (pfn - pfn_s));
+	for (pfn = pfn_s; pfn < pfn_e; pfn++)
+		xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn);
 
 	return pfn - pfn_s;
 }
@@ -734,7 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
 		return true;
 
-	if (unlikely(pfn >= MAX_P2M_PFN)) {
+	if (unlikely(pfn >= xen_p2m_size)) {
 		BUG_ON(mfn != INVALID_P2M_ENTRY);
 		return true;
 	}
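With the p2m list now only sized to xen_p2m_size, get_phys_to_machine() has to answer queries for pfns between xen_p2m_size and xen_max_p2m_pfn (the not yet invalidated extra memory range) via xen_chk_extra_mem(). That helper is likewise defined outside this file; a plausible sketch, assuming it only has to distinguish extra-memory regions (still invalid) from the identity-mapped remainder beyond the p2m list:

unsigned long xen_chk_extra_mem(unsigned long pfn)	/* sketch only */
{
	int i;
	phys_addr_t addr = PFN_PHYS(pfn);

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Pages inside an extra memory region are still unpopulated. */
		if (addr >= xen_extra_mem[i].start &&
		    addr < xen_extra_mem[i].start + xen_extra_mem[i].size)
			return INVALID_P2M_ENTRY;
	}

	/* Everything else beyond the p2m list is identity mapped. */
	return IDENTITY_FRAME(pfn);
}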