aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/xen
diff options
context:
space:
mode:
authorDavid Vrabel <david.vrabel@citrix.com>2014-01-07 06:44:32 -0500
committerDavid Vrabel <david.vrabel@citrix.com>2014-05-15 11:14:44 -0400
commit3cb83e46d032505016ab2565f067e24c8cba9a9d (patch)
tree11eee2d731dd1fa83bede6f006736ff246082b47 /arch/x86/xen
parenta9b5bff66b2a63f7d0f42434f5da9024b442159c (diff)
x86/xen: compactly store large identity ranges in the p2m
Large (multi-GB) identity ranges currently require a unique middle page (filled with p2m_identity entries) per 1 GB region. Similar to the common p2m_mid_missing middle page for large missing regions, introduce a p2m_mid_identity page (filled with p2m_identity entries) which can be used instead. set_phys_range_identity() thus only needs to allocate new middle pages at the beginning and end of the range. Signed-off-by: David Vrabel <david.vrabel@citrix.com> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> Tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--arch/x86/xen/p2m.c155
1 file changed, 105 insertions, 50 deletions
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 82c8c9305510..57001443231e 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -36,7 +36,7 @@
36 * pfn_to_mfn(0xc0000)=0xc0000 36 * pfn_to_mfn(0xc0000)=0xc0000
37 * 37 *
38 * The benefit of this is, that we can assume for non-RAM regions (think 38 * The benefit of this is, that we can assume for non-RAM regions (think
39 * PCI BARs, or ACPI spaces), we can create mappings easily b/c we 39 * PCI BARs, or ACPI spaces), we can create mappings easily because we
40 * get the PFN value to match the MFN. 40 * get the PFN value to match the MFN.
41 * 41 *
42 * For this to work efficiently we have one new page p2m_identity and 42 * For this to work efficiently we have one new page p2m_identity and
@@ -60,7 +60,7 @@
60 * There is also a diagram of the P2M at the end that can help. 60 * There is also a diagram of the P2M at the end that can help.
61 * Imagine your E820 looking as so: 61 * Imagine your E820 looking as so:
62 * 62 *
63 * 1GB 2GB 63 * 1GB 2GB 4GB
64 * /-------------------+---------\/----\ /----------\ /---+-----\ 64 * /-------------------+---------\/----\ /----------\ /---+-----\
65 * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | 65 * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM |
66 * \-------------------+---------/\----/ \----------/ \---+-----/ 66 * \-------------------+---------/\----/ \----------/ \---+-----/
@@ -77,9 +77,8 @@
77 * of the PFN and the end PFN (263424 and 512256 respectively). The first step 77 * of the PFN and the end PFN (263424 and 512256 respectively). The first step
78 * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page 78 * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page
79 * covers 512^2 of page estate (1GB) and in case the start or end PFN is not 79 * covers 512^2 of page estate (1GB) and in case the start or end PFN is not
80 * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn 80 * aligned on 512^2*PAGE_SIZE (1GB) we reserve_brk new middle and leaf pages as
81 * to end pfn. We reserve_brk top leaf pages if they are missing (means they 81 * required to split any existing p2m_mid_missing middle pages.
82 * point to p2m_mid_missing).
83 * 82 *
84 * With the E820 example above, 263424 is not 1GB aligned so we allocate a 83 * With the E820 example above, 263424 is not 1GB aligned so we allocate a
85 * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. 84 * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000.
@@ -88,7 +87,7 @@
88 * Next stage is to determine if we need to do a more granular boundary check 87 * Next stage is to determine if we need to do a more granular boundary check
89 * on the 4MB (or 2MB depending on architecture) off the start and end pfn's. 88 * on the 4MB (or 2MB depending on architecture) off the start and end pfn's.
90 * We check if the start pfn and end pfn violate that boundary check, and if 89 * We check if the start pfn and end pfn violate that boundary check, and if
91 * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer 90 * so reserve_brk a (p2m[x][y]) leaf page. This way we have a much finer
92 * granularity of setting which PFNs are missing and which ones are identity. 91 * granularity of setting which PFNs are missing and which ones are identity.
93 * In our example 263424 and 512256 both fail the check so we reserve_brk two 92 * In our example 263424 and 512256 both fail the check so we reserve_brk two
94 * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" 93 * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing"
@@ -102,9 +101,10 @@
102 * 101 *
103 * The next step is to walk from the start pfn to the end pfn setting 102 * The next step is to walk from the start pfn to the end pfn setting
104 * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. 103 * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity.
105 * If we find that the middle leaf is pointing to p2m_missing we can swap it 104 * If we find that the middle entry is pointing to p2m_missing we can swap it
106 * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this 105 * over to p2m_identity - this way covering 4MB (or 2MB) PFN space (and
107 * point we do not need to worry about boundary alignment (so no need to 106 * similarly swapping p2m_mid_missing for p2m_mid_identity for larger regions).
107 * At this point we do not need to worry about boundary alignment (so no need to
108 * reserve_brk a middle page, figure out which PFNs are "missing" and which 108 * reserve_brk a middle page, figure out which PFNs are "missing" and which
109 * ones are identity), as that has been done earlier. If we find that the 109 * ones are identity), as that has been done earlier. If we find that the
110 * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference 110 * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference
@@ -118,6 +118,9 @@
118 * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] 118 * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511]
119 * contain the INVALID_P2M_ENTRY value and are considered "missing." 119 * contain the INVALID_P2M_ENTRY value and are considered "missing."
120 * 120 *
121 * Finally, the region beyond the end of the E820 (4 GB in this example)
122 * is set to be identity (in case there are MMIO regions placed here).
123 *
121 * This is what the p2m ends up looking (for the E820 above) with this 124 * This is what the p2m ends up looking (for the E820 above) with this
122 * fabulous drawing: 125 * fabulous drawing:
123 * 126 *
@@ -129,21 +132,27 @@
129 * |-----| \ | [p2m_identity]+\\ | .... | 132 * |-----| \ | [p2m_identity]+\\ | .... |
130 * | 2 |--\ \-------------------->| ... | \\ \----------------/ 133 * | 2 |--\ \-------------------->| ... | \\ \----------------/
131 * |-----| \ \---------------/ \\ 134 * |-----| \ \---------------/ \\
132 * | 3 |\ \ \\ p2m_identity 135 * | 3 |-\ \ \\ p2m_identity [1]
133 * |-----| \ \-------------------->/---------------\ /-----------------\ 136 * |-----| \ \-------------------->/---------------\ /-----------------\
134 * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | 137 * | .. |\ | | [p2m_identity]+-->| ~0, ~0, ~0, ... |
135 * \-----/ / | [p2m_identity]+-->| ..., ~0 | 138 * \-----/ | | | [p2m_identity]+-->| ..., ~0 |
136 * / /---------------\ | .... | \-----------------/ 139 * | | | .... | \-----------------/
137 * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | 140 * | | +-[x], ~0, ~0.. +\
138 * / | IDENTITY[@256]|<----/ \---------------/ 141 * | | \---------------/ \
139 * / | ~0, ~0, .... | 142 * | | \-> /---------------\
140 * | \---------------/ 143 * | V p2m_mid_missing p2m_missing | IDENTITY[@0] |
141 * | 144 * | /-----------------\ /------------\ | IDENTITY[@256]|
142 * p2m_mid_missing p2m_missing 145 * | | [p2m_missing] +---->| ~0, ~0, ...| | ~0, ~0, .... |
143 * /-----------------\ /------------\ 146 * | | [p2m_missing] +---->| ..., ~0 | \---------------/
144 * | [p2m_missing] +---->| ~0, ~0, ~0 | 147 * | | ... | \------------/
145 * | [p2m_missing] +---->| ..., ~0 | 148 * | \-----------------/
146 * \-----------------/ \------------/ 149 * |
150 * | p2m_mid_identity
151 * | /-----------------\
152 * \-->| [p2m_identity] +---->[1]
153 * | [p2m_identity] +---->[1]
154 * | ... |
155 * \-----------------/
147 * 156 *
148 * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT) 157 * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT)
149 */ 158 */
@@ -187,13 +196,15 @@ static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
187static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); 196static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
188 197
189static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); 198static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
199static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
200static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
190 201
191RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 202RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
192RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 203RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
193 204
194/* We might hit two boundary violations at the start and end, at max each 205/* We might hit two boundary violations at the start and end, at max each
195 * boundary violation will require three middle nodes. */ 206 * boundary violation will require three middle nodes. */
196RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); 207RESERVE_BRK(p2m_mid_extra, PAGE_SIZE * 2 * 3);
197 208
198/* When we populate back during bootup, the amount of pages can vary. The 209/* When we populate back during bootup, the amount of pages can vary. The
199 * max we have seen is 395979, but that does not mean it can't be more. 210 * max we have seen is 395979, but that does not mean it can't be more.
@@ -242,20 +253,20 @@ static void p2m_top_mfn_p_init(unsigned long **top)
242 top[i] = p2m_mid_missing_mfn; 253 top[i] = p2m_mid_missing_mfn;
243} 254}
244 255
245static void p2m_mid_init(unsigned long **mid) 256static void p2m_mid_init(unsigned long **mid, unsigned long *leaf)
246{ 257{
247 unsigned i; 258 unsigned i;
248 259
249 for (i = 0; i < P2M_MID_PER_PAGE; i++) 260 for (i = 0; i < P2M_MID_PER_PAGE; i++)
250 mid[i] = p2m_missing; 261 mid[i] = leaf;
251} 262}
252 263
253static void p2m_mid_mfn_init(unsigned long *mid) 264static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
254{ 265{
255 unsigned i; 266 unsigned i;
256 267
257 for (i = 0; i < P2M_MID_PER_PAGE; i++) 268 for (i = 0; i < P2M_MID_PER_PAGE; i++)
258 mid[i] = virt_to_mfn(p2m_missing); 269 mid[i] = virt_to_mfn(leaf);
259} 270}
260 271
261static void p2m_init(unsigned long *p2m) 272static void p2m_init(unsigned long *p2m)
@@ -286,7 +297,9 @@ void __ref xen_build_mfn_list_list(void)
286 /* Pre-initialize p2m_top_mfn to be completely missing */ 297 /* Pre-initialize p2m_top_mfn to be completely missing */
287 if (p2m_top_mfn == NULL) { 298 if (p2m_top_mfn == NULL) {
288 p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); 299 p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
289 p2m_mid_mfn_init(p2m_mid_missing_mfn); 300 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
301 p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
302 p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
290 303
291 p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 304 p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
292 p2m_top_mfn_p_init(p2m_top_mfn_p); 305 p2m_top_mfn_p_init(p2m_top_mfn_p);
@@ -295,7 +308,8 @@ void __ref xen_build_mfn_list_list(void)
295 p2m_top_mfn_init(p2m_top_mfn); 308 p2m_top_mfn_init(p2m_top_mfn);
296 } else { 309 } else {
297 /* Reinitialise, mfn's all change after migration */ 310 /* Reinitialise, mfn's all change after migration */
298 p2m_mid_mfn_init(p2m_mid_missing_mfn); 311 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
312 p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
299 } 313 }
300 314
301 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { 315 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
@@ -327,7 +341,7 @@ void __ref xen_build_mfn_list_list(void)
327 * it too late. 341 * it too late.
328 */ 342 */
329 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 343 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
330 p2m_mid_mfn_init(mid_mfn_p); 344 p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
331 345
332 p2m_top_mfn_p[topidx] = mid_mfn_p; 346 p2m_top_mfn_p[topidx] = mid_mfn_p;
333 } 347 }
@@ -365,16 +379,17 @@ void __init xen_build_dynamic_phys_to_machine(void)
365 379
366 p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); 380 p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
367 p2m_init(p2m_missing); 381 p2m_init(p2m_missing);
382 p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
383 p2m_init(p2m_identity);
368 384
369 p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE); 385 p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
370 p2m_mid_init(p2m_mid_missing); 386 p2m_mid_init(p2m_mid_missing, p2m_missing);
387 p2m_mid_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
388 p2m_mid_init(p2m_mid_identity, p2m_identity);
371 389
372 p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); 390 p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
373 p2m_top_init(p2m_top); 391 p2m_top_init(p2m_top);
374 392
375 p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
376 p2m_init(p2m_identity);
377
378 /* 393 /*
379 * The domain builder gives us a pre-constructed p2m array in 394 * The domain builder gives us a pre-constructed p2m array in
380 * mfn_list for all the pages initially given to us, so we just 395 * mfn_list for all the pages initially given to us, so we just
@@ -386,7 +401,7 @@ void __init xen_build_dynamic_phys_to_machine(void)
386 401
387 if (p2m_top[topidx] == p2m_mid_missing) { 402 if (p2m_top[topidx] == p2m_mid_missing) {
388 unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); 403 unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
389 p2m_mid_init(mid); 404 p2m_mid_init(mid, p2m_missing);
390 405
391 p2m_top[topidx] = mid; 406 p2m_top[topidx] = mid;
392 } 407 }
@@ -545,7 +560,7 @@ static bool alloc_p2m(unsigned long pfn)
545 if (!mid) 560 if (!mid)
546 return false; 561 return false;
547 562
548 p2m_mid_init(mid); 563 p2m_mid_init(mid, p2m_missing);
549 564
550 if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing) 565 if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
551 free_p2m_page(mid); 566 free_p2m_page(mid);
@@ -565,7 +580,7 @@ static bool alloc_p2m(unsigned long pfn)
565 if (!mid_mfn) 580 if (!mid_mfn)
566 return false; 581 return false;
567 582
568 p2m_mid_mfn_init(mid_mfn); 583 p2m_mid_mfn_init(mid_mfn, p2m_missing);
569 584
570 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); 585 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
571 mid_mfn_mfn = virt_to_mfn(mid_mfn); 586 mid_mfn_mfn = virt_to_mfn(mid_mfn);
@@ -649,7 +664,7 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
649 if (mid == p2m_mid_missing) { 664 if (mid == p2m_mid_missing) {
650 mid = extend_brk(PAGE_SIZE, PAGE_SIZE); 665 mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
651 666
652 p2m_mid_init(mid); 667 p2m_mid_init(mid, p2m_missing);
653 668
654 p2m_top[topidx] = mid; 669 p2m_top[topidx] = mid;
655 670
@@ -658,7 +673,7 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
658 /* And the save/restore P2M tables.. */ 673 /* And the save/restore P2M tables.. */
659 if (mid_mfn_p == p2m_mid_missing_mfn) { 674 if (mid_mfn_p == p2m_mid_missing_mfn) {
660 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 675 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
661 p2m_mid_mfn_init(mid_mfn_p); 676 p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
662 677
663 p2m_top_mfn_p[topidx] = mid_mfn_p; 678 p2m_top_mfn_p[topidx] = mid_mfn_p;
664 p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); 679 p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
@@ -769,6 +784,24 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
769 784
770 return true; 785 return true;
771} 786}
787
788static void __init early_split_p2m(unsigned long pfn)
789{
790 unsigned long mididx, idx;
791
792 mididx = p2m_mid_index(pfn);
793 idx = p2m_index(pfn);
794
795 /*
796 * Allocate new middle and leaf pages if this pfn lies in the
797 * middle of one.
798 */
799 if (mididx || idx)
800 early_alloc_p2m_middle(pfn);
801 if (idx)
802 early_alloc_p2m(pfn, false);
803}
804
772unsigned long __init set_phys_range_identity(unsigned long pfn_s, 805unsigned long __init set_phys_range_identity(unsigned long pfn_s,
773 unsigned long pfn_e) 806 unsigned long pfn_e)
774{ 807{
@@ -786,19 +819,27 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
786 if (pfn_e > MAX_P2M_PFN) 819 if (pfn_e > MAX_P2M_PFN)
787 pfn_e = MAX_P2M_PFN; 820 pfn_e = MAX_P2M_PFN;
788 821
789 for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); 822 early_split_p2m(pfn_s);
790 pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); 823 early_split_p2m(pfn_e);
791 pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
792 {
793 WARN_ON(!early_alloc_p2m(pfn));
794 }
795 824
796 early_alloc_p2m_middle(pfn_s, true); 825 for (pfn = pfn_s; pfn < pfn_e;) {
797 early_alloc_p2m_middle(pfn_e, true); 826 unsigned topidx = p2m_top_index(pfn);
827 unsigned mididx = p2m_mid_index(pfn);
798 828
799 for (pfn = pfn_s; pfn < pfn_e; pfn++)
800 if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) 829 if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
801 break; 830 break;
831 pfn++;
832
833 /*
834 * If the PFN was set to a middle or leaf identity
835 * page the remainder must also be identity, so skip
836 * ahead to the next middle or leaf entry.
837 */
838 if (p2m_top[topidx] == p2m_mid_identity)
839 pfn = ALIGN(pfn, P2M_MID_PER_PAGE * P2M_PER_PAGE);
840 else if (p2m_top[topidx][mididx] == p2m_identity)
841 pfn = ALIGN(pfn, P2M_PER_PAGE);
842 }
802 843
803 if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), 844 if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
804 "Identity mapping failed. We are %ld short of 1-1 mappings!\n", 845 "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
@@ -828,8 +869,22 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
828 869
829 /* For sparse holes where the p2m leaf has real PFN along with 870 * PCI holes, stick in the PFN as the MFN value.
830 * PCI holes, stick in the PFN as the MFN value. 871 * PCI holes, stick in the PFN as the MFN value.
872 *
873 * set_phys_range_identity() will have allocated new middle
874 * and leaf pages as required so an existing p2m_mid_missing
875 * or p2m_missing mean that whole range will be identity so
876 * these can be switched to p2m_mid_identity or p2m_identity.
831 */ 877 */
832 if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { 878 if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
879 if (p2m_top[topidx] == p2m_mid_identity)
880 return true;
881
882 if (p2m_top[topidx] == p2m_mid_missing) {
883 WARN_ON(cmpxchg(&p2m_top[topidx], p2m_mid_missing,
884 p2m_mid_identity) != p2m_mid_missing);
885 return true;
886 }
887
833 if (p2m_top[topidx][mididx] == p2m_identity) 888 if (p2m_top[topidx][mididx] == p2m_identity)
834 return true; 889 return true;
835 890