author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-08-16 16:38:55 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-08-23 11:52:16 -0400
commit     3fc509fc0c590900568ef516a37101d88f3476f5 (patch)
tree       97e4a577d8ebd3ef8ab64874b2fe69a07a24eac1 /arch/x86
parent     785f62314984ea3af9dd830b020289ba2509ae69 (diff)
xen/p2m: When revectoring deal with holes in the P2M array.
When we free the PFNs and then subsequently populate them back during bootup:

Freeing 20000-20200 pfn range: 512 pages freed
1-1 mapping on 20000->20200
Freeing 40000-40200 pfn range: 512 pages freed
1-1 mapping on 40000->40200
Freeing bad80-badf4 pfn range: 116 pages freed
1-1 mapping on bad80->badf4
Freeing badf6-bae7f pfn range: 137 pages freed
1-1 mapping on badf6->bae7f
Freeing bb000-100000 pfn range: 282624 pages freed
1-1 mapping on bb000->100000
Released 283999 pages of unused memory
Set 283999 page(s) to 1-1 mapping
Populating 1acb8a-1f20e9 pfn range: 283999 pages added

we end up having the P2M array (the one that was grafted on the P2M tree) filled with IDENTITY_FRAME or INVALID_P2M_ENTRY entries.

The patch titled "xen/p2m: Reuse existing P2M leafs if they are filled with 1:1 PFNs or INVALID." recycles said slots: it replaces the P2M tree leafs that point to &mfn_list[xx] with p2m_identity or p2m_missing, and re-uses those P2M array sections for other P2M tree leafs.

For the bootup excerpt above, the PFNs at 0x20000->0x20200 are going to be IDENTITY based: P2M[0][256][0] -> P2M[0][257][0] get turned into IDENTITY_FRAME entries. We can re-use that and make P2M[0][256] point to p2m_identity. The "old" page (the grafted P2M array provided by Xen) that was at P2M[0][256] gets put somewhere else, specifically at P2M[6][358], because when we populate back:

Populating 1acb8a-1f20e9 pfn range: 283999 pages added

we fill P2M[6][358][0] (and P2M[6][358], P2M[6][359], ...) with the new MFNs.

That is all OK, except that when we revector we assume the page count is the same in the grafted P2M array and in the newly allocated one. Since that is no longer the case (the P2M now has holes that point to p2m_missing or p2m_identity), we have to take that into account.

[v2: Check for overflow]
[v3: Move within the __va check]
[v4: Fix the computation]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
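The crux of the fix is that the copy loop now keeps two cursors: pfn keeps walking the guest PFN space, while a separate pfn_free tracks the next free offset in the freshly allocated mfn_list and only advances when a leaf is actually copied. The following stand-alone userspace sketch illustrates that two-cursor idea under made-up names (LEAVES, is_recycled_leaf); it is an illustration, not the kernel code itself.

/*
 * Sketch of the two-cursor idea from the patch: "pfn" walks the whole
 * PFN space, while "pfn_free" only advances when a leaf is actually
 * copied, so holes (leaves recycled to p2m_identity or p2m_missing)
 * no longer inflate the offset into the new mfn_list.
 * LEAVES and is_recycled_leaf() are illustrative names, not taken
 * from arch/x86/xen/p2m.c.
 */
#include <stdbool.h>
#include <stdio.h>

#define P2M_PER_PAGE 512	/* entries per P2M leaf page on 64-bit */
#define LEAVES       8		/* pretend P2M tree with 8 leaves      */

/* Pretend leaves 2 and 5 were recycled to p2m_identity/p2m_missing. */
static bool is_recycled_leaf(unsigned int leaf)
{
	return leaf == 2 || leaf == 5;
}

int main(void)
{
	unsigned long pfn = 0;		/* cursor over the PFN space          */
	unsigned long pfn_free = 0;	/* next free slot in the new mfn_list */

	for (unsigned int leaf = 0; leaf < LEAVES; leaf++, pfn += P2M_PER_PAGE) {
		if (is_recycled_leaf(leaf))
			continue;	/* hole: nothing to copy for this leaf */

		/* the kernel would copy_page() into &mfn_list[pfn_free] here */
		printf("leaf %u: pfn offset %lu -> new offset %lu\n",
		       leaf, pfn, pfn_free);
		pfn_free += P2M_PER_PAGE;
	}

	printf("pfn ended at %lu, but only %lu entries were copied\n",
	       pfn, pfn_free);
	return 0;
}

With two recycled leaves the PFN cursor ends at 4096 while only 3072 entries are copied; the offsets diverge in the same way in the real tree, which is why the patch stops indexing the new array by pfn.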
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/xen/p2m.c  14
1 file changed, 11 insertions, 3 deletions
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 996ee2bf7bdb..c3e92912c3fb 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -396,6 +396,7 @@ unsigned long __init xen_revector_p2m_tree(void)
 	unsigned long va_start;
 	unsigned long va_end;
 	unsigned long pfn;
+	unsigned long pfn_free = 0;
 	unsigned long *mfn_list = NULL;
 	unsigned long size;
 
@@ -442,11 +443,18 @@ unsigned long __init xen_revector_p2m_tree(void)
 		if (mid_p >= (unsigned long *)va_start && mid_p <= (unsigned long *)va_end) {
 			unsigned long *new;
 
-			new = &mfn_list[pfn];
+			if (pfn_free > (size / sizeof(unsigned long))) {
+				WARN(1, "Only allocated for %ld pages, but we want %ld!\n",
+					size / sizeof(unsigned long), pfn_free);
+				return 0;
+			}
+			new = &mfn_list[pfn_free];
 
 			copy_page(new, mid_p);
-			p2m_top[topidx][mididx] = &mfn_list[pfn];
-			p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn]);
+			p2m_top[topidx][mididx] = &mfn_list[pfn_free];
+			p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]);
+
+			pfn_free += P2M_PER_PAGE;
 
 		}
 		/* This should be the leafs allocated for identity from _brk. */
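A note on the failure path above: when pfn_free would run past the new allocation, the code WARNs and returns 0 instead of copying further. Judging from the return value alone (the commit message does not spell this out), a zero return from xen_revector_p2m_tree() presumably leaves the original grafted P2M array in use, so an undersized target allocation degrades to the old layout rather than being written past.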