| author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-05-26 18:31:20 -0400 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2008-05-27 04:11:37 -0400 |
| commit | cf0923ea295ba08ae656ef04164a43cb6553ba99 (patch) | |
| tree | c5c8ea1a226edcf7a29bec1953f65469a21756e5 /arch/x86/xen | |
| parent | 8006ec3e911f93d702e1d4a4e387e244ab434924 (diff) | |
xen: efficiently support a holey p2m table
When using sparsemem and memory hotplug, the kernel's pseudo-physical
address space can be discontiguous. Previously this was dealt with by
having the upper parts of the radix tree stubbed off. Unfortunately,
this is incompatible with save/restore, which requires a complete p2m
table.
The solution is to have a special distinguished all-invalid p2m leaf
page, which we can point all the hole areas at. This allows the tools
to see a complete p2m table, at the cost of only a single page for all
the memory holes.
It also simplifies the code since it removes a few special cases.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
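To make the idea concrete, here is a minimal userspace sketch of the sentinel-leaf pattern the patch introduces. The names `p2m_top` and `p2m_missing` mirror the patch, but the table sizes, `INVALID_ENTRY`, and the `malloc()`-based allocation are simplified assumptions for illustration, not the kernel's definitions:

```c
/*
 * Sketch of the "distinguished all-invalid leaf" idea: every hole in
 * the top-level table points at one shared sentinel leaf, so lookups
 * never have to test for NULL, yet all holes together cost one page.
 * Sizes are shrunk for illustration; the range initializers are the
 * same GCC extension the patch itself uses.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_PAGE 4	/* kernel: PAGE_SIZE / sizeof(unsigned long) */
#define TOP_ENTRIES	 4	/* kernel: MAX_DOMAIN_PAGES / ENTRIES_PER_PAGE */
#define INVALID_ENTRY	 (~0UL)	/* stands in for INVALID_P2M_ENTRY */

/* One shared all-invalid leaf; every hole points here. */
static unsigned long p2m_missing[ENTRIES_PER_PAGE] = {
	[0 ... ENTRIES_PER_PAGE - 1] = INVALID_ENTRY
};

/* Top level starts with every slot aimed at the sentinel. */
static unsigned long *p2m_top[TOP_ENTRIES] = {
	[0 ... TOP_ENTRIES - 1] = p2m_missing
};

static unsigned long lookup(unsigned long pfn)
{
	/* No NULL check: a hole resolves through the sentinel. */
	return p2m_top[pfn / ENTRIES_PER_PAGE][pfn % ENTRIES_PER_PAGE];
}

static void set_entry(unsigned long pfn, unsigned long mfn)
{
	unsigned long topidx = pfn / ENTRIES_PER_PAGE;

	if (p2m_top[topidx] == p2m_missing) {
		/* No need to allocate a page to store an invalid entry. */
		if (mfn == INVALID_ENTRY)
			return;
		/* First real entry in this range: allocate a leaf. */
		unsigned long *p = malloc(sizeof(p2m_missing));
		for (int i = 0; i < ENTRIES_PER_PAGE; i++)
			p[i] = INVALID_ENTRY;
		p2m_top[topidx] = p;
	}
	p2m_top[topidx][pfn % ENTRIES_PER_PAGE] = mfn;
}

int main(void)
{
	printf("hole:      %lx\n", lookup(5));	/* INVALID_ENTRY */
	set_entry(5, 0x1234);
	printf("after set: %lx\n", lookup(5));	/* 0x1234 */
	return 0;
}
```

The property the commit relies on is visible in lookup(): a hole resolves through the shared page to an invalid entry, which is exactly what the branch removed from get_phys_to_machine() used to return explicitly.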
Diffstat (limited to 'arch/x86/xen')

 -rw-r--r--	arch/x86/xen/mmu.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 644232aa7bf..da7b45b0506 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -57,8 +57,17 @@
 #include "mmu.h"
 
 #define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))
+#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
 
-static unsigned long *p2m_top[MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE];
+/* Placeholder for holes in the address space */
+static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE]
+	__attribute__((section(".data.page_aligned"))) =
+		{ [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+
+/* Array of pointers to pages containing p2m entries */
+static unsigned long *p2m_top[TOP_ENTRIES]
+	__attribute__((section(".data.page_aligned"))) =
+		{ [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
@@ -92,9 +101,6 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 		return INVALID_P2M_ENTRY;
 
 	topidx = p2m_top_index(pfn);
-	if (p2m_top[topidx] == NULL)
-		return INVALID_P2M_ENTRY;
-
 	idx = p2m_index(pfn);
 	return p2m_top[topidx][idx];
 }
@@ -110,7 +116,7 @@ static void alloc_p2m(unsigned long **pp)
 	for(i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
 		p[i] = INVALID_P2M_ENTRY;
 
-	if (cmpxchg(pp, NULL, p) != NULL)
+	if (cmpxchg(pp, p2m_missing, p) != p2m_missing)
 		free_page((unsigned long)p);
 }
 
@@ -129,7 +135,7 @@ void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	}
 
 	topidx = p2m_top_index(pfn);
-	if (p2m_top[topidx] == NULL) {
+	if (p2m_top[topidx] == p2m_missing) {
 		/* no need to allocate a page to store an invalid entry */
 		if (mfn == INVALID_P2M_ENTRY)
 			return;
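A note on the cmpxchg() hunk above: since an empty slot now holds p2m_missing rather than NULL, the compare value changes with it, but the race handling is unchanged. When two CPUs try to populate the same slot, exactly one page is installed and the loser frees its copy. Below is a rough userspace sketch of that publish-or-discard pattern, using C11 atomics as a stand-in for the kernel's arch-specific cmpxchg(); the sizes and malloc()/free() calls are simplifications, not the kernel's API:

```c
/*
 * Publish-or-discard, as in alloc_p2m(): build a fresh all-invalid
 * leaf, then install it only if the slot still holds the sentinel.
 * C11 atomic_compare_exchange_strong() stands in for the kernel's
 * cmpxchg().
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 4

/* Shared all-invalid leaf, standing in for p2m_missing. */
static unsigned long missing[ENTRIES] = { [0 ... ENTRIES - 1] = ~0UL };

/* A top-level slot that initially points at the sentinel. */
static _Atomic(unsigned long *) slot = missing;

static void alloc_leaf(void)
{
	unsigned long *p = malloc(ENTRIES * sizeof(*p));
	unsigned long *expected = missing;

	for (int i = 0; i < ENTRIES; i++)
		p[i] = ~0UL;	/* start all-invalid */

	/*
	 * Install only if the slot still holds the sentinel; if a
	 * concurrent caller won the race, our page is redundant.
	 */
	if (!atomic_compare_exchange_strong(&slot, &expected, p))
		free(p);
}

int main(void)
{
	alloc_leaf();		/* wins: sentinel swapped for a real leaf */
	alloc_leaf();		/* loses: compare fails, page is freed */
	printf("slot %s the sentinel\n",
	       slot == missing ? "still holds" : "replaced");
	return 0;
}
```

This is also why get_phys_to_machine() can drop its NULL check entirely: once every slot holds either a real leaf or p2m_missing, the lookup path has no special cases left, which is the simplification the commit message mentions.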