Diffstat (limited to 'arch/x86/xen/setup.c')
 arch/x86/xen/setup.c | 18 ++++++++++--------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa23ecc..769c4b01fa32 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 					      const struct e820map *e820)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = 0;
+	phys_addr_t last_end = ISA_END_ADDRESS;
 	unsigned long released = 0;
 	int i;
 
+	/* Free any unused memory above the low 1Mbyte. */
 	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
 		phys_addr_t end = e820->map[i].addr;
 		end = min(max_addr, end);
 
-		released += xen_release_chunk(last_end, end);
-		last_end = e820->map[i].addr + e820->map[i].size;
+		if (last_end < end)
+			released += xen_release_chunk(last_end, end);
+		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
 	}
 
 	if (last_end < max_addr)
@@ -164,6 +166,7 @@ char * __init xen_memory_setup(void)
 		XENMEM_memory_map;
 	rc = HYPERVISOR_memory_op(op, &memmap);
 	if (rc == -ENOSYS) {
+		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
 		map[0].addr = 0ULL;
 		map[0].size = mem_end;
@@ -201,12 +204,13 @@ char * __init xen_memory_setup(void)
 	}
 
 	/*
-	 * Even though this is normal, usable memory under Xen, reserve
-	 * ISA memory anyway because too many things think they can poke
-	 * about in there.
+	 * In domU, the ISA region is normal, usable memory, but we
+	 * reserve ISA memory anyway because too many things poke
+	 * about in there.
 	 *
-	 * In a dom0 kernel, this region is identity mapped with the
-	 * hardware ISA area, so it really is out of bounds.
+	 * In Dom0, the host E820 information can leave gaps in the
+	 * ISA range, which would cause us to release those pages. To
+	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
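
For illustration only, a minimal standalone sketch of the gap-release loop above, using simplified types, a stubbed release helper, and a made-up example memory map rather than the kernel's real E820 and hypercall interfaces. It shows why starting last_end at ISA_END_ADDRESS keeps any hole below 1M from being handed back to the hypervisor:

/*
 * Sketch of the release loop with simplified types. Holes between
 * map entries are "released" only above 1M because last_end starts
 * at ISA_END_ADDRESS, mirroring the change in the patch above.
 */
#include <stdio.h>
#include <stdint.h>

#define ISA_END_ADDRESS 0x100000ULL	/* 1M */

struct region { uint64_t addr, size; };

static uint64_t release_chunk(uint64_t start, uint64_t end)
{
	printf("release [%#llx, %#llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return end - start;
}

static uint64_t return_unused(uint64_t max_addr,
			      const struct region *map, int nr)
{
	uint64_t last_end = ISA_END_ADDRESS;	/* never touch the low 1M */
	uint64_t released = 0;
	int i;

	for (i = 0; i < nr && last_end < max_addr; i++) {
		uint64_t end = map[i].addr;

		if (end > max_addr)
			end = max_addr;

		if (last_end < end)	/* gap before this region */
			released += release_chunk(last_end, end);
		if (map[i].addr + map[i].size > last_end)
			last_end = map[i].addr + map[i].size;
	}

	if (last_end < max_addr)	/* tail beyond the last region */
		released += release_chunk(last_end, max_addr);

	return released;
}

int main(void)
{
	/* Hypothetical map with a hole below 1M and another above it. */
	struct region map[] = {
		{ 0x0,      0xa0000  },
		{ 0x100000, 0x300000 },
		{ 0x500000, 0x100000 },
	};

	return_unused(0x800000, map, 3);
	return 0;
}

With this map, only [0x400000, 0x500000) and the tail [0x600000, 0x800000) get released; the 0xa0000-0x100000 hole stays untouched, which is the behaviour the dom0 case in the patch relies on.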