author    Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-02-01 17:15:30 -0500
committer Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2011-03-14 11:17:10 -0400
commit    68df0da7f42be6ae017fe9f48ac414c43a7b9d32 (patch)
tree      9f4d774a788400d339794a2359b92f145bc21c1a /arch/x86/xen/setup.c
parent    c7617798771ad588d585986d896197c04b737621 (diff)
xen/setup: Set identity mapping for non-RAM E820 and E820 gaps.
We walk the E820 map, starting at 0 (for PV guests we start at ISA_END_ADDRESS), and skip any E820 RAM regions. All other regions, as well as the gaps between entries, are set to be identity mappings.

The reason we do not set the identity mapping from 0 to ISA_END_ADDRESS when running as a PV guest is that the kernel would try to read DMI information from that region and fail (it has no permission to read it). There is a lot of gnarly code to deal with that weird region, so we won't try to clean it up in this patch.

This code ends up calling 'set_phys_range_identity' with the start and end PFNs of the E820 entries that are non-RAM, and of the gaps. On 99% of machines that means one big region right underneath the 4GB mark, usually starting at 0xc0000 (or 0x80000) and going up to 0x100000.

[v2: Fix for E820 crossing 1MB region and clamp the start]
[v3: Squashed in code that does this over ranges]
[v4: Moved the comment to the correct spot]
[v5: Use the "raw" E820 from the hypervisor]
[v6: Added Reviewed-by tag]
Reviewed-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
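The range-selection logic described above can be exercised outside the kernel. Below is a minimal userspace sketch (not part of the patch): it walks a made-up E820-style table, skips RAM entries, and prints the PFN ranges that would be handed to set_phys_range_identity(). The table contents, the fake_e820entry type and the local PFN_UP/PFN_DOWN/E820_RAM definitions are illustrative assumptions, not the hypervisor-provided map.

/*
 * Userspace sketch of the identity-range selection: skip RAM entries,
 * collect everything else (including gaps) into PFN ranges.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1ULL << PAGE_SHIFT)
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define E820_RAM        1
#define ISA_END_ADDRESS 0x100000

struct fake_e820entry {         /* stand-in for struct e820entry */
	uint64_t addr;
	uint64_t size;
	uint32_t type;
};

int main(void)
{
	/* Hypothetical map: low RAM, a reserved hole under 4GB, high RAM. */
	struct fake_e820entry map[] = {
		{ 0x0000000000000000ULL, 0x00000000c0000000ULL, E820_RAM },
		{ 0x00000000c0000000ULL, 0x0000000040000000ULL, 2 /* reserved */ },
		{ 0x0000000100000000ULL, 0x0000000040000000ULL, E820_RAM },
	};
	int nr = sizeof(map) / sizeof(map[0]);

	/* Pretend we are a PV domU: skip the 0 -> ISA_END_ADDRESS region. */
	uint64_t last = ISA_END_ADDRESS;
	uint64_t start_pci = last;

	for (int i = 0; i < nr; i++) {
		uint64_t start = map[i].addr;
		uint64_t end = start + map[i].size;

		if (start < last)
			start = last;
		if (end <= start || last > end)
			continue;

		if (map[i].type == E820_RAM) {
			/* Flush the non-RAM/gap range accumulated so far. */
			if (start > start_pci)
				printf("identity PFNs %#llx - %#llx\n",
				       (unsigned long long)PFN_UP(start_pci),
				       (unsigned long long)PFN_DOWN(start));
			last = end;
			start_pci = end;
			continue;
		}
		start_pci = start < start_pci ? start : start_pci;
		last = end;
	}
	if (last > start_pci)
		printf("identity PFNs %#llx - %#llx\n",
		       (unsigned long long)PFN_UP(start_pci),
		       (unsigned long long)PFN_DOWN(last));
	return 0;
}

On this sample map the program prints a single range, "identity PFNs 0xc0000 - 0x100000", i.e. the one big region right underneath the 4GB mark that the commit message mentions.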
Diffstat (limited to 'arch/x86/xen/setup.c')
-rw-r--r--  arch/x86/xen/setup.c  52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 7201800e55a4..54d93791ddb9 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -143,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn,
 	return released;
 }
 
+static unsigned long __init xen_set_identity(const struct e820entry *list,
+					     ssize_t map_size)
+{
+	phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS;
+	phys_addr_t start_pci = last;
+	const struct e820entry *entry;
+	unsigned long identity = 0;
+	int i;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		phys_addr_t start = entry->addr;
+		phys_addr_t end = start + entry->size;
+
+		if (start < last)
+			start = last;
+
+		if (end <= start)
+			continue;
+
+		/* Skip over the 1MB region. */
+		if (last > end)
+			continue;
+
+		if (entry->type == E820_RAM) {
+			if (start > start_pci)
+				identity += set_phys_range_identity(
+						PFN_UP(start_pci), PFN_DOWN(start));
+
+			/* Without saving 'last' we would gobble RAM too
+			 * at the end of the loop. */
+			last = end;
+			start_pci = end;
+			continue;
+		}
+		start_pci = min(start, start_pci);
+		last = end;
+	}
+	if (last > start_pci)
+		identity += set_phys_range_identity(
+					PFN_UP(start_pci), PFN_DOWN(last));
+	return identity;
+}
 /**
  * machine_specific_memory_setup - Hook for machine specific memory setup.
  **/
 char * __init xen_memory_setup(void)
 {
 	static struct e820entry map[E820MAX] __initdata;
+	static struct e820entry map_raw[E820MAX] __initdata;
 
 	unsigned long max_pfn = xen_start_info->nr_pages;
 	unsigned long long mem_end;
@@ -156,6 +199,7 @@ char * __init xen_memory_setup(void)
 	struct xen_memory_map memmap;
 	unsigned long extra_pages = 0;
 	unsigned long extra_limit;
+	unsigned long identity_pages = 0;
 	int i;
 	int op;
 
@@ -181,6 +225,7 @@ char * __init xen_memory_setup(void)
 	}
 	BUG_ON(rc);
 
+	memcpy(map_raw, map, sizeof(map));
 	e820.nr_map = 0;
 	xen_extra_mem_start = mem_end;
 	for (i = 0; i < memmap.nr_entries; i++) {
@@ -251,6 +296,13 @@ char * __init xen_memory_setup(void)
 
 	xen_add_extra_mem(extra_pages);
 
+	/*
+	 * Set P2M for all non-RAM pages and E820 gaps to be identity
+	 * type PFNs. We supply it with the non-sanitized version
+	 * of the E820.
+	 */
+	identity_pages = xen_set_identity(map_raw, memmap.nr_entries);
+	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages);
 	return "Xen";
 }
 
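A note on the PFN rounding in xen_set_identity(): PFN_UP() is applied to the start of the non-RAM range and PFN_DOWN() to its end, so the resulting identity range only covers pages that lie entirely inside [start_pci, last). A page that straddles either edge, and may therefore partly belong to a neighbouring RAM entry, is left untouched. The standalone sketch below shows the effect on an unaligned region; the macro definitions mirror the usual kernel ones and the sample addresses are made up.

/*
 * Standalone illustration (not from the patch) of the inward rounding
 * used when converting a byte range into a PFN range.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t start_pci = 0x000a0800;  /* unaligned start of a non-RAM hole */
	uint64_t last      = 0x000ff400;  /* unaligned end of the hole         */

	/* Round the start up and the end down: stay inside the hole. */
	printf("identity PFNs: %#llx .. %#llx\n",
	       (unsigned long long)PFN_UP(start_pci),
	       (unsigned long long)PFN_DOWN(last));
	/* Prints: identity PFNs: 0xa1 .. 0xff */
	return 0;
}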