aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorDavid Vrabel <dvrabel@cantab.net>2012-05-03 11:15:42 -0400
committerKonrad Rzeszutek Wilk <konrad.wilk@oracle.com>2012-05-07 15:32:24 -0400
commit83d51ab473dddde7df858015070ed22b84ebe9a9 (patch)
treec02be9696877ae5d7c00a524ae4daf2f99c181f1 /arch/x86
parent96dc08b35c4af8cb5810450602590706f2593a5f (diff)
xen/setup: update VA mapping when releasing memory during setup
In xen_memory_setup(), if a page that is being released has a VA mapping this must also be updated. Otherwise, the page will not be released completely -- it will still be referenced in Xen and won't be freed until the mapping is removed and this prevents it from being reallocated at a different PFN. This was already being done for the ISA memory region in xen_ident_map_ISA() but on many systems this was omitting a few pages as many systems marked a few pages below the ISA memory region as reserved in the e820 map. This fixes errors such as: (XEN) page_alloc.c:1148:d0 Over-allocation for domain 0: 2097153 > 2097152 (XEN) memory.c:133:d0 Could not allocate order=0 extent: id=0 memflags=0 (0 of 17) Signed-off-by: David Vrabel <david.vrabel@citrix.com> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/xen/enlighten.c1
-rw-r--r--arch/x86/xen/mmu.c23
-rw-r--r--arch/x86/xen/setup.c41
-rw-r--r--arch/x86/xen/xen-ops.h1
4 files changed, 34 insertions, 32 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index fe06bf4ef0e3..ac90e5629508 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1308,7 +1308,6 @@ asmlinkage void __init xen_start_kernel(void)
1308 1308
1309 xen_raw_console_write("mapping kernel into physical memory\n"); 1309 xen_raw_console_write("mapping kernel into physical memory\n");
1310 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 1310 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
1311 xen_ident_map_ISA();
1312 1311
1313 /* Allocate and initialize top and mid mfn levels for p2m structure */ 1312 /* Allocate and initialize top and mid mfn levels for p2m structure */
1314 xen_build_mfn_list_list(); 1313 xen_build_mfn_list_list();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 91dc2871e336..c9a351925a0c 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1929,29 +1929,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1929#endif 1929#endif
1930} 1930}
1931 1931
1932void __init xen_ident_map_ISA(void)
1933{
1934 unsigned long pa;
1935
1936 /*
1937 * If we're dom0, then linear map the ISA machine addresses into
1938 * the kernel's address space.
1939 */
1940 if (!xen_initial_domain())
1941 return;
1942
1943 xen_raw_printk("Xen: setup ISA identity maps\n");
1944
1945 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1946 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1947
1948 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1949 BUG();
1950 }
1951
1952 xen_flush_tlb();
1953}
1954
1955static void __init xen_post_allocator_init(void) 1932static void __init xen_post_allocator_init(void)
1956{ 1933{
1957 pv_mmu_ops.set_pte = xen_set_pte; 1934 pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 30ac05a8d28f..3ebba0753d38 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -139,6 +139,13 @@ static unsigned long __init xen_do_chunk(unsigned long start,
139 139
140 return len; 140 return len;
141} 141}
142
143static unsigned long __init xen_release_chunk(unsigned long start,
144 unsigned long end)
145{
146 return xen_do_chunk(start, end, true);
147}
148
142static unsigned long __init xen_populate_chunk( 149static unsigned long __init xen_populate_chunk(
143 const struct e820entry *list, size_t map_size, 150 const struct e820entry *list, size_t map_size,
144 unsigned long max_pfn, unsigned long *last_pfn, 151 unsigned long max_pfn, unsigned long *last_pfn,
@@ -197,6 +204,29 @@ static unsigned long __init xen_populate_chunk(
197 } 204 }
198 return done; 205 return done;
199} 206}
207
208static void __init xen_set_identity_and_release_chunk(
209 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
210 unsigned long *released, unsigned long *identity)
211{
212 unsigned long pfn;
213
214 /*
215 * If the PFNs are currently mapped, the VA mapping also needs
216 * to be updated to be 1:1.
217 */
218 for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
219 (void)HYPERVISOR_update_va_mapping(
220 (unsigned long)__va(pfn << PAGE_SHIFT),
221 mfn_pte(pfn, PAGE_KERNEL_IO), 0);
222
223 if (start_pfn < nr_pages)
224 *released += xen_release_chunk(
225 start_pfn, min(end_pfn, nr_pages));
226
227 *identity += set_phys_range_identity(start_pfn, end_pfn);
228}
229
200static unsigned long __init xen_set_identity_and_release( 230static unsigned long __init xen_set_identity_and_release(
201 const struct e820entry *list, size_t map_size, unsigned long nr_pages) 231 const struct e820entry *list, size_t map_size, unsigned long nr_pages)
202{ 232{
@@ -226,14 +256,11 @@ static unsigned long __init xen_set_identity_and_release(
226 if (entry->type == E820_RAM) 256 if (entry->type == E820_RAM)
227 end_pfn = PFN_UP(entry->addr); 257 end_pfn = PFN_UP(entry->addr);
228 258
229 if (start_pfn < end_pfn) { 259 if (start_pfn < end_pfn)
230 if (start_pfn < nr_pages) 260 xen_set_identity_and_release_chunk(
231 released += xen_do_chunk( 261 start_pfn, end_pfn, nr_pages,
232 start_pfn, min(end_pfn, nr_pages), true); 262 &released, &identity);
233 263
234 identity += set_phys_range_identity(
235 start_pfn, end_pfn);
236 }
237 start = end; 264 start = end;
238 } 265 }
239 } 266 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b095739ccd4c..506fa08d934a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
28void xen_build_mfn_list_list(void); 28void xen_build_mfn_list_list(void);
29void xen_setup_machphys_mapping(void); 29void xen_setup_machphys_mapping(void);
30pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); 30pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
31void xen_ident_map_ISA(void);
32void xen_reserve_top(void); 31void xen_reserve_top(void);
33extern unsigned long xen_max_p2m_pfn; 32extern unsigned long xen_max_p2m_pfn;
34 33