author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-05-07 15:33:27 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>  2012-05-07 15:33:27 -0400
commit     4b3451ad1313134821ad13b6fe821d0b4409aa18 (patch)
tree       6544c050ac52e139e01025c1a992c15a26c95b57 /arch/x86/xen
parent     f62805f1f30a40e354bd036b4cb799863a39be4b (diff)
parent     83d51ab473dddde7df858015070ed22b84ebe9a9 (diff)
Merge branch 'stable/autoballoon.v5.2' into stable/for-linus-3.5

* stable/autoballoon.v5.2:
  xen/setup: update VA mapping when releasing memory during setup
  xen/setup: Combine the two hypercall functions - since they are quite similar.
  xen/setup: Populate freed MFNs from non-RAM E820 entries and gaps to E820 RAM
  xen/setup: Only print "Freeing XXX-YYY pfn range: Z pages freed" if Z > 0
  xen/p2m: An early bootup variant of set_phys_to_machine
  xen/p2m: Collapse early_alloc_p2m_middle redundant checks.
  xen/p2m: Allow alloc_p2m_middle to call reserve_brk depending on argument
  xen/p2m: Move code around to allow for better re-usage.
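
Background for the p2m patches in this series: Xen's physical-to-machine table is a three-level tree in which every node is one page of 512 eight-byte entries on 64-bit x86 with 4 KiB pages. early_alloc_p2m() fills in missing top/mid levels and early_alloc_p2m_middle() fills in leaf pages. A minimal userspace sketch of the index split they rely on; the constants mirror arch/x86/xen/p2m.c, the pfn value is arbitrary:

/* How a pfn decomposes into the three p2m tree indices.
 * P2M_PER_PAGE = PAGE_SIZE / sizeof(unsigned long) = 512 on x86-64. */
#include <stdio.h>

#define P2M_PER_PAGE		512UL
#define P2M_MID_PER_PAGE	512UL

int main(void)
{
	unsigned long pfn = 0x12345;		/* arbitrary example frame */
	unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
	unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
	unsigned long idx    = pfn % P2M_PER_PAGE;

	/* idx == 0 means pfn sits exactly on a leaf-page boundary; that is
	 * the case the new check_boundary argument lets callers skip. */
	printf("pfn %#lx -> top %lu, mid %lu, idx %lu\n",
	       pfn, topidx, mididx, idx);
	return 0;
}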
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/enlighten.c |   1
-rw-r--r--  arch/x86/xen/mmu.c       |  23
-rw-r--r--  arch/x86/xen/p2m.c       | 104
-rw-r--r--  arch/x86/xen/setup.c     | 171
-rw-r--r--  arch/x86/xen/xen-ops.h   |   1
5 files changed, 206 insertions, 94 deletions
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index eca90e5be1e7..d1f9a0472d44 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1316,7 +1316,6 @@ asmlinkage void __init xen_start_kernel(void)
 
 	xen_raw_console_write("mapping kernel into physical memory\n");
 	pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
-	xen_ident_map_ISA();
 
 	/* Allocate and initialize top and mid mfn levels for p2m structure */
 	xen_build_mfn_list_list();
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index b8e279479a6b..b756d8cf4df5 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1929,29 +1929,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 #endif
 }
 
-void __init xen_ident_map_ISA(void)
-{
-	unsigned long pa;
-
-	/*
-	 * If we're dom0, then linear map the ISA machine addresses into
-	 * the kernel's address space.
-	 */
-	if (!xen_initial_domain())
-		return;
-
-	xen_raw_printk("Xen: setup ISA identity maps\n");
-
-	for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
-		pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
-
-		if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
-			BUG();
-	}
-
-	xen_flush_tlb();
-}
-
 static void __init xen_post_allocator_init(void)
 {
 	pv_mmu_ops.set_pte = xen_set_pte;
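
The deleted loop identity-mapped the ISA window (0xa0000-0x100000) by pointing each direct-map virtual address PAGE_OFFSET + pa at machine frame pa >> PAGE_SHIFT. The same VA fix-up is now done per released chunk by xen_set_identity_and_release_chunk() in the setup.c diff further down. A standalone sketch of just the address arithmetic; the PAGE_OFFSET value is the x86-64 direct-map base of this era and is shown purely for illustration:

/* Identity mapping: va in the direct map -> machine frame of pa. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_OFFSET	0xffff880000000000UL	/* illustrative x86-64 base */

int main(void)
{
	unsigned long pa;

	for (pa = 0xa0000; pa < 0x100000; pa += PAGE_SIZE)	/* ISA window */
		printf("va %#lx -> mfn %#lx\n",
		       PAGE_OFFSET + pa, pa >> PAGE_SHIFT);
	return 0;
}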
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 1b267e75158d..ffd08c414e91 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
 	return true;
 }
 
-static bool __init __early_alloc_p2m(unsigned long pfn)
+static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
 {
 	unsigned topidx, mididx, idx;
+	unsigned long *p2m;
+	unsigned long *mid_mfn_p;
 
 	topidx = p2m_top_index(pfn);
 	mididx = p2m_mid_index(pfn);
 	idx = p2m_index(pfn);
 
 	/* Pfff.. No boundary cross-over, lets get out. */
-	if (!idx)
+	if (!idx && check_boundary)
 		return false;
 
 	WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
 		return false;
 
 	/* Boundary cross-over for the edges: */
-	if (idx) {
-		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
-		unsigned long *mid_mfn_p;
+	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
 
-		p2m_init(p2m);
+	p2m_init(p2m);
 
-		p2m_top[topidx][mididx] = p2m;
+	p2m_top[topidx][mididx] = p2m;
 
-		/* For save/restore we need to MFN of the P2M saved */
+	/* For save/restore we need to MFN of the P2M saved */
 
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
-			"P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
-			topidx, mididx);
-		mid_mfn_p[mididx] = virt_to_mfn(p2m);
+	mid_mfn_p = p2m_top_mfn_p[topidx];
+	WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
+	     "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
+	     topidx, mididx);
+	mid_mfn_p[mididx] = virt_to_mfn(p2m);
+
+	return true;
+}
+
+static bool __init early_alloc_p2m(unsigned long pfn)
+{
+	unsigned topidx = p2m_top_index(pfn);
+	unsigned long *mid_mfn_p;
+	unsigned long **mid;
+
+	mid = p2m_top[topidx];
+	mid_mfn_p = p2m_top_mfn_p[topidx];
+	if (mid == p2m_mid_missing) {
+		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
+
+		p2m_mid_init(mid);
+
+		p2m_top[topidx] = mid;
 
+		BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
 	}
-	return idx != 0;
+	/* And the save/restore P2M tables.. */
+	if (mid_mfn_p == p2m_mid_missing_mfn) {
+		mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
+		p2m_mid_mfn_init(mid_mfn_p);
+
+		p2m_top_mfn_p[topidx] = mid_mfn_p;
+		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
+		/* Note: we don't set mid_mfn_p[midix] here,
+		 * look in early_alloc_p2m_middle */
+	}
+	return true;
+}
+bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
+{
+	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
+		if (!early_alloc_p2m(pfn))
+			return false;
+
+		if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
+			return false;
+
+		if (!__set_phys_to_machine(pfn, mfn))
+			return false;
+	}
+
+	return true;
 }
 unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 				      unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
 		pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
 		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
 	{
-		unsigned topidx = p2m_top_index(pfn);
-		unsigned long *mid_mfn_p;
-		unsigned long **mid;
-
-		mid = p2m_top[topidx];
-		mid_mfn_p = p2m_top_mfn_p[topidx];
-		if (mid == p2m_mid_missing) {
-			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
-
-			p2m_mid_init(mid);
-
-			p2m_top[topidx] = mid;
-
-			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
-		}
-		/* And the save/restore P2M tables.. */
-		if (mid_mfn_p == p2m_mid_missing_mfn) {
-			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
-			p2m_mid_mfn_init(mid_mfn_p);
-
-			p2m_top_mfn_p[topidx] = mid_mfn_p;
-			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
-			/* Note: we don't set mid_mfn_p[midix] here,
-			 * look in __early_alloc_p2m */
-		}
+		WARN_ON(!early_alloc_p2m(pfn));
 	}
 
-	__early_alloc_p2m(pfn_s);
-	__early_alloc_p2m(pfn_e);
+	early_alloc_p2m_middle(pfn_s, true);
+	early_alloc_p2m_middle(pfn_e, true);
 
 	for (pfn = pfn_s; pfn < pfn_e; pfn++)
 		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
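
The new early_set_phys_to_machine() above is a try/allocate/retry wrapper: attempt the plain store, build any missing tree levels from the brk on failure, then store again. A self-contained userspace sketch of that pattern using a toy one-level table; every name below is an illustrative stand-in, not the kernel API:

/* Allocate-on-demand retry, as in early_set_phys_to_machine(). */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define LEAF_ENTRIES 512UL

static unsigned long *leaves[512];	/* one slot per leaf page */

static bool try_store(unsigned long pfn, unsigned long mfn)
{
	unsigned long *leaf = leaves[pfn / LEAF_ENTRIES];

	if (!leaf)
		return false;		/* level missing, caller allocates */
	leaf[pfn % LEAF_ENTRIES] = mfn;
	return true;
}

static bool alloc_leaf(unsigned long pfn)
{
	/* stands in for extend_brk() + p2m_init() */
	leaves[pfn / LEAF_ENTRIES] = calloc(LEAF_ENTRIES, sizeof(unsigned long));
	return leaves[pfn / LEAF_ENTRIES] != NULL;
}

static bool set_entry(unsigned long pfn, unsigned long mfn)
{
	if (try_store(pfn, mfn))
		return true;		/* fast path: level already there */
	if (!alloc_leaf(pfn))
		return false;		/* out of memory */
	return try_store(pfn, mfn);	/* retry once, as the kernel does */
}

int main(void)
{
	printf("%d\n", set_entry(0x1234, 0xabcd));	/* prints 1 */
	return 0;
}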
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1ba8dff26753..3ebba0753d38 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,7 +26,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 	__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 }
 
-static unsigned long __init xen_release_chunk(unsigned long start,
-					      unsigned long end)
+static unsigned long __init xen_do_chunk(unsigned long start,
+					 unsigned long end, bool release)
 {
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
 	unsigned long pfn;
 	int ret;
 
-	for(pfn = start; pfn < end; pfn++) {
+	for (pfn = start; pfn < end; pfn++) {
+		unsigned long frame;
 		unsigned long mfn = pfn_to_mfn(pfn);
 
-		/* Make sure pfn exists to start with */
-		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
-			continue;
-
-		set_xen_guest_handle(reservation.extent_start, &mfn);
+		if (release) {
+			/* Make sure pfn exists to start with */
+			if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
+				continue;
+			frame = mfn;
+		} else {
+			if (mfn != INVALID_P2M_ENTRY)
+				continue;
+			frame = pfn;
+		}
+		set_xen_guest_handle(reservation.extent_start, &frame);
 		reservation.nr_extents = 1;
 
-		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+		ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
 					   &reservation);
-		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
+		WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
+		     release ? "release" : "populate", pfn, ret);
+
 		if (ret == 1) {
-			__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+			if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
+				if (release)
+					break;
+				set_xen_guest_handle(reservation.extent_start, &frame);
+				reservation.nr_extents = 1;
+				ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
+							   &reservation);
+				break;
+			}
 			len++;
-		}
+		} else
+			break;
 	}
-	printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n",
-	       start, end, len);
+	if (len)
+		printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
+		       release ? "Freeing" : "Populating",
+		       start, end, len,
+		       release ? "freed" : "added");
 
 	return len;
 }
 
+static unsigned long __init xen_release_chunk(unsigned long start,
+					      unsigned long end)
+{
+	return xen_do_chunk(start, end, true);
+}
+
+static unsigned long __init xen_populate_chunk(
+	const struct e820entry *list, size_t map_size,
+	unsigned long max_pfn, unsigned long *last_pfn,
+	unsigned long credits_left)
+{
+	const struct e820entry *entry;
+	unsigned int i;
+	unsigned long done = 0;
+	unsigned long dest_pfn;
+
+	for (i = 0, entry = list; i < map_size; i++, entry++) {
+		unsigned long credits = credits_left;
+		unsigned long s_pfn;
+		unsigned long e_pfn;
+		unsigned long pfns;
+		long capacity;
+
+		if (credits <= 0)
+			break;
+
+		if (entry->type != E820_RAM)
+			continue;
+
+		e_pfn = PFN_UP(entry->addr + entry->size);
+
+		/* We only care about E820 after the xen_start_info->nr_pages */
+		if (e_pfn <= max_pfn)
+			continue;
+
+		s_pfn = PFN_DOWN(entry->addr);
+		/* If the E820 falls within the nr_pages, we want to start
+		 * at the nr_pages PFN.
+		 * If that would mean going past the E820 entry, skip it
+		 */
+		if (s_pfn <= max_pfn) {
+			capacity = e_pfn - max_pfn;
+			dest_pfn = max_pfn;
+		} else {
+			/* last_pfn MUST be within E820_RAM regions */
+			if (*last_pfn && e_pfn >= *last_pfn)
+				s_pfn = *last_pfn;
+			capacity = e_pfn - s_pfn;
+			dest_pfn = s_pfn;
+		}
+		/* If we had filled this E820_RAM entry, go to the next one. */
+		if (capacity <= 0)
+			continue;
+
+		if (credits > capacity)
+			credits = capacity;
+
+		pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
+		done += pfns;
+		credits_left -= pfns;
+		*last_pfn = (dest_pfn + pfns);
+	}
+	return done;
+}
+
+static void __init xen_set_identity_and_release_chunk(
+	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
+	unsigned long *released, unsigned long *identity)
+{
+	unsigned long pfn;
+
+	/*
+	 * If the PFNs are currently mapped, the VA mapping also needs
+	 * to be updated to be 1:1.
+	 */
+	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+		(void)HYPERVISOR_update_va_mapping(
+			(unsigned long)__va(pfn << PAGE_SHIFT),
+			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+
+	if (start_pfn < nr_pages)
+		*released += xen_release_chunk(
+			start_pfn, min(end_pfn, nr_pages));
+
+	*identity += set_phys_range_identity(start_pfn, end_pfn);
+}
+
 static unsigned long __init xen_set_identity_and_release(
 	const struct e820entry *list, size_t map_size, unsigned long nr_pages)
 {
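
The credit bookkeeping in xen_populate_chunk() above is easiest to follow on a toy map: credits_left is the stock of released pages available to give back, capacity is how much of an E820_RAM entry lies beyond the boot allocation (max_pfn), and last_pfn remembers where population stopped so a later entry resumes there. A userspace walk-through with made-up numbers; only the arithmetic mirrors the hunk:

/* Toy run of the populate loop's credit/capacity accounting. */
#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; };

int main(void)
{
	struct region map[] = { { 0x000, 0x100 }, { 0x180, 0x200 } };
	unsigned long max_pfn = 0x100;		/* pages the guest booted with */
	unsigned long credits_left = 0x60;	/* released pages to re-add */
	unsigned long last_pfn = 0, done = 0;
	unsigned int i;

	for (i = 0; i < 2 && credits_left; i++) {
		unsigned long s = map[i].start_pfn, e = map[i].end_pfn;
		unsigned long dest, credits = credits_left;
		long capacity;

		if (e <= max_pfn)		/* entirely below max_pfn: skip */
			continue;
		if (s <= max_pfn) {		/* entry straddles max_pfn */
			capacity = e - max_pfn;
			dest = max_pfn;
		} else {			/* entry fully above max_pfn */
			if (last_pfn && e >= last_pfn)
				s = last_pfn;	/* resume where we stopped */
			capacity = e - s;
			dest = s;
		}
		if (capacity <= 0)
			continue;
		if (credits > (unsigned long)capacity)
			credits = capacity;
		done += credits;		/* xen_do_chunk() populates here */
		credits_left -= credits;
		last_pfn = dest + credits;
		printf("entry %u: populate pfns %#lx-%#lx\n",
		       i, dest, dest + credits);
	}
	printf("populated %lu pages, last_pfn %#lx\n", done, last_pfn);
	return 0;
}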
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
 	 */
 	for (i = 0, entry = list; i < map_size; i++, entry++) {
 		phys_addr_t end = entry->addr + entry->size;
-
 		if (entry->type == E820_RAM || i == map_size - 1) {
 			unsigned long start_pfn = PFN_DOWN(start);
 			unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
 			if (entry->type == E820_RAM)
 				end_pfn = PFN_UP(entry->addr);
 
-			if (start_pfn < end_pfn) {
-				if (start_pfn < nr_pages)
-					released += xen_release_chunk(
-						start_pfn, min(end_pfn, nr_pages));
+			if (start_pfn < end_pfn)
+				xen_set_identity_and_release_chunk(
+					start_pfn, end_pfn, nr_pages,
+					&released, &identity);
 
-				identity += set_phys_range_identity(
-					start_pfn, end_pfn);
-			}
 			start = end;
 		}
 	}
 
-	printk(KERN_INFO "Released %lu pages of unused memory\n", released);
-	printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
+	if (released)
+		printk(KERN_INFO "Released %lu pages of unused memory\n", released);
+	if (identity)
+		printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
 
 	return released;
 }
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
 	int rc;
 	struct xen_memory_map memmap;
 	unsigned long max_pages;
+	unsigned long last_pfn = 0;
 	unsigned long extra_pages = 0;
+	unsigned long populated;
 	int i;
 	int op;
 
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void)
 	 */
 	xen_released_pages = xen_set_identity_and_release(
 		map, memmap.nr_entries, max_pfn);
-	extra_pages += xen_released_pages;
 
 	/*
+	 * Populate back the non-RAM pages and E820 gaps that had been
+	 * released. */
+	populated = xen_populate_chunk(map, memmap.nr_entries,
+			max_pfn, &last_pfn, xen_released_pages);
+
+	extra_pages += (xen_released_pages - populated);
+
+	if (last_pfn > max_pfn) {
+		max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
+		mem_end = PFN_PHYS(max_pfn);
+	}
+	/*
 	 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
 	 * factor the base size.  On non-highmem systems, the base
 	 * size is the full initial memory allocation; on highmem it
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
 	 */
 	extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
 			  extra_pages);
-
 	i = 0;
 	while (i < memmap.nr_entries) {
 		u64 addr = map[i].addr;
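
On the clamp kept in the hunk above: extra_pages (what was released but not repopulated) is capped at EXTRA_MEM_RATIO times the base allocation; the ratio is 10 in this file at the time of the merge. A worked example with illustrative pfn counts:

/* The extra-memory clamp from xen_memory_setup(), in isolation. */
#include <stdio.h>

#define EXTRA_MEM_RATIO 10UL	/* as defined in arch/x86/xen/setup.c */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long max_pfn = 0x40000;	/* 1 GiB of 4 KiB pages */
	unsigned long maxmem_pfn = 0x8000000;	/* PFN_DOWN(MAXMEM), illustrative */
	unsigned long extra_pages = 0x300000;	/* released - populated */

	/* Never keep more than 10x the base allocation as balloon slack. */
	extra_pages = min_ul(EXTRA_MEM_RATIO * min_ul(max_pfn, maxmem_pfn),
			     extra_pages);
	printf("clamped extra_pages = %#lx\n", extra_pages);	/* 0x280000 */
	return 0;
}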
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index b095739ccd4c..506fa08d934a 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
 void xen_build_mfn_list_list(void);
 void xen_setup_machphys_mapping(void);
 pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
-void xen_ident_map_ISA(void);
 void xen_reserve_top(void);
 extern unsigned long xen_max_p2m_pfn;
 