author		Julien Grall <julien.grall@citrix.com>	2015-05-04 10:39:08 -0400
committer	David Vrabel <david.vrabel@citrix.com>	2015-10-23 09:20:38 -0400
commit		30756c62997822894fb34e2114f5dc727a12af30
tree		f48503562c024f160ebabcaba5be5b0414f3c871
parent		9652c08012580c9961c77fc8726a877e0d437324
xen/balloon: Don't rely on the page granularity being the same for Xen and Linux
For ARM64 guests, Linux is able to support either 64K or 4K page granularity. The hypercall interface, however, is always based on 4K page granularity.

With 64K page granularity, a single Linux page will be spread over multiple Xen frames.

To avoid splitting the page into 4K frames, take advantage of the extent_order field to directly allocate/free chunks of the Linux page size.

Note that PVMMU is only used for PV guests (which are x86) where the page granularity is always 4KB. A few BUILD_BUG_ONs have been added to enforce this, because that code has not been modified.

Signed-off-by: Julien Grall <julien.grall@citrix.com>
Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
-rw-r--r--	drivers/xen/balloon.c | 70
1 file changed, 55 insertions(+), 15 deletions(-)
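The arithmetic behind the new EXTENT_ORDER macro (introduced in the first hunk below) is easiest to see with concrete numbers. The following is a minimal userspace sketch, not part of this patch, assuming a 64K-page ARM64 guest talking to the 4K hypercall ABI; XEN_PAGE_SIZE, PAGE_SIZE and XEN_PFN_PER_PAGE mirror the kernel macros, and __builtin_ctzl stands in for the kernel's fls() - 1 (equivalent here because XEN_PFN_PER_PAGE is a power of two):

#include <stdio.h>

/* Assumed values: 4K Xen frames (the hypercall ABI) vs 64K Linux pages. */
#define XEN_PAGE_SHIFT   12
#define XEN_PAGE_SIZE    (1UL << XEN_PAGE_SHIFT)
#define PAGE_SHIFT       16
#define PAGE_SIZE        (1UL << PAGE_SHIFT)
#define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE)

/* Same value as fls(XEN_PFN_PER_PAGE) - 1 since the count is a power of two. */
#define EXTENT_ORDER     (__builtin_ctzl(XEN_PFN_PER_PAGE))

int main(void)
{
	printf("Xen frames per Linux page: %lu\n", XEN_PFN_PER_PAGE);
	printf("extent_order for XENMEM_* calls: %d\n", EXTENT_ORDER);
	printf("frames covered by one extent: %lu\n", 1UL << EXTENT_ORDER);
	return 0;
}

With these values one extent of order 4 covers exactly one 64K Linux page, so the balloon driver never has to split a page into individual 4K frames.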
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index f56662324a47..b50d22960ce7 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -113,6 +113,12 @@ static struct ctl_table xen_root[] = {
 #endif
 
 /*
+ * Use one extent per PAGE_SIZE to avoid to break down the page into
+ * multiple frame.
+ */
+#define EXTENT_ORDER (fls(XEN_PFN_PER_PAGE) - 1)
+
+/*
  * balloon_process() state:
  *
  * BP_DONE: done or nothing to do,
@@ -304,6 +310,12 @@ static enum bp_state reserve_additional_memory(void)
 	nid = memory_add_physaddr_to_nid(resource->start);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
+	/*
+	 * We don't support PV MMU when Linux and Xen is using
+	 * different page granularity.
+	 */
+	BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 	/*
 	 * add_memory() will build page tables for the new memory so
 	 * the p2m must contain invalid entries so the correct
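BUILD_BUG_ON() is a kernel-internal compile-time assertion, so the check added above costs nothing at runtime. A rough standalone analogue, with assumed 4K values rather than anything taken from this tree, can be written with C11's _Static_assert:

#include <stdio.h>

/* Assumed values for a PV x86 guest, where both sides use 4K pages. */
#define XEN_PAGE_SIZE 4096UL
#define PAGE_SIZE     4096UL

/* Breaks the build, not the boot, if the PV MMU assumption ever changes. */
_Static_assert(XEN_PAGE_SIZE == PAGE_SIZE,
	       "PV MMU code assumes Xen and Linux share the same page granularity");

int main(void)
{
	printf("page granularity checked at compile time\n");
	return 0;
}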
@@ -384,11 +396,11 @@ static bool balloon_is_inflated(void)
 static enum bp_state increase_reservation(unsigned long nr_pages)
 {
 	int rc;
-	unsigned long pfn, i;
+	unsigned long i;
 	struct page *page;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
-		.extent_order = 0,
+		.extent_order = EXTENT_ORDER,
 		.domid = DOMID_SELF
 	};
 
@@ -401,7 +413,11 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 			nr_pages = i;
 			break;
 		}
-		frame_list[i] = page_to_pfn(page);
+
+		/* XENMEM_populate_physmap requires a PFN based on Xen
+		 * granularity.
+		 */
+		frame_list[i] = page_to_xen_pfn(page);
 		page = balloon_next_page(page);
 	}
 
@@ -415,10 +431,16 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 		page = balloon_retrieve(false);
 		BUG_ON(page == NULL);
 
-		pfn = page_to_pfn(page);
-
 #ifdef CONFIG_XEN_HAVE_PVMMU
+		/*
+		 * We don't support PV MMU when Linux and Xen is using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			unsigned long pfn = page_to_pfn(page);
+
 			set_phys_to_machine(pfn, frame_list[i]);
 
 			/* Link back into the page tables if not highmem. */
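With extent_order now set to EXTENT_ORDER, each frame_list[] entry names only the first 4K Xen frame of a Linux page and the hypervisor populates the whole extent behind it. A small userspace sketch, with assumed values and first_xen_pfn standing in for the result of page_to_xen_pfn(page), of which frames end up backing one 64K page:

#include <stdio.h>

#define XEN_PFN_PER_PAGE 16	/* assumed: 64K Linux page / 4K Xen frame */

/* List the consecutive Xen PFNs covered by one extent of order 4. */
static void dump_frames_for_page(unsigned long first_xen_pfn)
{
	unsigned long k;

	for (k = 0; k < XEN_PFN_PER_PAGE; k++)
		printf("4K slot %2lu of the page -> Xen PFN %#lx\n",
		       k, first_xen_pfn + k);
}

int main(void)
{
	dump_frames_for_page(0x80000UL);	/* hypothetical starting frame */
	return 0;
}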
@@ -445,14 +467,15 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
 static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 {
 	enum bp_state state = BP_DONE;
-	unsigned long pfn, i;
-	struct page *page;
+	unsigned long i;
+	struct page *page, *tmp;
 	int ret;
 	struct xen_memory_reservation reservation = {
 		.address_bits = 0,
-		.extent_order = 0,
+		.extent_order = EXTENT_ORDER,
 		.domid = DOMID_SELF
 	};
+	LIST_HEAD(pages);
 
 	if (nr_pages > ARRAY_SIZE(frame_list))
 		nr_pages = ARRAY_SIZE(frame_list);
@@ -465,8 +488,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 			break;
 		}
 		scrub_page(page);
-
-		frame_list[i] = page_to_pfn(page);
+		list_add(&page->lru, &pages);
 	}
 
 	/*
@@ -478,14 +500,25 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 	 */
 	kmap_flush_unused();
 
-	/* Update direct mapping, invalidate P2M, and add to balloon. */
-	for (i = 0; i < nr_pages; i++) {
-		pfn = frame_list[i];
-		frame_list[i] = pfn_to_gfn(pfn);
-		page = pfn_to_page(pfn);
+	/*
+	 * Setup the frame, update direct mapping, invalidate P2M,
+	 * and add to balloon.
+	 */
+	i = 0;
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		/* XENMEM_decrease_reservation requires a GFN */
+		frame_list[i++] = xen_page_to_gfn(page);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
+		/*
+		 * We don't support PV MMU when Linux and Xen is using
+		 * different page granularity.
+		 */
+		BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+			unsigned long pfn = page_to_pfn(page);
+
 			if (!PageHighMem(page)) {
 				ret = HYPERVISOR_update_va_mapping(
 						(unsigned long)__va(pfn << PAGE_SHIFT),
@@ -495,6 +528,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 				__set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
 		}
 #endif
+		list_del(&page->lru);
 
 		balloon_append(page);
 	}
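decrease_reservation() now collects the ballooned-out pages on a local list and walks it with list_for_each_entry_safe(), so list_del() can unlink each entry mid-walk. A simplified userspace analogue, using a hand-rolled list rather than the kernel API, of why the "safe" form of the walk is needed:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

int main(void)
{
	struct node *head = NULL, *cur, *tmp;
	int i;

	/* Gather a few entries, as decrease_reservation() gathers pages. */
	for (i = 0; i < 4; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->id = i;
		n->next = head;
		head = n;
	}

	/*
	 * Safe walk: cache the next pointer before the current node is
	 * unlinked and freed, just as list_for_each_entry_safe() caches
	 * it in its 'tmp' cursor.
	 */
	for (cur = head; cur; cur = tmp) {
		tmp = cur->next;
		printf("processing entry %d, then unlinking it\n", cur->id);
		free(cur);
	}
	return 0;
}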
@@ -603,6 +637,12 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
 		if (page) {
 			pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
+			/*
+			 * We don't support PV MMU when Linux and Xen is using
+			 * different page granularity.
+			 */
+			BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE);
+
 			ret = xen_alloc_p2m_entry(page_to_pfn(page));
 			if (ret < 0)
 				goto out_undo;