aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/xen/balloon.c
diff options
context:
space:
mode:
authorDavid Vrabel <david.vrabel@citrix.com>2015-01-05 12:06:01 -0500
committerDavid Vrabel <david.vrabel@citrix.com>2015-01-28 09:03:10 -0500
commit0bb599fd30108883b00c7d4a226eeb49111e6932 (patch)
treee7939c55ff9b95a2eb180884bea213c1b78eb863 /drivers/xen/balloon.c
parent853d0289340026b30f93fd0e768340221d4e605c (diff)
xen: remove scratch frames for ballooned pages and m2p override
The scratch frame mappings for ballooned pages and the m2p override are broken. Remove them in preparation for replacing them with simpler mechanisms that works. The scratch pages did not ensure that the page was not in use. In particular, the foreign page could still be in use by hardware. If the guest reused the frame the hardware could read or write that frame. The m2p override did not handle the same frame being granted by two different grant references. Trying an M2P override lookup in this case is impossible. With the m2p override removed, the grant map/unmap for the kernel mappings (for x86 PV) can be easily batched in set_foreign_p2m_mapping() and clear_foreign_p2m_mapping(). Signed-off-by: David Vrabel <david.vrabel@citrix.com> Reviewed-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Diffstat (limited to 'drivers/xen/balloon.c')
-rw-r--r--drivers/xen/balloon.c86
1 file changed, 2 insertions, 84 deletions
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 3860d02729dc..0b52d92cb2e5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL_GPL(balloon_stats);
92 92
93/* We increase/decrease in batches which fit in a page */ 93/* We increase/decrease in batches which fit in a page */
94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)]; 94static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
95static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
96 95
97 96
98/* List of ballooned pages, threaded through the mem_map array. */ 97/* List of ballooned pages, threaded through the mem_map array. */
@@ -423,22 +422,12 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
423 page = pfn_to_page(pfn); 422 page = pfn_to_page(pfn);
424 423
425#ifdef CONFIG_XEN_HAVE_PVMMU 424#ifdef CONFIG_XEN_HAVE_PVMMU
426 /*
427 * Ballooned out frames are effectively replaced with
428 * a scratch frame. Ensure direct mappings and the
429 * p2m are consistent.
430 */
431 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 425 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
432 if (!PageHighMem(page)) { 426 if (!PageHighMem(page)) {
433 struct page *scratch_page = get_balloon_scratch_page();
434
435 ret = HYPERVISOR_update_va_mapping( 427 ret = HYPERVISOR_update_va_mapping(
436 (unsigned long)__va(pfn << PAGE_SHIFT), 428 (unsigned long)__va(pfn << PAGE_SHIFT),
437 pfn_pte(page_to_pfn(scratch_page), 429 __pte_ma(0), 0);
438 PAGE_KERNEL_RO), 0);
439 BUG_ON(ret); 430 BUG_ON(ret);
440
441 put_balloon_scratch_page();
442 } 431 }
443 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 432 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
444 } 433 }
@@ -500,18 +489,6 @@ static void balloon_process(struct work_struct *work)
500 mutex_unlock(&balloon_mutex); 489 mutex_unlock(&balloon_mutex);
501} 490}
502 491
503struct page *get_balloon_scratch_page(void)
504{
505 struct page *ret = get_cpu_var(balloon_scratch_page);
506 BUG_ON(ret == NULL);
507 return ret;
508}
509
510void put_balloon_scratch_page(void)
511{
512 put_cpu_var(balloon_scratch_page);
513}
514
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 492/* Resets the Xen limit, sets new target, and kicks off processing. */
516void balloon_set_new_target(unsigned long target) 493void balloon_set_new_target(unsigned long target)
517{ 494{
@@ -605,61 +582,13 @@ static void __init balloon_add_region(unsigned long start_pfn,
605 } 582 }
606} 583}
607 584
608static int alloc_balloon_scratch_page(int cpu)
609{
610 if (per_cpu(balloon_scratch_page, cpu) != NULL)
611 return 0;
612
613 per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
614 if (per_cpu(balloon_scratch_page, cpu) == NULL) {
615 pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
616 return -ENOMEM;
617 }
618
619 return 0;
620}
621
622
623static int balloon_cpu_notify(struct notifier_block *self,
624 unsigned long action, void *hcpu)
625{
626 int cpu = (long)hcpu;
627 switch (action) {
628 case CPU_UP_PREPARE:
629 if (alloc_balloon_scratch_page(cpu))
630 return NOTIFY_BAD;
631 break;
632 default:
633 break;
634 }
635 return NOTIFY_OK;
636}
637
638static struct notifier_block balloon_cpu_notifier = {
639 .notifier_call = balloon_cpu_notify,
640};
641
642static int __init balloon_init(void) 585static int __init balloon_init(void)
643{ 586{
644 int i, cpu; 587 int i;
645 588
646 if (!xen_domain()) 589 if (!xen_domain())
647 return -ENODEV; 590 return -ENODEV;
648 591
649 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
650 register_cpu_notifier(&balloon_cpu_notifier);
651
652 get_online_cpus();
653 for_each_online_cpu(cpu) {
654 if (alloc_balloon_scratch_page(cpu)) {
655 put_online_cpus();
656 unregister_cpu_notifier(&balloon_cpu_notifier);
657 return -ENOMEM;
658 }
659 }
660 put_online_cpus();
661 }
662
663 pr_info("Initialising balloon driver\n"); 592 pr_info("Initialising balloon driver\n");
664 593
665 balloon_stats.current_pages = xen_pv_domain() 594 balloon_stats.current_pages = xen_pv_domain()
@@ -696,15 +625,4 @@ static int __init balloon_init(void)
696 625
697subsys_initcall(balloon_init); 626subsys_initcall(balloon_init);
698 627
699static int __init balloon_clear(void)
700{
701 int cpu;
702
703 for_each_possible_cpu(cpu)
704 per_cpu(balloon_scratch_page, cpu) = NULL;
705
706 return 0;
707}
708early_initcall(balloon_clear);
709
710MODULE_LICENSE("GPL"); 628MODULE_LICENSE("GPL");