author     Stefano Stabellini <stefano.stabellini@eu.citrix.com>    2011-09-29 06:57:56 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>           2011-09-29 10:32:58 -0400
commit     0930bba674e248b921ea659b036ff02564e5a5f4
tree       52df16f477e1e63b034ee2e28f2d5c916bb53097  /arch/x86/xen/p2m.c
parent     693394b8c3dcee1a3baa52e30fdc3323d88cd579
xen: modify kernel mappings corresponding to granted pages
If we want to use granted pages for AIO, changing the mappings of a user vma and the corresponding p2m is not enough; we also need to update the kernel mappings accordingly. Currently this is only needed for pages that are created for user usage through /dev/xen/gntdev. That is, pages that have been in use by the kernel and use the P2M will not need this special mapping. However, there are no guarantees that in the future the kernel won't start accessing pages through the 1:1 mapping even for internal usage.

In order to avoid the complexity of dealing with highmem, we allocate the pages in lowmem. We issue a HYPERVISOR_grant_table_op right away in m2p_add_override and we remove the mappings using another HYPERVISOR_grant_table_op in m2p_remove_override. Considering that m2p_add_override and m2p_remove_override are called once per page, we use multicalls and hypercall batching.

Use the kmap_op pointer directly as the argument to do the mapping, as it is guaranteed to be present up until the unmapping is done. Before issuing any unmapping multicalls, we need to make sure that the mapping has already been done, because we need kmap_op->handle to be set correctly.

Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
[v1: Removed GRANT_FRAME_BIT usage]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
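To make the new calling convention concrete, here is a minimal caller-side sketch. It is illustrative only and not code from this commit: example_map_grant() and example_unmap_grant() are hypothetical helpers (the in-tree callers are the grant-table and gntdev paths), and the sketch assumes a lowmem page and a kmap_op that stays allocated until the unmap.

#include <linux/mm.h>
#include <xen/grant_table.h>
#include <asm/xen/page.h>

/*
 * Hypothetical caller sketch (not from this patch).  The kmap_op handed to
 * m2p_add_override() is used directly as the multicall argument, so it must
 * remain allocated, and the page must be in lowmem, until the matching
 * m2p_remove_override() has run and consumed kmap_op->handle.
 */
static int example_map_grant(struct page *page, grant_ref_t ref, domid_t domid,
			     unsigned long foreign_mfn,
			     struct gnttab_map_grant_ref *kmap_op)
{
	unsigned long kaddr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));

	/* Describe the kernel mapping the hypervisor should install. */
	gnttab_set_map_op(kmap_op, kaddr, GNTMAP_host_map, ref, domid);

	/*
	 * Queues GNTTABOP_map_grant_ref in a multicall and records kmap_op
	 * in page->index so the unmap side can find it again.
	 */
	return m2p_add_override(foreign_mfn, page, kmap_op);
}

static int example_unmap_grant(struct page *page)
{
	/*
	 * Flushes the pending multicall if kmap_op->handle is still -1,
	 * then queues GNTTABOP_unmap_grant_ref and restores the 1:1 PTE.
	 */
	return m2p_remove_override(page, true /* clear_pte */);
}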
Diffstat (limited to 'arch/x86/xen/p2m.c')
-rw-r--r--  arch/x86/xen/p2m.c  76
1 file changed, 66 insertions, 10 deletions
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 6e56b65edafb..a8ee9a45c359 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,7 +161,9 @@
 #include <asm/xen/page.h>
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
+#include <xen/grant_table.h>
 
+#include "multicalls.h"
 #include "xen-ops.h"
 
 static void __init m2p_override_init(void);
@@ -676,7 +678,8 @@ static unsigned long mfn_hash(unsigned long mfn)
 }
 
 /* Add an MFN override for a particular page */
-int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
+int m2p_add_override(unsigned long mfn, struct page *page,
+                struct gnttab_map_grant_ref *kmap_op)
 {
         unsigned long flags;
         unsigned long pfn;
@@ -700,9 +703,20 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
         if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
                 return -ENOMEM;
 
-        if (clear_pte && !PageHighMem(page))
-                /* Just zap old mapping for now */
-                pte_clear(&init_mm, address, ptep);
+        if (kmap_op != NULL) {
+                if (!PageHighMem(page)) {
+                        struct multicall_space mcs =
+                                xen_mc_entry(sizeof(*kmap_op));
+
+                        MULTI_grant_table_op(mcs.mc,
+                                        GNTTABOP_map_grant_ref, kmap_op, 1);
+
+                        xen_mc_issue(PARAVIRT_LAZY_MMU);
+                }
+                /* let's use dev_bus_addr to record the old mfn instead */
+                kmap_op->dev_bus_addr = page->index;
+                page->index = (unsigned long) kmap_op;
+        }
         spin_lock_irqsave(&m2p_override_lock, flags);
         list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
         spin_unlock_irqrestore(&m2p_override_lock, flags);
@@ -736,14 +750,56 @@ int m2p_remove_override(struct page *page, bool clear_pte)
         spin_lock_irqsave(&m2p_override_lock, flags);
         list_del(&page->lru);
         spin_unlock_irqrestore(&m2p_override_lock, flags);
-        set_phys_to_machine(pfn, page->index);
         WARN_ON(!PagePrivate(page));
         ClearPagePrivate(page);
-        if (clear_pte && !PageHighMem(page))
-                set_pte_at(&init_mm, address, ptep,
-                                pfn_pte(pfn, PAGE_KERNEL));
-        /* No tlb flush necessary because the caller already
-         * left the pte unmapped. */
+
+        if (clear_pte) {
+                struct gnttab_map_grant_ref *map_op =
+                        (struct gnttab_map_grant_ref *) page->index;
+                set_phys_to_machine(pfn, map_op->dev_bus_addr);
+                if (!PageHighMem(page)) {
+                        struct multicall_space mcs;
+                        struct gnttab_unmap_grant_ref *unmap_op;
+
+                        /*
+                         * It might be that we queued all the m2p grant table
+                         * hypercalls in a multicall, then m2p_remove_override
+                         * get called before the multicall has actually been
+                         * issued. In this case handle is going to -1 because
+                         * it hasn't been modified yet.
+                         */
+                        if (map_op->handle == -1)
+                                xen_mc_flush();
+                        /*
+                         * Now if map_op->handle is negative it means that the
+                         * hypercall actually returned an error.
+                         */
+                        if (map_op->handle == GNTST_general_error) {
+                                printk(KERN_WARNING "m2p_remove_override: "
+                                                "pfn %lx mfn %lx, failed to modify kernel mappings",
+                                                pfn, mfn);
+                                return -1;
+                        }
+
+                        mcs = xen_mc_entry(
+                                        sizeof(struct gnttab_unmap_grant_ref));
+                        unmap_op = mcs.args;
+                        unmap_op->host_addr = map_op->host_addr;
+                        unmap_op->handle = map_op->handle;
+                        unmap_op->dev_bus_addr = 0;
+
+                        MULTI_grant_table_op(mcs.mc,
+                                        GNTTABOP_unmap_grant_ref, unmap_op, 1);
+
+                        xen_mc_issue(PARAVIRT_LAZY_MMU);
+
+                        set_pte_at(&init_mm, address, ptep,
+                                        pfn_pte(pfn, PAGE_KERNEL));
+                        __flush_tlb_single(address);
+                        map_op->host_addr = 0;
+                }
+        } else
+                set_phys_to_machine(pfn, page->index);
 
         return 0;
 }