From c0011dbfce69467b23b08fb4a64c39a409a935fb Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Thu, 4 Feb 2010 14:46:34 -0800
Subject: xen: use _PAGE_IOMAP in ioremap to do machine mappings

In a Xen domain, ioremap operates on machine addresses, not
pseudo-physical addresses.  We use _PAGE_IOMAP to determine whether a
mapping is intended for machine addresses.

[ Impact: allow Xen domain to map real hardware ]

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/mmu.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 69 insertions(+), 2 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 914f04695ce5..a4dea9df0cc0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -56,9 +56,11 @@
 #include
 #include
+#include
 #include
 #include
 #include
+#include
 #include

 #include "multicalls.h"
@@ -377,6 +379,28 @@ static bool xen_page_pinned(void *ptr)
 	return PagePinned(page);
 }

+static bool xen_iomap_pte(pte_t pte)
+{
+	return xen_initial_domain() && (pte_flags(pte) & _PAGE_IOMAP);
+}
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+	struct multicall_space mcs;
+	struct mmu_update *u;
+
+	mcs = xen_mc_entry(sizeof(*u));
+	u = mcs.args;
+
+	/* ptep might be kmapped when using 32-bit HIGHPTE */
+	u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+	u->val = pte_val_ma(pteval);
+
+	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+
+	xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
 	struct multicall_space mcs;
@@ -453,6 +477,11 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
 		    pte_t *ptep, pte_t pteval)
 {
+	if (xen_iomap_pte(pteval)) {
+		xen_set_iomap_pte(ptep, pteval);
+		goto out;
+	}
+
 	ADD_STATS(set_pte_at, 1);
 //	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
 	ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -523,8 +552,25 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
 	return val;
 }

+static pteval_t iomap_pte(pteval_t val)
+{
+	if (val & _PAGE_PRESENT) {
+		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+		pteval_t flags = val & PTE_FLAGS_MASK;
+
+		/* We assume the pte frame number is an MFN, so
+		   just use it as-is. */
+		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+	}
+
+	return val;
+}
+
 pteval_t xen_pte_val(pte_t pte)
 {
+	if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
+		return pte.pte;
+
 	return pte_mfn_to_pfn(pte.pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -537,7 +583,11 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

 pte_t xen_make_pte(pteval_t pte)
 {
-	pte = pte_pfn_to_mfn(pte);
+	if (unlikely(xen_initial_domain() && (pte & _PAGE_IOMAP)))
+		pte = iomap_pte(pte);
+	else
+		pte = pte_pfn_to_mfn(pte);
+
 	return native_make_pte(pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
@@ -593,6 +643,11 @@ void xen_set_pud(pud_t *ptr, pud_t val)

 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+	if (xen_iomap_pte(pte)) {
+		xen_set_iomap_pte(ptep, pte);
+		return;
+	}
+
 	ADD_STATS(pte_update, 1);
 //	ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
 	ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
@@ -609,6 +664,11 @@ void xen_set_pte(pte_t *ptep, pte_t pte)
 #ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+	if (xen_iomap_pte(pte)) {
+		xen_set_iomap_pte(ptep, pte);
+		return;
+	}
+
 	set_64bit((u64 *)ptep, native_pte_val(pte));
 }
@@ -1811,9 +1871,16 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
 		pte = pfn_pte(phys, prot);
 		break;

-	default:
+	case FIX_PARAVIRT_BOOTMAP:
+		/* This is an MFN, but it isn't an IO mapping from the
+		   IO domain */
 		pte = mfn_pte(phys, prot);
 		break;
+
+	default:
+		/* By default, set_fixmap is used for hardware mappings */
+		pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
+		break;
 	}

 	__native_set_fixmap(idx, pte);
--
cgit v1.2.2
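To make the _PAGE_IOMAP pass-through concrete: for an IO mapping the frame number in the pte is already a machine frame, so xen_make_pte() must not run it through the pfn-to-mfn translation. The standalone sketch below mirrors the iomap_pte() arithmetic; the PAGE_SHIFT, mask, and flag constants are simplified stand-ins for the x86-64 header definitions, and the MFN value is made up for illustration.

/* Standalone sketch of the iomap_pte() bit manipulation above.
 * Constants are simplified stand-ins, not the kernel's authoritative
 * layout. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT     12
#define _PAGE_PRESENT  0x001ULL
#define _PAGE_RW       0x002ULL
#define _PAGE_IOMAP    0x400ULL            /* a software pte bit */
#define PTE_PFN_MASK   0x000ffffffffff000ULL
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)

typedef uint64_t pteval_t;

/* An IO mapping: the frame number is already a machine frame (MFN),
 * so keep it as-is and only re-assemble the flag bits around it. */
static pteval_t iomap_pte(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;

		val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}
	return val;
}

int main(void)
{
	/* an invented MFN, standing in for a device BAR page */
	pteval_t pte = (0xfebc1ULL << PAGE_SHIFT)
		     | _PAGE_PRESENT | _PAGE_RW | _PAGE_IOMAP;

	/* No pfn-to-mfn translation happens: the frame survives unchanged. */
	printf("before %llx after %llx\n",
	       (unsigned long long)pte,
	       (unsigned long long)iomap_pte(pte));
	return 0;
}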
From 7347b4082e55ac4a673f06a0a0ce25c37273c9ec Mon Sep 17 00:00:00 2001
From: Alex Nixon
Date: Fri, 19 Feb 2010 13:31:06 -0500
Subject: xen: Allow unprivileged Xen domains to create iomap pages

PV DomU domains are allowed to map hardware MFNs for PCI passthrough,
but are not generally allowed to map raw machine pages.  In particular,
various pieces of code try to map DMI and ACPI tables in the ISA ROM
range.  We disallow _PAGE_IOMAP for those mappings, so that they are
redirected to a set of local zeroed pages we reserve for that purpose.

[ Impact: prevent passthrough of ISA space, as we only allow PCI ]

Signed-off-by: Alex Nixon
Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/enlighten.c |  4 ++++
 arch/x86/xen/mmu.c       | 18 +++++++++++++++---
 2 files changed, 19 insertions(+), 3 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 65d8d79b46a8..3254e8bc4cd7 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1145,6 +1145,10 @@ asmlinkage void __init xen_start_kernel(void)

 	pgd = (pgd_t *)xen_start_info->pt_base;

+	if (!xen_initial_domain())
+		__supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);
+
+	__supported_pte_mask |= _PAGE_IOMAP;
 	/* Don't do the full vcpu_info placement stuff until we have a
 	   possible map and a non-dummy shared_info. */
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a4dea9df0cc0..a5577f59416a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include

@@ -381,7 +382,7 @@ static bool xen_page_pinned(void *ptr)

 static bool xen_iomap_pte(pte_t pte)
 {
-	return xen_initial_domain() && (pte_flags(pte) & _PAGE_IOMAP);
+	return pte_flags(pte) & _PAGE_IOMAP;
 }

@@ -583,10 +584,21 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

 pte_t xen_make_pte(pteval_t pte)
 {
-	if (unlikely(xen_initial_domain() && (pte & _PAGE_IOMAP)))
+	phys_addr_t addr = (pte & PTE_PFN_MASK);
+
+	/*
+	 * Unprivileged domains are allowed to do IOMAPpings for
+	 * PCI passthrough, but not map ISA space.  The ISA
+	 * mappings are just dummy local mappings to keep other
+	 * parts of the kernel happy.
+	 */
+	if (unlikely(pte & _PAGE_IOMAP) &&
+	    (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
 		pte = iomap_pte(pte);
-	else
+	} else {
+		pte &= ~_PAGE_IOMAP;
 		pte = pte_pfn_to_mfn(pte);
+	}

 	return native_make_pte(pte);
 }
--
cgit v1.2.2
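The policy in the reworked xen_make_pte() boils down to one predicate: keep _PAGE_IOMAP if the domain is privileged, or if the target frame lies above the ISA hole; otherwise fall back to a normal mapping of a local dummy page. A standalone sketch of that predicate, with the constants again as simplified stand-ins:

/* Standalone sketch of the xen_make_pte() policy above: an unprivileged
 * domain may keep _PAGE_IOMAP only above the ISA range; below it the
 * mapping falls back to a normal (dummy, zeroed) local page.
 * ISA_END_ADDRESS and the bit values are stand-ins for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define _PAGE_IOMAP     0x400ULL
#define PTE_PFN_MASK    0x000ffffffffff000ULL
#define ISA_END_ADDRESS 0x100000ULL        /* 1MB: end of ISA space */

static bool keep_iomap(uint64_t pte, bool initial_domain)
{
	uint64_t addr = pte & PTE_PFN_MASK;

	if (!(pte & _PAGE_IOMAP))
		return false;
	/* dom0 may iomap anything; domU only outside the ISA range */
	return initial_domain || addr >= ISA_END_ADDRESS;
}

int main(void)
{
	uint64_t dmi_table = (0xf0ULL << PAGE_SHIFT) | _PAGE_IOMAP;    /* in ISA range */
	uint64_t pci_bar   = (0xfebc1ULL << PAGE_SHIFT) | _PAGE_IOMAP; /* passthrough BAR */

	printf("domU DMI -> %s\n", keep_iomap(dmi_table, false) ? "iomap" : "dummy page");
	printf("domU BAR -> %s\n", keep_iomap(pci_bar, false) ? "iomap" : "dummy page");
	printf("dom0 DMI -> %s\n", keep_iomap(dmi_table, true) ? "iomap" : "dummy page");
	return 0;
}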
From 19001c8c5bfa032ed45b10dfe48e355f5df88c61 Mon Sep 17 00:00:00 2001
From: Alex Nixon
Date: Mon, 9 Feb 2009 12:05:46 -0800
Subject: xen: Rename the balloon lock

* xen_create_contiguous_region needs access to the balloon lock to
  ensure memory doesn't change under its feet, so expose the balloon
  lock.
* Change the name of the lock to xen_reservation_lock, to reflect its
  now less-specific usage.

[ Impact: cleanup ]

Signed-off-by: Alex Nixon
Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/mmu.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index a5577f59416a..9e0d82fc21e4 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -70,6 +70,13 @@

 #define MMU_UPDATE_HISTO	30

+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
 #ifdef CONFIG_XEN_DEBUG_FS

 static struct {
--
cgit v1.2.2
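The lock's intended usage pattern is the irqsave form, since a later patch in this series calls xen_create_contiguous_region() from interrupt context via dma_alloc_coherent(). A kernel-style sketch of that pattern; the surrounding function is hypothetical, only the lock and the spinlock API are from the patch:

/* Sketch of the usage pattern for xen_reservation_lock; the function
 * adjust_reservation_sketch() is hypothetical. */
#include <linux/spinlock.h>

DEFINE_SPINLOCK(xen_reservation_lock);

static void adjust_reservation_sketch(void)
{
	unsigned long flags;

	/* irqsave form: the lock may be taken from interrupt context,
	 * e.g. dma_alloc_coherent() -> xen_create_contiguous_region() */
	spin_lock_irqsave(&xen_reservation_lock, flags);
	/* ... balloon or memory-exchange bookkeeping ... */
	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}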
From 08bbc9da92f7e44b9c208c6a1adba70c403b255e Mon Sep 17 00:00:00 2001
From: Alex Nixon
Date: Mon, 9 Feb 2009 12:05:46 -0800
Subject: xen: Add xen_create_contiguous_region

A memory region must be physically contiguous in order to be accessed
through DMA.  This patch adds xen_create_contiguous_region, which
ensures that a region of contiguous virtual memory is also physically
contiguous.

Based on Stephen Tweedie's port of the 2.6.18-xen version.

Remove contiguous_bitmap[] as it's no longer needed.

Ported from linux-2.6.18-xen.hg 707:e410857fd83c

[ Impact: add Xen-internal API to make pages phys-contig ]

Signed-off-by: Alex Nixon
Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Ian Campbell
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/mmu.c | 201 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 201 insertions(+)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9e0d82fc21e4..eb51402dd99a 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -53,6 +53,7 @@
 #include
 #include
 #include
+#include

@@ -2027,6 +2028,206 @@ void __init xen_init_mmu_ops(void)
 	pv_mmu_ops = xen_mmu_ops;
 }

+/* Protected by xen_reservation_lock. */
+#define MAX_CONTIG_ORDER	9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];

[... the helper functions xen_zap_pfn_range(), xen_remap_exchanged_ptes()
and xen_exchange_memory() were not recovered ...]

+int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+				 unsigned int address_bits)
+{
+	unsigned long *in_frames = discontig_frames, out_frame;
+	unsigned long flags;
+	int success;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	if (unlikely(order > MAX_CONTIG_ORDER))
+		return -ENOMEM;
+
+	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+	vm_unmap_aliases();
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	/* 1. Zap current PTEs, remembering MFNs. */
+	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+	/* 2. Get a new contiguous memory extent. */
+	out_frame = virt_to_pfn(vstart);
+	success = xen_exchange_memory(1UL << order, 0, in_frames,
+				      1, order, &out_frame,
+				      address_bits);
+
+	/* 3. Map the new extent in place of old pages. */
+	if (success)
+		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+	else
+		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+	return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+{
+	unsigned long *out_frames = discontig_frames, in_frame;
+	unsigned long flags;
+	int success;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
+	if (unlikely(order > MAX_CONTIG_ORDER))
+		return;
+
+	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+	vm_unmap_aliases();
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	/* 1. Find start MFN of contiguous extent. */
+	in_frame = virt_to_mfn(vstart);
+
+	/* 2. Zap current PTEs. */
+	xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+	/* 3. Do the exchange for non-contiguous MFNs. */
+	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+				      0, out_frames, 0);
+
+	/* 4. Map new pages in place of old pages. */
+	if (success)
+		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+	else
+		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
 #ifdef CONFIG_XEN_DEBUG_FS

 static struct dentry *d_mmu_debug;
--
cgit v1.2.2
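A hedged sketch of how a caller might use the new API to back a coherent DMA allocation: __get_free_pages() provides virtually contiguous pages, and the exchange makes them machine-contiguous below a device's addressing limit. The function name is hypothetical, error handling is trimmed, and xen/xen-ops.h is assumed to carry the prototypes:

/* Hypothetical caller of xen_create_contiguous_region(); a sketch of
 * a Xen coherent-DMA allocation path, not the kernel's actual code. */
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <xen/xen-ops.h>          /* assumed home of the prototypes */
#include <asm/xen/page.h>

static void *xen_alloc_coherent_sketch(size_t size, dma_addr_t *handle)
{
	unsigned int order = get_order(size);
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;

	/* Exchange the backing frames for a machine-contiguous extent
	 * addressable with 32 bits (a common device DMA constraint). */
	if (xen_create_contiguous_region(vstart, order, 32)) {
		free_pages(vstart, order);
		return NULL;
	}

	/* the bus address is the machine address of the new extent */
	*handle = virt_to_machine((void *)vstart).maddr;
	return (void *)vstart;
}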
From d2cb214551de8180542a04ec8c86c0c9412c5124 Mon Sep 17 00:00:00 2001
From: Jeremy Fitzhardinge
Date: Fri, 26 Mar 2010 15:37:50 -0700
Subject: xen/mmu: inhibit vmap aliases rather than trying to clear them out

Rather than trying to deal with aliases once they appear, just
completely inhibit them.  Mostly the removal of aliases was manageable,
but it comes unstuck in xen_create_contiguous_region() because it gets
executed at interrupt time (as a result of dma_alloc_coherent()), which
causes all sorts of confusion in the vmap code, as it was never
intended to be run in interrupt context.

This has the unfortunate side effect of removing all the unmap batching
the vmap code so carefully added, but that can't be helped.

Signed-off-by: Jeremy Fitzhardinge
Signed-off-by: Konrad Rzeszutek Wilk
---
 arch/x86/xen/mmu.c | 10 +++-------
 1 file changed, 3 insertions(+), 7 deletions(-)

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index eb51402dd99a..ef5728dde8f3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include

@@ -1015,8 +1016,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
-	vm_unmap_aliases();
-
 	xen_mc_batch();

 	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -1580,7 +1579,6 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l

 	if (PagePinned(virt_to_page(mm->pgd))) {
 		SetPagePinned(page);

-		vm_unmap_aliases();
 		if (!PageHighMem(page)) {
 			make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
 			if (level == PT_PTE && USE_SPLIT_PTLOCKS)
@@ -2026,6 +2024,8 @@ void __init xen_init_mmu_ops(void)
 	x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
 	x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
 	pv_mmu_ops = xen_mmu_ops;
+
+	vmap_lazy_unmap = false;
 }

@@ -2165,8 +2165,6 @@ int xen_create_contiguous_region(unsigned long vstart, unsigned int order,

 	memset((void *) vstart, 0, PAGE_SIZE << order);

-	vm_unmap_aliases();
-
 	spin_lock_irqsave(&xen_reservation_lock, flags);

 	/* 1. Zap current PTEs, remembering MFNs. */
@@ -2204,8 +2202,6 @@ void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)

 	memset((void *) vstart, 0, PAGE_SIZE << order);

-	vm_unmap_aliases();
-
 	spin_lock_irqsave(&xen_reservation_lock, flags);

 	/* 1. Find start MFN of contiguous extent. */
--
cgit v1.2.2
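What vmap_lazy_unmap = false buys, in miniature: with lazy unmap disabled, every vmap free is flushed immediately, so no stale aliases can survive for Xen to trip over, at the cost of the flush batching the commit message mourns. The sketch below is a standalone illustration of that trade-off only, not mm/vmalloc.c code; the batch threshold is invented:

/* Standalone illustration of eager vs. lazy vmap unmapping.
 * All helpers and numbers here are stand-ins for illustration. */
#include <stdbool.h>
#include <stdio.h>

static bool vmap_lazy_unmap = false;  /* what xen_init_mmu_ops() now sets */
static int pending;                   /* areas queued for a lazy purge */

static void flush_tlb_sketch(void)
{
	puts("tlb flush");
}

static void free_vmap_area_sketch(void)
{
	if (!vmap_lazy_unmap) {
		/* Eager path: one flush per free.  No alias can linger,
		 * so vm_unmap_aliases() is never needed -- but batching
		 * is lost. */
		flush_tlb_sketch();
		return;
	}
	if (++pending >= 32) {        /* illustrative batch threshold */
		flush_tlb_sketch();   /* one flush per 32 frees */
		pending = 0;
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		free_vmap_area_sketch();
	return 0;
}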
From bbbe57386e857eb2a8d4abcae71063c819c06ff1 Mon Sep 17 00:00:00 2001
From: Konrad Rzeszutek Wilk
Date: Tue, 9 Feb 2010 14:30:55 -0500
Subject: pci-swiotlb-xen: Add glue code to setup dma_ops utilizing xen_swiotlb_* functions.

We add the glue code that sets up a dma_ops structure with the
xen_swiotlb_* functions.  The code turns on the xen_swiotlb flag when it
detects that it is running under Xen and is either in privileged mode or
was booted with the iommu=soft flag.  It also disables the bare-metal
SWIOTLB when the Xen-SWIOTLB has been enabled.

Note: The Xen-SWIOTLB is only built when CONFIG_XEN is enabled.

Signed-off-by: Konrad Rzeszutek Wilk
Acked-by: Jeremy Fitzhardinge
Cc: FUJITA Tomonori
Cc: Albert Herranz
Cc: Ian Campbell
---
 arch/x86/xen/Makefile          |  1 +
 arch/x86/xen/pci-swiotlb-xen.c | 58 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+)
 create mode 100644 arch/x86/xen/pci-swiotlb-xen.c

(limited to 'arch/x86/xen')

diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 3bb4fc21f4f2..32af238055c3 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_SMP)		+= smp.o
 obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
 obj-$(CONFIG_XEN_DEBUG_FS)	+= debugfs.o

+obj-$(CONFIG_SWIOTLB_XEN)	+= pci-swiotlb-xen.o
diff --git a/arch/x86/xen/pci-swiotlb-xen.c b/arch/x86/xen/pci-swiotlb-xen.c
new file mode 100644
index 000000000000..a013ec9d0c54
--- /dev/null
+++ b/arch/x86/xen/pci-swiotlb-xen.c
@@ -0,0 +1,58 @@
+/* Glue code to lib/swiotlb-xen.c */
+
+#include
+#include
+
+#include
+#include
+
+int xen_swiotlb __read_mostly;
+
+static struct dma_map_ops xen_swiotlb_dma_ops = {
+	.mapping_error = xen_swiotlb_dma_mapping_error,
+	.alloc_coherent = xen_swiotlb_alloc_coherent,
+	.free_coherent = xen_swiotlb_free_coherent,
+	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
+	.map_sg = xen_swiotlb_map_sg_attrs,
+	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
+	.map_page = xen_swiotlb_map_page,
+	.unmap_page = xen_swiotlb_unmap_page,
+	.dma_supported = xen_swiotlb_dma_supported,
+};
+
+/*
+ * pci_xen_swiotlb_detect - set xen_swiotlb to 1 if necessary
+ *
+ * This returns non-zero if we are forced to use xen_swiotlb (by the boot
+ * option).
+ */
+int __init pci_xen_swiotlb_detect(void)
+{
+	/* If running as a PV guest, either iommu=soft or swiotlb=force will
+	 * activate this IOMMU.  If running as PV privileged, activate it
+	 * regardless.
+	 */
+	if ((xen_initial_domain() || swiotlb || swiotlb_force) &&
+	    (xen_pv_domain()))
+		xen_swiotlb = 1;
+
+	/* If we are running under Xen, we MUST disable the native SWIOTLB.
+	 * Don't worry about the swiotlb_force flag activating the native
+	 * SWIOTLB, as the 'swiotlb' flag is the only one turning it on. */
+	if (xen_pv_domain())
+		swiotlb = 0;
+
+	return xen_swiotlb;
+}
+
+void __init pci_xen_swiotlb_init(void)
+{
+	if (xen_swiotlb) {
+		xen_swiotlb_init(1);
+		dma_ops = &xen_swiotlb_dma_ops;
+	}
+}
--
cgit v1.2.2
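For context, the call order this glue expects from the x86 DMA setup path: detection runs first (and may veto the native SWIOTLB by clearing the swiotlb flag), and initialization follows only if detection succeeded. The sketch below is a simplified stand-in for that path, not verbatim kernel code:

/* Hypothetical sketch of the expected call order, loosely modeled on
 * the x86 DMA setup path; not the kernel's actual wiring. */
void __init pci_iommu_alloc_sketch(void)
{
	/* Xen detection runs before the native swiotlb decision so it
	 * can clear the 'swiotlb' flag for PV domains. */
	if (pci_xen_swiotlb_detect())
		pci_xen_swiotlb_init();   /* installs xen_swiotlb_dma_ops */
	else
		pci_swiotlb_detect();     /* fall back to native detection */
}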