author	Alex Nixon <alex.nixon@citrix.com>	2009-02-09 15:05:46 -0500
committer	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>	2010-06-07 15:37:53 -0400
commit	08bbc9da92f7e44b9c208c6a1adba70c403b255e (patch)
tree	75c6cf9422e03990d1fd280b631d00ea4d4dbe4b /arch/x86/xen
parent	19001c8c5bfa032ed45b10dfe48e355f5df88c61 (diff)
xen: Add xen_create_contiguous_region
A memory region must be physically contiguous in order to be accessed
through DMA.  This patch adds xen_create_contiguous_region, which
ensures a region of contiguous virtual memory is also physically
contiguous.

Based on Stephen Tweedie's port of the 2.6.18-xen version.

Remove contiguous_bitmap[] as it's no longer needed.

Ported from linux-2.6.18-xen.hg 707:e410857fd83c

[ Impact: add Xen-internal API to make pages phys-contig ]

Signed-off-by: Alex Nixon <alex.nixon@citrix.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/mmu.c | 201
 1 file changed, 201 insertions(+), 0 deletions(-)
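For orientation before the diff: the commit introduces a pair of Xen-internal calls, and the fragment below is a minimal usage sketch of that API. It is not part of the patch — every name beginning with "ex_" is invented, the order and the 32-bit address limit are arbitrary, and the header assumed to carry the declarations is marked in the comments.

/*
 * Minimal usage sketch -- not part of this patch.  All "ex_" names are
 * hypothetical illustrations of a caller that needs a DMA-able buffer.
 */
#include <linux/gfp.h>
#include <linux/errno.h>
#include <xen/xen-ops.h>	/* assumed declaration site for the two calls */

#define EX_ORDER 4		/* 16 pages; arbitrary, must be <= MAX_CONTIG_ORDER */

static unsigned long ex_buf;	/* hypothetical DMA buffer */

static int ex_setup(void)
{
	ex_buf = __get_free_pages(GFP_KERNEL, EX_ORDER);
	if (!ex_buf)
		return -ENOMEM;

	/*
	 * Ask Xen to swap the backing MFNs for one machine-contiguous
	 * extent below 2^32, so a 32-bit DMA master can address it.  On
	 * failure the callee remaps the original discontiguous frames.
	 */
	if (xen_create_contiguous_region(ex_buf, EX_ORDER, 32)) {
		free_pages(ex_buf, EX_ORDER);
		return -ENOMEM;
	}
	return 0;
}

static void ex_teardown(void)
{
	/* Hand the contiguous extent back before freeing the pages. */
	xen_destroy_contiguous_region(ex_buf, EX_ORDER);
	free_pages(ex_buf, EX_ORDER);
}

One consequence visible in the diff: both entry points memset the region first, so any existing contents are lost; populate the buffer only after xen_create_contiguous_region returns 0. The diff itself follows.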
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 9e0d82fc21e..eb51402dd99 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -53,6 +53,7 @@
 #include <asm/paravirt.h>
 #include <asm/e820.h>
 #include <asm/linkage.h>
+#include <asm/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
@@ -2027,6 +2028,206 @@ void __init xen_init_mmu_ops(void)
 	pv_mmu_ops = xen_mmu_ops;
 }
 
+/* Protected by xen_reservation_lock. */
+#define MAX_CONTIG_ORDER	9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+#define VOID_PTE (mfn_pte(0, __pgprot(0)))
+static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+				unsigned long *in_frames,
+				unsigned long *out_frames)
+{
+	int i;
+	struct multicall_space mcs;
+
+	xen_mc_batch();
+	for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
+		mcs = __xen_mc_entry(0);
+
+		if (in_frames)
+			in_frames[i] = virt_to_mfn(vaddr);
+
+		MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+		set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+
+		if (out_frames)
+			out_frames[i] = virt_to_pfn(vaddr);
+	}
+	xen_mc_issue(0);
+}
+
+/*
+ * Update the pfn-to-mfn mappings for a virtual address range, either to
+ * point to an array of mfns, or contiguously from a single starting
+ * mfn.
+ */
+static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
+				     unsigned long *mfns,
+				     unsigned long first_mfn)
+{
+	unsigned i, limit;
+	unsigned long mfn;
+
+	xen_mc_batch();
+
+	limit = 1u << order;
+	for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
+		struct multicall_space mcs;
+		unsigned flags;
+
+		mcs = __xen_mc_entry(0);
+		if (mfns)
+			mfn = mfns[i];
+		else
+			mfn = first_mfn + i;
+
+		if (i < (limit - 1))
+			flags = 0;
+		else {
+			if (order == 0)
+				flags = UVMF_INVLPG | UVMF_ALL;
+			else
+				flags = UVMF_TLB_FLUSH | UVMF_ALL;
+		}
+
+		MULTI_update_va_mapping(mcs.mc, vaddr,
+				mfn_pte(mfn, PAGE_KERNEL), flags);
+
+		set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+	}
+
+	xen_mc_issue(0);
+}
+
+/*
+ * Perform the hypercall to exchange a region of our pfns to point to
+ * memory with the required contiguous alignment. Takes the pfns as
+ * input, and populates mfns as output.
+ *
+ * Returns a success code indicating whether the hypervisor was able to
+ * satisfy the request or not.
+ */
+static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
+			       unsigned long *pfns_in,
+			       unsigned long extents_out,
+			       unsigned int order_out,
+			       unsigned long *mfns_out,
+			       unsigned int address_bits)
+{
+	long rc;
+	int success;
+
+	struct xen_memory_exchange exchange = {
+		.in = {
+			.nr_extents   = extents_in,
+			.extent_order = order_in,
+			.extent_start = pfns_in,
+			.domid        = DOMID_SELF
+		},
+		.out = {
+			.nr_extents   = extents_out,
+			.extent_order = order_out,
+			.extent_start = mfns_out,
+			.address_bits = address_bits,
+			.domid        = DOMID_SELF
+		}
+	};
+
+	BUG_ON(extents_in << order_in != extents_out << order_out);
+
+	rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+	success = (exchange.nr_exchanged == extents_in);
+
+	BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+	BUG_ON(success && (rc != 0));
+
+	return success;
+}
+
+int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+				 unsigned int address_bits)
+{
+	unsigned long *in_frames = discontig_frames, out_frame;
+	unsigned long flags;
+	int success;
+
+	/*
+	 * Currently an auto-translated guest will not perform I/O, nor will
+	 * it require PAE page directories below 4GB. Therefore any calls to
+	 * this function are redundant and can be ignored.
+	 */
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return 0;
+
+	if (unlikely(order > MAX_CONTIG_ORDER))
+		return -ENOMEM;
+
+	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+	vm_unmap_aliases();
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	/* 1. Zap current PTEs, remembering MFNs. */
+	xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+	/* 2. Get a new contiguous memory extent. */
+	out_frame = virt_to_pfn(vstart);
+	success = xen_exchange_memory(1UL << order, 0, in_frames,
+				      1, order, &out_frame,
+				      address_bits);
+
+	/* 3. Map the new extent in place of old pages. */
+	if (success)
+		xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+	else
+		xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+	return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+{
+	unsigned long *out_frames = discontig_frames, in_frame;
+	unsigned long flags;
+	int success;
+
+	if (xen_feature(XENFEAT_auto_translated_physmap))
+		return;
+
+	if (unlikely(order > MAX_CONTIG_ORDER))
+		return;
+
+	memset((void *) vstart, 0, PAGE_SIZE << order);
+
+	vm_unmap_aliases();
+
+	spin_lock_irqsave(&xen_reservation_lock, flags);
+
+	/* 1. Find start MFN of contiguous extent. */
+	in_frame = virt_to_mfn(vstart);
+
+	/* 2. Zap current PTEs. */
+	xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+	/* 3. Do the exchange for non-contiguous MFNs. */
+	success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+				      0, out_frames, 0);
+
+	/* 4. Map new pages in place of old pages. */
+	if (success)
+		xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+	else
+		xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+	spin_unlock_irqrestore(&xen_reservation_lock, flags);
+}
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct dentry *d_mmu_debug;