author     David Vrabel <david.vrabel@citrix.com>            2011-09-29 11:53:32 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2011-11-16 12:13:08 -0500
commit     cd12909cb576d37311fe35868780e82d5007d0c8 (patch)
tree       70ec60af4feb32087f542a838fe4dce8717f0cd6 /mm
parent     1ea6b8f48918282bdca0b32a34095504ee65bab5 (diff)
xen: map foreign pages for shared rings by updating the PTEs directly
When mapping a foreign page with xenbus_map_ring_valloc() using the
GNTTABOP_map_grant_ref hypercall, set the GNTMAP_contains_pte flag and
pass a pointer to the PTE (in init_mm).

After the page is mapped, the usual fault mechanism can be used to
update additional MMs. This allows the vmalloc_sync_all() call to be
removed from alloc_vm_area().
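
On the xenbus side (not part of the mm/ diff below), the mapping path
then looks roughly like the following sketch. The structure and flag
names come from the existing Xen grant-table API; "dev" and "gnt_ref"
stand in for the arguments of xenbus_map_ring_valloc(), and error
handling beyond the allocation check is omitted:

    struct gnttab_map_grant_ref op = {
            .flags = GNTMAP_host_map | GNTMAP_contains_pte,
            .ref   = gnt_ref,
            .dom   = dev->otherend_id,
    };
    struct vm_struct *area;
    pte_t *pte;

    /* Reserve one page of kernel address space and get its init_mm PTE. */
    area = alloc_vm_area(PAGE_SIZE, &pte);
    if (!area)
            return -ENOMEM;

    /* Point the hypercall at the PTE so it writes the mapping directly. */
    op.host_addr = arbitrary_virt_to_machine(pte).maddr;

    if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
            BUG();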
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
[v1: Squashed fix by Michal for no-mmu case]
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'mm')
 mm/nommu.c   |  2 +-
 mm/vmalloc.c | 27 +++++++++++++--------------
 2 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 73419c55eda6..b982290fd962 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -454,7 +454,7 @@ void __attribute__((weak)) vmalloc_sync_all(void)
  * between processes, it syncs the pagetable across all
  * processes.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	BUG();
 	return NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6f6caf..3231bf332878 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2141,23 +2141,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-	/* apply_to_page_range() does all the hard work. */
+	pte_t ***p = data;
+
+	if (p) {
+		*(*p) = pte;
+		(*p)++;
+	}
 	return 0;
 }
 
 /**
  * alloc_vm_area - allocate a range of kernel address space
  * @size: size of the area
+ * @ptes: returns the PTEs for the address space
  *
  * Returns: NULL on failure, vm_struct on success
  *
  * This function reserves a range of kernel address space, and
  * allocates pagetables to map that range. No actual mappings
- * are created. If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	struct vm_struct *area;
 
@@ -2171,19 +2178,11 @@ struct vm_struct *alloc_vm_area(size_t size)
 	 * of kernel virtual address space and mapped into init_mm.
 	 */
 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				area->size, f, NULL)) {
+				size, f, ptes ? &ptes : NULL)) {
 		free_vm_area(area);
 		return NULL;
 	}
 
-	/*
-	 * If the allocated address space is passed to a hypercall
-	 * before being used then we cannot rely on a page fault to
-	 * trigger an update of the page tables. So sync all the page
-	 * tables here.
-	 */
-	vmalloc_sync_all();
-
 	return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
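
A caller of the new interface supplies one pte_t * slot per page of the
requested area; f() advances through the array as apply_to_page_range()
visits each PTE. A minimal, hypothetical two-page example:

    pte_t *ptes[2];
    struct vm_struct *area;

    /* Reserve two pages; on success ptes[0] and ptes[1] point at the
     * init_mm PTEs backing area->addr and area->addr + PAGE_SIZE. */
    area = alloc_vm_area(2 * PAGE_SIZE, ptes);
    if (!area)
            return -ENOMEM;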