Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index b669aa6f6ca..1d8b32f0713 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1633,6 +1633,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
 		goto fail;
 
 	addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+	if (!addr)
+		return NULL;
 
 	/*
 	 * In this function, newly allocated vm_struct is not added
@@ -2141,23 +2143,30 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 
 static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
 {
-	/* apply_to_page_range() does all the hard work. */
+	pte_t ***p = data;
+
+	if (p) {
+		*(*p) = pte;
+		(*p)++;
+	}
 	return 0;
 }
 
 /**
  * alloc_vm_area - allocate a range of kernel address space
  * @size: size of the area
+ * @ptes: returns the PTEs for the address space
  *
  * Returns: NULL on failure, vm_struct on success
  *
  * This function reserves a range of kernel address space, and
  * allocates pagetables to map that range.  No actual mappings
- * are created.  If the kernel address space is not shared
- * between processes, it syncs the pagetable across all
- * processes.
+ * are created.
+ *
+ * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
+ * allocated for the VM area are returned.
  */
-struct vm_struct *alloc_vm_area(size_t size)
+struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
 {
 	struct vm_struct *area;
 
@@ -2171,19 +2180,11 @@ struct vm_struct *alloc_vm_area(size_t size)
 	 * of kernel virtual address space and mapped into init_mm.
 	 */
 	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
-				area->size, f, NULL)) {
+				size, f, ptes ? &ptes : NULL)) {
 		free_vm_area(area);
 		return NULL;
 	}
 
-	/*
-	 * If the allocated address space is passed to a hypercall
-	 * before being used then we cannot rely on a page fault to
-	 * trigger an update of the page tables. So sync all the page
-	 * tables here.
-	 */
-	vmalloc_sync_all();
-
 	return area;
 }
 EXPORT_SYMBOL_GPL(alloc_vm_area);
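
For readers coming from the caller's side, here is a minimal sketch of how the new interface might be used, assuming only what this patch and standard kernel headers provide (alloc_vm_area(), struct vm_struct, pte_t, PAGE_SHIFT/PAGE_SIZE); the wrapper example_map_area() and its calling convention are hypothetical, not part of the patch:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Hypothetical caller: reserve nr_pages pages of kernel address space and
 * collect a pointer to each PTE backing the range.  alloc_vm_area() fills
 * ptes[0..nr_pages-1] through its internal callback f(); no mappings are
 * created, so the caller is expected to populate the PTEs itself (for
 * example via a hypercall) before touching the area.
 */
static struct vm_struct *example_map_area(unsigned int nr_pages, pte_t **ptes)
{
	struct vm_struct *area;

	area = alloc_vm_area((size_t)nr_pages << PAGE_SHIFT, ptes);
	if (!area)
		return NULL;

	/*
	 * area->addr is the start of the reserved range; ptes[i] points at
	 * the init_mm PTE covering area->addr + i * PAGE_SIZE.
	 */
	return area;
}

Passing NULL for @ptes keeps the old behaviour of merely pre-allocating the page tables (the callback tolerates a NULL data pointer), so existing callers only need the extra argument. The vmalloc_sync_all() call is dropped here; per the comment being removed, it served callers that handed the area to a hypercall before using it, and such callers can presumably update the returned PTEs directly instead.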