Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 103 +++++++++++++++++++++++++++----------------------------
 1 file changed, 51 insertions(+), 52 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d365724feb05..91a10472a39a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -292,7 +292,7 @@ static struct vmap_area *__find_vmap_area(unsigned long addr)
 		va = rb_entry(n, struct vmap_area, rb_node);
 		if (addr < va->va_start)
 			n = n->rb_left;
-		else if (addr > va->va_start)
+		else if (addr >= va->va_end)
 			n = n->rb_right;
 		else
 			return va;
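
With the comparison against va->va_end, the lookup now matches any address inside [va_start, va_end), not just an area's first byte, so callers passing an interior address no longer walk off the tree. A minimal userspace sketch of the same interval-lookup pattern (a plain BST with a hypothetical node type, not the kernel's rb-tree API):

#include <assert.h>
#include <stddef.h>

/* Hypothetical stand-in for struct vmap_area: a BST node owning the
 * half-open range [start, end), ordered by start. */
struct area {
	unsigned long start, end;
	struct area *left, *right;
};

/* Mirror of the fixed __find_vmap_area(): go left when addr precedes the
 * area, go right only when addr is at or past the area's end, and match
 * anywhere inside [start, end). */
static struct area *find_area(struct area *n, unsigned long addr)
{
	while (n) {
		if (addr < n->start)
			n = n->left;
		else if (addr >= n->end)	/* was: addr > n->start */
			n = n->right;
		else
			return n;
	}
	return NULL;
}

int main(void)
{
	struct area a = { .start = 0x1000, .end = 0x3000 };

	assert(find_area(&a, 0x1000) == &a);	/* first byte, as before */
	assert(find_area(&a, 0x2000) == &a);	/* interior byte now matches */
	assert(find_area(&a, 0x3000) == NULL);	/* end is exclusive */
	return 0;
}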
@@ -1322,13 +1322,6 @@ static void clear_vm_unlist(struct vm_struct *vm)
 	vm->flags &= ~VM_UNLIST;
 }
 
-static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
-			      unsigned long flags, const void *caller)
-{
-	setup_vmalloc_vm(vm, va, flags, caller);
-	clear_vm_unlist(vm);
-}
-
 static struct vm_struct *__get_vm_area_node(unsigned long size,
 		unsigned long align, unsigned long flags, unsigned long start,
 		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
@@ -1337,16 +1330,8 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 	struct vm_struct *area;
 
 	BUG_ON(in_interrupt());
-	if (flags & VM_IOREMAP) {
-		int bit = fls(size);
-
-		if (bit > IOREMAP_MAX_ORDER)
-			bit = IOREMAP_MAX_ORDER;
-		else if (bit < PAGE_SHIFT)
-			bit = PAGE_SHIFT;
-
-		align = 1ul << bit;
-	}
+	if (flags & VM_IOREMAP)
+		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
 
 	size = PAGE_ALIGN(size);
 	if (unlikely(!size))
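
clamp() replaces the hand-rolled two-sided bounding of the allocation order, keeping the ioremap alignment between one page and the arch maximum in a single expression. A compilable userspace sketch of the same computation (clamp, fls_ and the IOREMAP_MAX_ORDER value are local stand-ins for the kernel's versions; the real max order is arch-dependent):

#include <stdio.h>

#define PAGE_SHIFT		12
#define IOREMAP_MAX_ORDER	24	/* illustrative; arch-dependent in the kernel */

/* Local stand-in for the kernel's clamp() from <linux/kernel.h>. */
#define clamp(val, lo, hi) \
	((val) < (lo) ? (lo) : ((val) > (hi) ? (hi) : (val)))

/* 1-based index of the most significant set bit, like the kernel's fls(). */
static int fls_(unsigned long x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long sizes[] = { 1ul << 12, 1ul << 16, 1ul << 28 };

	for (int i = 0; i < 3; i++) {
		/* The alignment tracks the mapping size but is clamped so it
		 * never drops below one page or exceeds the ioremap maximum. */
		unsigned long align = 1ul << clamp(fls_(sizes[i]),
						   PAGE_SHIFT, IOREMAP_MAX_ORDER);

		printf("size %#lx -> align %#lx\n", sizes[i], align);
	}
	return 0;
}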
@@ -1367,16 +1352,7 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
 		return NULL;
 	}
 
-	/*
-	 * When this function is called from __vmalloc_node_range,
-	 * we add VM_UNLIST flag to avoid accessing uninitialized
-	 * members of vm_struct such as pages and nr_pages fields.
-	 * They will be set later.
-	 */
-	if (flags & VM_UNLIST)
-		setup_vmalloc_vm(area, va, flags, caller);
-	else
-		insert_vmalloc_vm(area, va, flags, caller);
+	setup_vmalloc_vm(area, va, flags, caller);
 
 	return area;
 }
@@ -1476,10 +1452,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (!addr)
 		return;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr) {
-		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+			addr))
 		return;
-	}
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
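
Because WARN() hands back its condition, the alignment check, the diagnostic and the early return collapse into one statement, and the stray KERN_ERR prefix drops out of the format string. A userspace sketch of the pattern with a stand-in WARN() (the real macro in <asm-generic/bug.h> also dumps a backtrace; the statement expression is a GNU C extension, as in the kernel):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE	4096ul
#define PAGE_ALIGNED(p)	(((unsigned long)(p) & (PAGE_SIZE - 1)) == 0)

/* Stand-in for the kernel's WARN(): print the message when cond is true,
 * then hand cond back so it can serve directly as an if() predicate. */
#define WARN(cond, ...)						\
({								\
	bool __c = (cond);					\
	if (__c)						\
		fprintf(stderr, __VA_ARGS__);			\
	__c;							\
})

static void vfree_checked(const void *addr)
{
	/* Check, warn and bail out in one statement, no extra braces. */
	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
			addr))
		return;

	/* ... the actual free path would run here ... */
}

int main(void)
{
	vfree_checked((void *)0x1001);	/* misaligned: warns and returns */
	vfree_checked((void *)0x2000);	/* aligned: proceeds silently */
	return 0;
}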
@@ -2148,42 +2123,43 @@ finished:
 }
 
 /**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
  *
  * Returns: 0 for success, -Exxx on failure
  *
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criteria isn't
+ * met.
  *
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
-int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
-						unsigned long pgoff)
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+				void *kaddr, unsigned long size)
 {
 	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr)
+	size = PAGE_ALIGN(size);
+
+	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
 		return -EINVAL;
 
-	area = find_vm_area(addr);
+	area = find_vm_area(kaddr);
 	if (!area)
 		return -EINVAL;
 
 	if (!(area->flags & VM_USERMAP))
 		return -EINVAL;
 
-	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+	if (kaddr + size > area->addr + area->size)
 		return -EINVAL;
 
-	addr += pgoff << PAGE_SHIFT;
 	do {
-		struct page *page = vmalloc_to_page(addr);
+		struct page *page = vmalloc_to_page(kaddr);
 		int ret;
 
 		ret = vm_insert_page(vma, uaddr, page);
@@ -2191,14 +2167,37 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			return ret;
 
 		uaddr += PAGE_SIZE;
-		addr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
+		kaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	} while (size > 0);
 
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
 	return 0;
 }
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ *
+ * Returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	return remap_vmalloc_range_partial(vma, vma->vm_start,
+					   addr + (pgoff << PAGE_SHIFT),
+					   vma->vm_end - vma->vm_start);
+}
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*
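
remap_vmalloc_range() is now a thin wrapper, and the exported _partial variant lets a caller map a page-aligned window of a vmalloc'ed buffer at an arbitrary user address, which the full-vma interface could not express. A hedged sketch of a driver mmap handler built on it; snap_buf, SNAP_BUF_PAGES and snap_mmap are made-up names, and the buffer is assumed to come from vmalloc_user() so that VM_USERMAP is set:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define SNAP_BUF_PAGES	1024

/* Hypothetical driver buffer; assumed allocated elsewhere with
 * vmalloc_user(SNAP_BUF_PAGES << PAGE_SHIFT), which zeroes the memory
 * and sets VM_USERMAP as remap_vmalloc_range_partial() requires. */
static void *snap_buf;

static int snap_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long len = vma->vm_end - vma->vm_start;
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;

	if (off + len > ((unsigned long)SNAP_BUF_PAGES << PAGE_SHIFT))
		return -EINVAL;

	/* Map len bytes of the buffer, starting at off, over the vma.
	 * Unlike remap_vmalloc_range(), the user start address and the
	 * length are explicit, so a caller could also fill only part of
	 * the vma from vmalloc memory and the rest from elsewhere. */
	return remap_vmalloc_range_partial(vma, vma->vm_start,
					   snap_buf + off, len);
}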
@@ -2512,8 +2511,8 @@ found:
 
 	/* insert all vm's */
 	for (area = 0; area < nr_vms; area++)
-		insert_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
-				  pcpu_get_vm_areas);
+		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
+				 pcpu_get_vm_areas);
 
 	kfree(vas);
 	return vms;