path: root/mm/vmalloc.c
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	67
1 file changed, 45 insertions, 22 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3875fa2f0f60..b7259906a806 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1476,10 +1476,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (!addr)
 		return;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr) {
-		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+			addr))
 		return;
-	}
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
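
(Illustration, not part of the patch.) The rewritten check keeps __vunmap()'s behaviour: vfree(NULL) is silently ignored, while a pointer that is not page aligned, i.e. one that cannot have come from vmalloc(), triggers the WARN and is not freed. A minimal caller-side sketch, using a hypothetical buffer "buf":

	void *buf = vmalloc(2 * PAGE_SIZE);	/* vmalloc() returns a page-aligned pointer */

	vfree(NULL);		/* quietly ignored by the !addr check */
	vfree(buf + 16);	/* misaligned: hits the WARN above, nothing is freed */
	vfree(buf);		/* normal case: the area is unmapped and freed */
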
@@ -2148,42 +2147,43 @@ finished:
 }
 
 /**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
  *
  * Returns: 0 for success, -Exxx on failure
  *
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criteria isn't
+ * met.
  *
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
-int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
-						unsigned long pgoff)
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+				void *kaddr, unsigned long size)
 {
 	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr)
+	size = PAGE_ALIGN(size);
+
+	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
 		return -EINVAL;
 
-	area = find_vm_area(addr);
+	area = find_vm_area(kaddr);
 	if (!area)
 		return -EINVAL;
 
 	if (!(area->flags & VM_USERMAP))
 		return -EINVAL;
 
-	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+	if (kaddr + size > area->addr + area->size)
 		return -EINVAL;
 
-	addr += pgoff << PAGE_SHIFT;
 	do {
-		struct page *page = vmalloc_to_page(addr);
+		struct page *page = vmalloc_to_page(kaddr);
 		int ret;
 
 		ret = vm_insert_page(vma, uaddr, page);
@@ -2191,14 +2191,37 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			return ret;
 
 		uaddr += PAGE_SIZE;
-		addr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
+		kaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	} while (size > 0);
 
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
 	return 0;
 }
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ *
+ * Returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	return remap_vmalloc_range_partial(vma, vma->vm_start,
+					   addr + (pgoff << PAGE_SHIFT),
+					   vma->vm_end - vma->vm_start);
+}
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*