aboutsummaryrefslogtreecommitdiffstats
path: root/mm/nommu.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--mm/nommu.c45
1 files changed, 44 insertions, 1 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index f3bfd015c40b..5d8ae086f74e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,6 +10,7 @@
10 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com> 10 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
11 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org> 11 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
12 * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com> 12 * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
13 * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org>
13 */ 14 */
14 15
15#include <linux/module.h> 16#include <linux/module.h>
@@ -183,6 +184,26 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
183} 184}
184EXPORT_SYMBOL(__vmalloc); 185EXPORT_SYMBOL(__vmalloc);
185 186
187void *vmalloc_user(unsigned long size)
188{
189 void *ret;
190
191 ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
192 PAGE_KERNEL);
193 if (ret) {
194 struct vm_area_struct *vma;
195
196 down_write(&current->mm->mmap_sem);
197 vma = find_vma(current->mm, (unsigned long)ret);
198 if (vma)
199 vma->vm_flags |= VM_USERMAP;
200 up_write(&current->mm->mmap_sem);
201 }
202
203 return ret;
204}
205EXPORT_SYMBOL(vmalloc_user);
206
186struct page *vmalloc_to_page(const void *addr) 207struct page *vmalloc_to_page(const void *addr)
187{ 208{
188 return virt_to_page(addr); 209 return virt_to_page(addr);
@@ -253,10 +274,17 @@ EXPORT_SYMBOL(vmalloc_32);
253 * 274 *
254 * The resulting memory area is 32bit addressable and zeroed so it can be 275 * The resulting memory area is 32bit addressable and zeroed so it can be
255 * mapped to userspace without leaking data. 276 * mapped to userspace without leaking data.
277 *
278 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
279 * remap_vmalloc_range() are permissible.
256 */ 280 */
/*
 * vmalloc_32_user - allocate zeroed, 32bit-addressable, user-mappable memory
 * @size: allocation size in bytes
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);
262 290
@@ -1216,6 +1244,21 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
1216} 1244}
1217EXPORT_SYMBOL(remap_pfn_range); 1245EXPORT_SYMBOL(remap_pfn_range);
1218 1246
1247int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1248 unsigned long pgoff)
1249{
1250 unsigned int size = vma->vm_end - vma->vm_start;
1251
1252 if (!(vma->vm_flags & VM_USERMAP))
1253 return -EINVAL;
1254
1255 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1256 vma->vm_end = vma->vm_start + size;
1257
1258 return 0;
1259}
1260EXPORT_SYMBOL(remap_vmalloc_range);
1261
/* Intentionally empty on nommu: nothing to unplug for swap I/O here. */
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}