Diffstat (limited to 'mm/nommu.c'):
 mm/nommu.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 48 insertions(+), 5 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index b989cb928a7c..5d8ae086f74e 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,6 +10,7 @@
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
+ * Copyright (c) 2007 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -167,7 +168,7 @@ EXPORT_SYMBOL(get_user_pages);
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
-void vfree(void *addr)
+void vfree(const void *addr)
 {
         kfree(addr);
 }
@@ -183,13 +184,33 @@ void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 }
 EXPORT_SYMBOL(__vmalloc);
 
-struct page * vmalloc_to_page(void *addr)
+void *vmalloc_user(unsigned long size)
+{
+        void *ret;
+
+        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+                        PAGE_KERNEL);
+        if (ret) {
+                struct vm_area_struct *vma;
+
+                down_write(&current->mm->mmap_sem);
+                vma = find_vma(current->mm, (unsigned long)ret);
+                if (vma)
+                        vma->vm_flags |= VM_USERMAP;
+                up_write(&current->mm->mmap_sem);
+        }
+
+        return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+struct page *vmalloc_to_page(const void *addr)
 {
         return virt_to_page(addr);
 }
 EXPORT_SYMBOL(vmalloc_to_page);
 
-unsigned long vmalloc_to_pfn(void *addr)
+unsigned long vmalloc_to_pfn(const void *addr)
 {
         return page_to_pfn(virt_to_page(addr));
 }
@@ -253,10 +274,17 @@ EXPORT_SYMBOL(vmalloc_32);
  *
  * The resulting memory area is 32bit addressable and zeroed so it can be
  * mapped to userspace without leaking data.
+ *
+ * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
+ * remap_vmalloc_range() are permissible.
  */
 void *vmalloc_32_user(unsigned long size)
 {
-        return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+        /*
+         * We'll have to sort out the ZONE_DMA bits for 64-bit,
+         * but for now this can simply use vmalloc_user() directly.
+         */
+        return vmalloc_user(size);
 }
 EXPORT_SYMBOL(vmalloc_32_user);
 
@@ -267,7 +295,7 @@ void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_
 }
 EXPORT_SYMBOL(vmap);
 
-void vunmap(void *addr)
+void vunmap(const void *addr)
 {
         BUG();
 }
@@ -1216,6 +1244,21 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+                        unsigned long pgoff)
+{
+        unsigned int size = vma->vm_end - vma->vm_start;
+
+        if (!(vma->vm_flags & VM_USERMAP))
+                return -EINVAL;
+
+        vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
+        vma->vm_end = vma->vm_start + size;
+
+        return 0;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
 void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
 {
 }
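
Usage note (not part of the commit): the intended pairing of the two new exports is that a driver allocates its buffer with vmalloc_user() (or vmalloc_32_user()), which zeroes the memory and tags the corresponding VMA with VM_USERMAP, and then points the userspace mapping at that buffer from its mmap handler via remap_vmalloc_range(). Below is a minimal sketch of that pattern; the mydev_* names and the buffer size are hypothetical, invented for illustration.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define MYDEV_BUF_SIZE  (16 * PAGE_SIZE)        /* hypothetical size */

static void *mydev_buf;                         /* allocated once at setup */

static int mydev_setup(void)
{
        /* Zeroed on allocation, so mapping it leaks no kernel data. */
        mydev_buf = vmalloc_user(MYDEV_BUF_SIZE);
        return mydev_buf ? 0 : -ENOMEM;
}

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /*
         * remap_vmalloc_range() returns -EINVAL unless VM_USERMAP is
         * set, so the buffer must come from vmalloc_user() rather than
         * plain vmalloc().
         */
        return remap_vmalloc_range(vma, mydev_buf, vma->vm_pgoff);
}

static const struct file_operations mydev_fops = {
        .mmap   = mydev_mmap,
};

On nommu there are no page tables to populate, so remap_vmalloc_range() simply rewrites vm_start/vm_end to point into the vmalloc buffer, which is why the VM_USERMAP permission check is all the gating it needs.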