-rw-r--r--  include/linux/vmalloc.h |   8
-rw-r--r--  mm/vmalloc.c            | 122
2 files changed, 128 insertions(+), 2 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1d5577b2b752..f6024ab4eff0 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,13 @@
 #include <linux/spinlock.h>
 #include <asm/page.h>           /* pgprot_t */
 
+struct vm_area_struct;
+
 /* bits in vm_struct->flags */
 #define VM_IOREMAP      0x00000001      /* ioremap() and friends */
 #define VM_ALLOC        0x00000002      /* vmalloc() */
 #define VM_MAP          0x00000004      /* vmap()ed pages */
+#define VM_USERMAP      0x00000008      /* suitable for remap_vmalloc_range */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -32,9 +35,11 @@ struct vm_struct {
  * Highlevel APIs for driver use
  */
 extern void *vmalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
                         pgprot_t prot);
@@ -45,6 +50,9 @@ extern void vfree(void *addr);
 extern void *vmap(struct page **pages, unsigned int count,
                         unsigned long flags, pgprot_t prot);
 extern void vunmap(void *addr);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+                        unsigned long pgoff);
 
 /*
  * Lowlevel-APIs (not for driver use!)
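
Taken together, the declarations above form the whole of the new interface: allocate with vmalloc_user() or vmalloc_32_user(), hand the buffer to userspace with remap_vmalloc_range() from an mmap handler, and free it with a plain vfree(). Below is a minimal sketch of the allocation side as a driver might use it; the my_dev_* names, my_buf and MY_BUF_SIZE are illustrative and not part of this patch:

#include <linux/fs.h>
#include <linux/vmalloc.h>

#define MY_BUF_SIZE     (64 * 1024)     /* hypothetical buffer size */

static void *my_buf;

static int my_dev_open(struct inode *inode, struct file *file)
{
        /* zeroed pages, area flagged VM_USERMAP: safe to expose to userspace */
        my_buf = vmalloc_user(MY_BUF_SIZE);
        if (!my_buf)
                return -ENOMEM;
        return 0;
}

static int my_dev_release(struct inode *inode, struct file *file)
{
        vfree(my_buf);          /* ordinary vfree(); no special teardown */
        my_buf = NULL;
        return 0;
}

The matching mmap handler is sketched after the mm/vmalloc.c hunks below.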
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34eb..35f8553f893a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -257,6 +257,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 }
 
 /* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+        struct vm_struct *tmp;
+
+        for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+                if (tmp->addr == addr)
+                        break;
+        }
+
+        return tmp;
+}
+
+/* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
         struct vm_struct **p, *tmp;
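
__find_vm_area() takes no lock of its own; as its comment says, callers must hold vmlist_lock across the walk, and the lookup is a linear scan of the singly linked vmlist. Since the helper is static, the pattern below only makes sense inside mm/vmalloc.c; mark_usermap() is an illustrative name, and this is essentially what vmalloc_user() does in the next hunk:

/* illustrative only: flag an existing vmalloc area as user-mappable */
static void mark_usermap(void *ptr)
{
        struct vm_struct *area;

        write_lock(&vmlist_lock);
        area = __find_vm_area(ptr);     /* NULL if ptr is not the start of an area */
        if (area)
                area->flags |= VM_USERMAP;
        write_unlock(&vmlist_lock);
}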
@@ -498,11 +511,33 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
         return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
 /**
+ * vmalloc_user - allocate virtually contiguous memory which has
+ *                been zeroed so it can be mapped to userspace without
+ *                leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+        struct vm_struct *area;
+        void *ret;
+
+        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+        write_lock(&vmlist_lock);
+        area = __find_vm_area(ret);
+        area->flags |= VM_USERMAP;
+        write_unlock(&vmlist_lock);
+
+        return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
  * vmalloc_node - allocate memory on a specific node
  *
  * @size: allocation size
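
One caveat in vmalloc_user() as committed: __vmalloc() returns NULL on failure, __find_vm_area(NULL) then returns NULL, and the unconditional area->flags dereference oopses. A sketch of the obvious guard, which later kernels adopt; the same caveat applies to vmalloc_32_user() further down:

void *vmalloc_user(unsigned long size)
{
        struct vm_struct *area;
        void *ret;

        ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
        if (ret) {      /* only flag the area if the allocation succeeded */
                write_lock(&vmlist_lock);
                area = __find_vm_area(ret);
                area->flags |= VM_USERMAP;
                write_unlock(&vmlist_lock);
        }
        return ret;
}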
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
         return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ * vmalloc_32_user - allocate virtually contiguous memory (32bit
+ *                   addressable) which is zeroed so it can be
+ *                   mapped to userspace without leaking data.
+ *
+ * @size: allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+        struct vm_struct *area;
+        void *ret;
+
+        ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+        write_lock(&vmlist_lock);
+        area = __find_vm_area(ret);
+        area->flags |= VM_USERMAP;
+        write_unlock(&vmlist_lock);
+
+        return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
         struct vm_struct *tmp;
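
vmalloc_32_user() differs from vmalloc_user() only in the gfp mask: dropping __GFP_HIGHMEM keeps the pages in the directly mapped low memory, which is what makes them 32-bit addressable on highmem configurations. Usage is the same; an illustrative allocation for a device limited to 32-bit DMA addresses (dma_buf and DMA_BUF_SIZE are hypothetical):

        /* user-mappable buffer for a device that can only address 32 bits */
        dma_buf = vmalloc_32_user(DMA_BUF_SIZE);
        if (!dma_buf)
                return -ENOMEM;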
@@ -630,3 +687,64 @@ finished:
         read_unlock(&vmlist_lock);
         return buf - buf_start;
 }
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ *
+ * @vma:     vma to cover (map full range of vma)
+ * @addr:    vmalloc memory
+ * @pgoff:   number of pages into addr before first page to map
+ * @returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+                        unsigned long pgoff)
+{
+        struct vm_struct *area;
+        unsigned long uaddr = vma->vm_start;
+        unsigned long usize = vma->vm_end - vma->vm_start;
+        int ret;
+
+        if ((PAGE_SIZE-1) & (unsigned long)addr)
+                return -EINVAL;
+
+        read_lock(&vmlist_lock);
+        area = __find_vm_area(addr);
+        if (!area)
+                goto out_einval_locked;
+
+        if (!(area->flags & VM_USERMAP))
+                goto out_einval_locked;
+
+        if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+                goto out_einval_locked;
+        read_unlock(&vmlist_lock);
+
+        addr += pgoff << PAGE_SHIFT;
+        do {
+                struct page *page = vmalloc_to_page(addr);
+                ret = vm_insert_page(vma, uaddr, page);
+                if (ret)
+                        return ret;
+
+                uaddr += PAGE_SIZE;
+                addr += PAGE_SIZE;
+                usize -= PAGE_SIZE;
+        } while (usize > 0);
+
+        /* Prevent "things" like memory migration? VM_flags need a cleanup... */
+        vma->vm_flags |= VM_RESERVED;
+
+        return ret;
+
+out_einval_locked:
+        read_unlock(&vmlist_lock);
+        return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
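
Note the size check above compares against area->size - PAGE_SIZE: the vmalloc core pads each area with one guard page, so the window requested by the vma (its length plus pgoff pages) has to fit within the real allocation. To close the loop with the allocation sketch after the header hunks, a hypothetical mmap file operation would be little more than:

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        /*
         * Map the whole vma, starting vm_pgoff pages into my_buf.
         * remap_vmalloc_range() returns -EINVAL unless the buffer
         * came from vmalloc_user()/vmalloc_32_user() (VM_USERMAP set)
         * and the requested window fits inside it.
         */
        return remap_vmalloc_range(vma, my_buf, vma->vm_pgoff);
}

Letting userspace pick the offset via vm_pgoff is safe here because the function validates it against the area size before inserting any pages.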