author	Nick Piggin <npiggin@suse.de>	2006-06-23 05:03:20 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:42:49 -0400
commit	833423143c3a7c6545e409d65febd0d92deb351b
tree	13a1881f1ffd6e546e80a2ec04b1ac44ad145298
parent	762834e8bf46bf41ce9034d062a7c1f8563175f3
[PATCH] mm: introduce remap_vmalloc_range()
Add remap_vmalloc_range, vmalloc_user, and vmalloc_32_user so that drivers
can have a nice interface for remapping vmalloc memory.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
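
As a rough illustration of the intended use (not part of the patch): a driver allocates its buffer with vmalloc_user(), which zeroes the pages and marks the area VM_USERMAP, then hands it to remap_vmalloc_range() from its mmap file operation. The driver name, buffer size, and file_operations wiring below are hypothetical.

```c
/*
 * Illustrative sketch only -- not from this patch. A hypothetical
 * driver exporting a vmalloc'ed buffer via remap_vmalloc_range().
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

#define EXAMPLE_BUF_SIZE	(64 * PAGE_SIZE)	/* hypothetical size */

static void *example_buf;

static int example_open(struct inode *inode, struct file *file)
{
	if (!example_buf) {
		/* Zeroed allocation, flagged VM_USERMAP so it may be remapped. */
		example_buf = vmalloc_user(EXAMPLE_BUF_SIZE);
		if (!example_buf)
			return -ENOMEM;
	}
	return 0;
}

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Cover the whole vma, starting vma->vm_pgoff pages into the buffer. */
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}

static const struct file_operations example_fops = {
	.owner	= THIS_MODULE,
	.open	= example_open,
	.mmap	= example_mmap,
};
```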
 include/linux/vmalloc.h |   8 ++
 mm/vmalloc.c            | 122 ++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 128 insertions(+), 2 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 1d5577b2b752..f6024ab4eff0 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -4,10 +4,13 @@
 #include <linux/spinlock.h>
 #include <asm/page.h>		/* pgprot_t */
 
+struct vm_area_struct;
+
 /* bits in vm_struct->flags */
 #define VM_IOREMAP	0x00000001	/* ioremap() and friends */
 #define VM_ALLOC	0x00000002	/* vmalloc() */
 #define VM_MAP		0x00000004	/* vmap()ed pages */
+#define VM_USERMAP	0x00000008	/* suitable for remap_vmalloc_range */
 /* bits [20..32] reserved for arch specific ioremap internals */
 
 /*
@@ -32,9 +35,11 @@ struct vm_struct {
  *	Highlevel APIs for driver use
  */
 extern void *vmalloc(unsigned long size);
+extern void *vmalloc_user(unsigned long size);
 extern void *vmalloc_node(unsigned long size, int node);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
+extern void *vmalloc_32_user(unsigned long size);
 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
 extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask,
 				pgprot_t prot);
@@ -45,6 +50,9 @@ extern void vfree(void *addr);
 extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(void *addr);
+
+extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+							unsigned long pgoff);
 
 /*
  *	Lowlevel-APIs (not for driver use!)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c0504f1e34eb..35f8553f893a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -257,6 +257,19 @@ struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int
 }
 
 /* Caller must hold vmlist_lock */
+static struct vm_struct *__find_vm_area(void *addr)
+{
+	struct vm_struct *tmp;
+
+	for (tmp = vmlist; tmp != NULL; tmp = tmp->next) {
+		if (tmp->addr == addr)
+			break;
+	}
+
+	return tmp;
+}
+
+/* Caller must hold vmlist_lock */
 struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
@@ -498,11 +511,33 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
 	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
 }
 EXPORT_SYMBOL(vmalloc);
 
 /**
+ *	vmalloc_user  -  allocate virtually contiguous memory which has
+ *			 been zeroed so it can be mapped to userspace without
+ *			 leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_user);
+
+/**
  *	vmalloc_node  -  allocate memory on a specific node
  *
  *	@size:		allocation size
@@ -516,7 +551,7 @@ EXPORT_SYMBOL(vmalloc);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
 	return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -556,6 +591,28 @@ void *vmalloc_32(unsigned long size)
 }
 EXPORT_SYMBOL(vmalloc_32);
 
+/**
+ *	vmalloc_32_user  -  allocate virtually contiguous memory (32bit
+ *			    addressable) which is zeroed so it can be
+ *			    mapped to userspace without leaking data.
+ *
+ *	@size:		allocation size
+ */
+void *vmalloc_32_user(unsigned long size)
+{
+	struct vm_struct *area;
+	void *ret;
+
+	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	write_lock(&vmlist_lock);
+	area = __find_vm_area(ret);
+	area->flags |= VM_USERMAP;
+	write_unlock(&vmlist_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(vmalloc_32_user);
+
 long vread(char *buf, char *addr, unsigned long count)
 {
 	struct vm_struct *tmp;
@@ -630,3 +687,64 @@ finished:
 	read_unlock(&vmlist_lock);
 	return buf - buf_start;
 }
+
+/**
+ *	remap_vmalloc_range  -  map vmalloc pages to userspace
+ *
+ *	@vma:		vma to cover (map full range of vma)
+ *	@addr:		vmalloc memory
+ *	@pgoff:		number of pages into addr before first page to map
+ *	@returns:	0 for success, -Exxx on failure
+ *
+ *	This function checks that addr is a valid vmalloc'ed area, and
+ *	that it is big enough to cover the vma. Will return failure if
+ *	that criteria isn't met.
+ *
+ *	Similar to remap_pfn_range (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+						unsigned long pgoff)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if ((PAGE_SIZE-1) & (unsigned long)addr)
+		return -EINVAL;
+
+	read_lock(&vmlist_lock);
+	area = __find_vm_area(addr);
+	if (!area)
+		goto out_einval_locked;
+
+	if (!(area->flags & VM_USERMAP))
+		goto out_einval_locked;
+
+	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+		goto out_einval_locked;
+	read_unlock(&vmlist_lock);
+
+	addr += pgoff << PAGE_SHIFT;
+	do {
+		struct page *page = vmalloc_to_page(addr);
+		ret = vm_insert_page(vma, uaddr, page);
+		if (ret)
+			return ret;
+
+		uaddr += PAGE_SIZE;
+		addr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	/* Prevent "things" like memory migration? VM_flags need a cleanup... */
+	vma->vm_flags |= VM_RESERVED;
+
+	return ret;
+
+out_einval_locked:
+	read_unlock(&vmlist_lock);
+	return -EINVAL;
+}
+EXPORT_SYMBOL(remap_vmalloc_range);
+
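A note on the size check above: vmalloc areas carry a trailing guard page, which area->size includes, so the vma (plus the pgoff offset) must fit within area->size - PAGE_SIZE. From userspace, the remapped buffer is reached with an ordinary mmap() of the device node. A hypothetical counterpart to the driver sketch above (the device path is made up):

```c
/* Userspace counterpart -- illustrative; the device path is hypothetical. */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/example", O_RDWR);
	if (fd < 0)
		return 1;

	/* Length plus offset must fit in the vmalloc area (minus its
	 * guard page), or the driver's mmap fails with EINVAL. */
	size_t len = 4096;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		close(fd);
		return 1;
	}

	buf[0] = 1;	/* pages arrive zeroed, thanks to vmalloc_user() */

	munmap(buf, len);
	close(fd);
	return 0;
}
```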