author		Jeremy Fitzhardinge <jeremy@xensource.com>	2007-07-17 21:37:04 -0400
committer	Jeremy Fitzhardinge <jeremy@goop.org>		2007-07-18 11:47:41 -0400
commit		5f4352fbffd6c45123dbce9e195efd54df4e177e (patch)
tree		e2a0316e2f2d22c266e7cae3015ddc0f2f77f64f
parent		bdef40a6af64a0140a65df49bf504124d57094a9 (diff)
Allocate and free vmalloc areas
Allocate/release a chunk of vmalloc address space:
alloc_vm_area reserves a chunk of address space, and makes sure all
the pagetables are constructed for that address range - but no pages.
free_vm_area releases the address space range.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ian Pratt <ian.pratt@xensource.com>
Signed-off-by: Christian Limpach <Christian.Limpach@cl.cam.ac.uk>
Signed-off-by: Chris Wright <chrisw@sous-sol.org>
Cc: "Jan Beulich" <JBeulich@novell.com>
Cc: "Andi Kleen" <ak@muc.de>
-rw-r--r--	include/linux/vmalloc.h	 4
-rw-r--r--	mm/vmalloc.c		53
2 files changed, 57 insertions, 0 deletions
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 132b260aef1e..c2b10cae5da5 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -70,6 +70,10 @@ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
+/* Allocate/destroy a 'vmalloc' VM area. */
+extern struct vm_struct *alloc_vm_area(size_t size);
+extern void free_vm_area(struct vm_struct *area);
+
 /*
  *	Internals.  Don't use..
  */
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8e05a11155c9..3130c343088f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -767,3 +767,56 @@ EXPORT_SYMBOL(remap_vmalloc_range);
 void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
+
+
+static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
+{
+	/* apply_to_page_range() does all the hard work. */
+	return 0;
+}
+
+/**
+ *	alloc_vm_area - allocate a range of kernel address space
+ *	@size:		size of the area
+ *	@returns:	NULL on failure, vm_struct on success
+ *
+ *	This function reserves a range of kernel address space, and
+ *	allocates pagetables to map that range.  No actual mappings
+ *	are created.  If the kernel address space is not shared
+ *	between processes, it syncs the pagetable across all
+ *	processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	struct vm_struct *area;
+
+	area = get_vm_area(size, VM_IOREMAP);
+	if (area == NULL)
+		return NULL;
+
+	/*
+	 * This ensures that page tables are constructed for this region
+	 * of kernel virtual address space and mapped into init_mm.
+	 */
+	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
+				area->size, f, NULL)) {
+		free_vm_area(area);
+		return NULL;
+	}
+
+	/* Make sure the pagetables are constructed in process kernel
+	   mappings */
+	vmalloc_sync_all();
+
+	return area;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	struct vm_struct *ret;
+	ret = remove_vm_area(area->addr);
+	BUG_ON(ret != area);
+	kfree(area);
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
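
The empty callback f() looks odd at first: apply_to_page_range() allocates any missing pgd/pud/pmd/pte levels as it walks the range and then invokes the callback on each pte slot, so a no-op callback is enough to force pagetable construction. As a sketch of the same API doing visible work, here is a hypothetical helper (names count_present/count_mapped_ptes are made up, and the includes are an assumption) using the 2007-era pte_fn_t signature shown in the patch:

#include <linux/mm.h>
#include <linux/sched.h>	/* init_mm */
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

/* Count how many pte slots in a vmalloc area already hold a mapping. */
static int count_present(pte_t *pte, struct page *pmd_page,
			 unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;		/* non-zero would abort the walk */
}

static unsigned long count_mapped_ptes(struct vm_struct *area)
{
	unsigned long count = 0;

	/* Walks init_mm's pagetables over the area, calling the callback
	 * once per pte; missing intermediate levels are allocated along
	 * the way, which is exactly what alloc_vm_area() relies on. */
	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
				area->size, count_present, &count))
		return 0;	/* walk failed (allocation error) */
	return count;
}
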