author		Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:45 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-24 15:52:45 -0500
commit		0edcf8d6926f4038443dbc24e319530177ca0353 (patch)
tree		6010af62f73d01ab673d5106f310eaf4f4228e32 /mm/vmalloc.c
parent		87b203079ed949de52f0d92aeae20e5e0116c12f (diff)
parent		40150d37be7f7949b2ec07d511244da856647d84 (diff)
Merge branch 'tj-percpu' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/misc into core/percpu

Conflicts:
	arch/x86/include/asm/pgtable.h
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	94
1 file changed, 91 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 903cad46e796..fb6f59935fb2 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-				pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
 	return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+			   pgprot_t prot, struct page **pages)
+{
+	int ret;
+
+	ret = vmap_page_range_noflush(start, end, prot, pages);
+	flush_cache_vmap(start, end);
+	return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
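
/*
 * Editor's sketch, not part of the patch: the point of the _noflush
 * split is that a caller can install several mappings and pay for a
 * single flush_cache_vmap() over the union.  vmap_page_range_noflush()
 * is static, so such a caller would live in this file; the function
 * below and its parameters are hypothetical.
 */
static int map_two_ranges(unsigned long lo, unsigned long hi,	/* hypothetical */
			  unsigned long len, pgprot_t prot,
			  struct page **lo_pages, struct page **hi_pages)
{
	int err;

	err = vmap_page_range_noflush(lo, lo + len, prot, lo_pages);
	if (err < 0)
		return err;
	err = vmap_page_range_noflush(hi, hi + len, prot, hi_pages);
	if (err < 0)
		return err;
	/* one flush covering both ranges, assuming lo < hi */
	flush_cache_vmap(lo, hi + len);
	return 0;
}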
@@ -982,6 +992,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register a kernel vm area before
+ * vmalloc_init() is called.  @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero.  On return,
+ * vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+	static size_t vm_init_off __initdata;
+	unsigned long addr;
+
+	addr = ALIGN(VMALLOC_START + vm_init_off, align);
+	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+	vm->addr = (void *)addr;
+
+	vm->next = vmlist;
+	vmlist = vm;
+}
+
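
/*
 * Editor's sketch, not part of the patch: how an early-boot caller
 * (e.g. an arch percpu setup path) might use vm_area_register_early():
 * fill in ->flags and ->size, leave the rest zeroed, read back ->addr.
 * The identifiers below are hypothetical.  The vm_struct itself must
 * not be __initdata since it stays linked into vmlist.
 */
static struct vm_struct example_vm;			/* hypothetical */

static void __init example_register(size_t size)	/* hypothetical */
{
	example_vm.flags = VM_ALLOC;
	example_vm.size = PFN_ALIGN(size);

	vm_area_register_early(&example_vm, PAGE_SIZE);
	/* example_vm.addr now holds the reserved vmalloc-space address */
}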
 void __init vmalloc_init(void)
 {
 	struct vmap_area *va;
@@ -1009,6 +1045,58 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
+{
+	return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
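
/*
 * Editor's sketch, not part of the patch: mapping pages into an area
 * obtained from get_vm_area(), then flushing the cache the way
 * vmap_page_range() above does after installing the mapping.  The
 * function name is hypothetical and error handling is abbreviated
 * (the area is leaked on failure).
 */
static int example_map(struct page **pages, unsigned long size, pgprot_t prot)
{
	struct vm_struct *area;
	unsigned long addr;
	int nr;

	area = get_vm_area(size, VM_MAP);
	if (!area)
		return -ENOMEM;
	addr = (unsigned long)area->addr;

	nr = map_kernel_range_noflush(addr, size, prot, pages);
	if (nr < 0)
		return nr;

	/* with the _noflush variant the caller owns the cache flush */
	flush_cache_vmap(addr, addr + size);
	return 0;
}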
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area that @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+	vunmap_page_range(addr, addr + size);
+}
+
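
/*
 * Editor's sketch, not part of the patch: the flush protocol described
 * in the comment above, spelled out; it mirrors what unmap_kernel_range()
 * below does.  The function name is hypothetical.
 */
static void example_unmap(unsigned long addr, unsigned long size)
{
	flush_cache_vunmap(addr, addr + size);		/* before */
	unmap_kernel_range_noflush(addr, size);
	flush_tlb_kernel_range(addr, addr + size);	/* after */
}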
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes the virtual
+ * cache before the unmapping and the TLB after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;