author    Tejun Heo <tj@kernel.org>    2009-02-20 02:29:08 -0500
committer Tejun Heo <tj@kernel.org>    2009-02-20 02:29:08 -0500
commit    8fc48985006da4ceba24508db64ec77fc0dfe3bb (patch)
tree      2234b92b13c53bfd1972e967c50ff305b6efe013 /mm/vmalloc.c
parent    f0aa6617903648077dffe5cfcf7c4458f4610fa7 (diff)
vmalloc: add un/map_kernel_range_noflush()
Impact: two more public map/unmap functions
Implement map_kernel_range_noflush() and unmap_kernel_range_noflush().
These functions map and unmap, respectively, an address range in the
kernel VM area, but do no vcache or TLB flushing. They will be used by
the new percpu allocator.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
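A rough caller-side sketch (not part of this patch) of why the noflush
variants are useful: a client such as the upcoming percpu allocator can
map many small ranges and then issue a single vcache flush over the
whole span, instead of one flush per range as vmap_page_range() would
do. The helpers unit_addr(), unit_pages(), nr_units and unit_size are
hypothetical stand-ins for the caller's own bookkeeping.

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>

/* Hypothetical per-unit bookkeeping; not part of this patch. */
extern unsigned long unit_addr(int i);
extern struct page **unit_pages(int i);

static int map_units_batched(int nr_units, unsigned long unit_size)
{
	int i, ret;

	for (i = 0; i < nr_units; i++) {
		/* populate page tables only; defer cache maintenance */
		ret = map_kernel_range_noflush(unit_addr(i), unit_size,
					       PAGE_KERNEL, unit_pages(i));
		if (ret < 0)
			return ret;
	}
	/* one vcache flush over the whole span instead of one per unit */
	flush_cache_vmap(unit_addr(0), unit_addr(nr_units - 1) + unit_size);
	return 0;
}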
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 64 insertions(+), 3 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d206261ad9ef..224eca9650a8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -153,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-				pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -170,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
 	return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+			   pgprot_t prot, struct page **pages)
+{
+	int ret;
+
+	ret = vmap_page_range_noflush(start, end, prot, pages);
+	flush_cache_vmap(start, end);
+	return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
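The refactor above leaves vmap_page_range()'s behavior untouched while
exposing a flush-free core for the new API below. The split costs
nothing on architectures whose data caches do not alias: there the
vcache hooks compile to nothing, roughly as in x86's asm/cacheflush.h
(quoted from memory, so treat it as an approximation):

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)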
@@ -1033,6 +1042,58 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
+{
+	return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-mapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+	vunmap_page_range(addr, addr + size);
+}
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
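The kernel-doc above spells out the full flushing contract; a minimal,
hypothetical caller honoring it might look like the sketch below.
my_map(), my_unmap() and my_pages are illustrative names, and the map
side mirrors the in-tree vmap_page_range() wrapper, which flushes the
vcache after the mapping is established.

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Map pages into an area obtained from get_vm_area(). */
static int my_map(struct vm_struct *area, struct page **my_pages)
{
	unsigned long addr = (unsigned long)area->addr;
	int ret;

	ret = map_kernel_range_noflush(addr, area->size,
				       PAGE_KERNEL, my_pages);
	if (ret < 0)
		return ret;
	flush_cache_vmap(addr, addr + area->size);	/* as vmap_page_range() does */
	return 0;
}

/* Tear down: vcache flush before, TLB flush after, per the kernel-doc. */
static void my_unmap(struct vm_struct *area)
{
	unsigned long addr = (unsigned long)area->addr;
	unsigned long end = addr + area->size;

	flush_cache_vunmap(addr, end);
	unmap_kernel_range_noflush(addr, area->size);
	flush_tlb_kernel_range(addr, end);
}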