Diffstat (limited to 'mm/vmalloc.c')
 -rw-r--r--   mm/vmalloc.c   94
 1 files changed, 91 insertions, 3 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 11a929872ebd..af58324c361a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-				pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+				   pgprot_t prot, struct page **pages)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	flush_cache_vmap(start, end);
 
 	if (unlikely(err))
 		return err;
 	return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+			   pgprot_t prot, struct page **pages)
+{
+	int ret;
+
+	ret = vmap_page_range_noflush(start, end, prot, pages);
+	flush_cache_vmap(start, end);
+	return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
 	/*
@@ -990,6 +1000,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t pro
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register kernel vm area before
+ * vmalloc_init() is called. @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero. On return,
+ * vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+	static size_t vm_init_off __initdata;
+	unsigned long addr;
+
+	addr = ALIGN(VMALLOC_START + vm_init_off, align);
+	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+	vm->addr = (void *)addr;
+
+	vm->next = vmlist;
+	vmlist = vm;
+}
+
 void __init vmalloc_init(void)
 {
 	struct vmap_area *va;
@@ -1017,6 +1053,58 @@ void __init vmalloc_init(void)
 	vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr. The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+			     pgprot_t prot, struct page **pages)
+{
+	return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr. The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing. The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-mapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+	vunmap_page_range(addr, addr + size);
+}
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
 	unsigned long end = addr + size;
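
For context, a rough sketch of how a caller might use the early-registration and no-flush map/unmap hooks added above. This is a hypothetical example, not part of the patch: the names example_vm, example_early_map and example_unmap, the VM_ALLOC flag choice, and the 4-page size are made up for illustration, and it assumes the constraints stated in the kernel-doc (registration happens before vmalloc_init(), and the caller performs the cache and TLB maintenance that the *_noflush variants skip).

/*
 * Hypothetical caller (not from this patch): register a VM area early,
 * map pages into it without the implicit cache flush, and later tear
 * it down with explicit cache/TLB maintenance as the kernel-doc above
 * requires for the *_noflush variants.
 */
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static struct vm_struct example_vm __initdata = {
	.flags	= VM_ALLOC,		/* assumed; only size/flags may be set */
	.size	= 4 * PAGE_SIZE,
};

static int __init example_early_map(struct page **pages)
{
	unsigned long start, end;
	int ret;

	/* Must run before vmalloc_init(); fills in example_vm.addr. */
	vm_area_register_early(&example_vm, PAGE_SIZE);

	start = (unsigned long)example_vm.addr;
	end = start + example_vm.size;

	/* Caller does the cache flush that vmap_page_range() would do. */
	flush_cache_vmap(start, end);
	ret = map_kernel_range_noflush(start, example_vm.size,
				       PAGE_KERNEL, pages);
	return ret < 0 ? ret : 0;
}

static void example_unmap(void)
{
	unsigned long start = (unsigned long)example_vm.addr;
	unsigned long end = start + example_vm.size;

	/* Per the kernel-doc: vcache flush before, TLB flush after. */
	flush_cache_vunmap(start, end);
	unmap_kernel_range_noflush(start, example_vm.size);
	flush_tlb_kernel_range(start, end);
}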
