author		Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
committer	Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
commit		478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree		a7f7952099da60d33032aed6de9c0c56c9f8779e /mm/vmalloc.c
parent		8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent		6bb597507f9839b13498781e481f5458aea33620 (diff)
Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	116
1 file changed, 97 insertions(+), 19 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 520a75980269..fab19876b4d1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -24,6 +24,7 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
@@ -152,8 +153,8 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
  *
  * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
  */
-static int vmap_page_range(unsigned long start, unsigned long end,
-                                pgprot_t prot, struct page **pages)
+static int vmap_page_range_noflush(unsigned long start, unsigned long end,
+                                pgprot_t prot, struct page **pages)
 {
         pgd_t *pgd;
         unsigned long next;
@@ -169,13 +170,22 @@ static int vmap_page_range(unsigned long start, unsigned long end,
                 if (err)
                         break;
         } while (pgd++, addr = next, addr != end);
-        flush_cache_vmap(start, end);
 
         if (unlikely(err))
                 return err;
         return nr;
 }
 
+static int vmap_page_range(unsigned long start, unsigned long end,
+                           pgprot_t prot, struct page **pages)
+{
+        int ret;
+
+        ret = vmap_page_range_noflush(start, end, prot, pages);
+        flush_cache_vmap(start, end);
+        return ret;
+}
+
 static inline int is_vmalloc_or_module_addr(const void *x)
 {
         /*
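
The hunk above separates PTE installation from cache maintenance: vmap_page_range_noflush() only populates page tables, and the new vmap_page_range() wrapper restores the old behaviour by flushing afterwards. A minimal sketch of what the split buys, assuming a hypothetical helper inside mm/vmalloc.c where the static function is visible (map_two_ranges() is illustrative, not part of this commit): two adjacent ranges can be mapped with a single cache flush.

/*
 * Illustrative only: map two adjacent ranges, then flush the cache
 * once over the combined span.  Assumes this lives in mm/vmalloc.c,
 * next to the static vmap_page_range_noflush().
 */
static int map_two_ranges(unsigned long start, unsigned long mid,
                          unsigned long end, pgprot_t prot,
                          struct page **lo_pages, struct page **hi_pages)
{
        int ret;

        ret = vmap_page_range_noflush(start, mid, prot, lo_pages);
        if (ret < 0)
                return ret;

        ret = vmap_page_range_noflush(mid, end, prot, hi_pages);
        if (ret < 0)
                return ret;

        flush_cache_vmap(start, end);   /* one flush covers both ranges */
        return 0;
}
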
@@ -661,10 +671,7 @@ struct vmap_block {
         DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
         DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
         union {
-                struct {
-                        struct list_head free_list;
-                        struct list_head dirty_list;
-                };
+                struct list_head free_list;
                 struct rcu_head rcu_head;
         };
 };
@@ -731,7 +738,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
         bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
         bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
         INIT_LIST_HEAD(&vb->free_list);
-        INIT_LIST_HEAD(&vb->dirty_list);
 
         vb_idx = addr_to_vb_idx(va->va_start);
         spin_lock(&vmap_block_tree_lock);
@@ -762,12 +768,7 @@ static void free_vmap_block(struct vmap_block *vb)
         struct vmap_block *tmp;
         unsigned long vb_idx;
 
-        spin_lock(&vb->vbq->lock);
-        if (!list_empty(&vb->free_list))
-                list_del(&vb->free_list);
-        if (!list_empty(&vb->dirty_list))
-                list_del(&vb->dirty_list);
-        spin_unlock(&vb->vbq->lock);
+        BUG_ON(!list_empty(&vb->free_list));
 
         vb_idx = addr_to_vb_idx(vb->va->va_start);
         spin_lock(&vmap_block_tree_lock);
@@ -852,11 +853,7 @@ static void vb_free(const void *addr, unsigned long size)
 
         spin_lock(&vb->lock);
         bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
-        if (!vb->dirty) {
-                spin_lock(&vb->vbq->lock);
-                list_add(&vb->dirty_list, &vb->vbq->dirty);
-                spin_unlock(&vb->vbq->lock);
-        }
+
         vb->dirty += 1UL << order;
         if (vb->dirty == VMAP_BBMAP_BITS) {
                 BUG_ON(vb->free || !list_empty(&vb->free_list));
@@ -990,6 +987,32 @@ void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
 }
 EXPORT_SYMBOL(vm_map_ram);
 
+/**
+ * vm_area_register_early - register vmap area early during boot
+ * @vm: vm_struct to register
+ * @align: requested alignment
+ *
+ * This function is used to register kernel vm area before
+ * vmalloc_init() is called.  @vm->size and @vm->flags should contain
+ * proper values on entry and other fields should be zero.  On return,
+ * vm->addr contains the allocated address.
+ *
+ * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
+ */
+void __init vm_area_register_early(struct vm_struct *vm, size_t align)
+{
+        static size_t vm_init_off __initdata;
+        unsigned long addr;
+
+        addr = ALIGN(VMALLOC_START + vm_init_off, align);
+        vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
+
+        vm->addr = (void *)addr;
+
+        vm->next = vmlist;
+        vmlist = vm;
+}
+
 void __init vmalloc_init(void)
 {
         struct vmap_area *va;
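
vm_area_register_early() carves addresses out of vmalloc space with a static bump offset (vm_init_off) and links the area onto vmlist by hand, before the allocator proper exists. A hedged sketch of a boot-time caller, with a hypothetical area name, flags, and size (none of this is taken from the commit):

#include <linux/vmalloc.h>

/*
 * Hypothetical early-boot registration.  Per the kernel-doc above,
 * only .size and .flags are set on entry (the static struct is
 * already zeroed), and .addr is filled in on return.  The struct
 * stays linked on vmlist, so it must not be __initdata.
 */
static struct vm_struct early_vm;

void __init reserve_early_vm_area(void)
{
        early_vm.flags = VM_ALLOC;              /* illustrative flags */
        early_vm.size  = 4 * PAGE_SIZE;         /* illustrative size */

        vm_area_register_early(&early_vm, PAGE_SIZE);

        /* early_vm.addr now holds the reserved vmalloc address */
}
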
@@ -1017,6 +1040,58 @@ void __init vmalloc_init(void)
         vmap_initialized = true;
 }
 
+/**
+ * map_kernel_range_noflush - map kernel VM area with the specified pages
+ * @addr: start of the VM area to map
+ * @size: size of the VM area to map
+ * @prot: page protection flags to use
+ * @pages: pages to map
+ *
+ * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vmap() on to-be-mapped areas
+ * before calling this function.
+ *
+ * RETURNS:
+ * The number of pages mapped on success, -errno on failure.
+ */
+int map_kernel_range_noflush(unsigned long addr, unsigned long size,
+                             pgprot_t prot, struct page **pages)
+{
+        return vmap_page_range_noflush(addr, addr + size, prot, pages);
+}
+
+/**
+ * unmap_kernel_range_noflush - unmap kernel VM area
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
+ * specify should have been allocated using get_vm_area() and its
+ * friends.
+ *
+ * NOTE:
+ * This function does NOT do any cache flushing.  The caller is
+ * responsible for calling flush_cache_vunmap() on to-be-mapped areas
+ * before calling this function and flush_tlb_kernel_range() after.
+ */
+void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+        vunmap_page_range(addr, addr + size);
+}
+
+/**
+ * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
+ * @addr: start of the VM area to unmap
+ * @size: size of the VM area to unmap
+ *
+ * Similar to unmap_kernel_range_noflush() but flushes vcache before
+ * the unmapping and tlb after.
+ */
 void unmap_kernel_range(unsigned long addr, unsigned long size)
 {
         unsigned long end = addr + size;
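
The _noflush pair leaves every flush to the caller, as the NOTE blocks spell out. A sketch of the documented discipline, assuming an area obtained from get_vm_area() and with error handling trimmed (example_map/example_unmap are illustrative names, not kernel API):

#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Illustrative caller following the NOTE blocks above. */
static int example_map(struct vm_struct *area, struct page **pages,
                       unsigned long size)
{
        unsigned long addr = (unsigned long)area->addr;

        /* caller flushes the cache before mapping, per the kernel-doc */
        flush_cache_vmap(addr, addr + size);

        /* returns the number of pages mapped, or -errno */
        return map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
}

static void example_unmap(struct vm_struct *area, unsigned long size)
{
        unsigned long addr = (unsigned long)area->addr;

        flush_cache_vunmap(addr, addr + size);          /* before */
        unmap_kernel_range_noflush(addr, size);
        flush_tlb_kernel_range(addr, addr + size);      /* after */
}
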
@@ -1267,6 +1342,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
         BUG_ON(in_interrupt());
+        might_sleep();
         __vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1286,6 +1362,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
         struct vm_struct *area;
 
+        might_sleep();
+
         if (count > num_physpages)
                 return NULL;
 
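
The might_sleep() annotations in the last two hunks make the sleeping contract of vunmap() and vmap() explicit: with the kernel's sleep-in-atomic debugging enabled, a call from atomic context now warns deterministically, even on paths that would not actually block. An illustrative misuse (demo_lock and broken_teardown are hypothetical):

#include <linux/spinlock.h>
#include <linux/vmalloc.h>

static DEFINE_SPINLOCK(demo_lock);

/* Hypothetical bug that the new annotation catches reliably. */
static void broken_teardown(const void *addr)
{
        spin_lock(&demo_lock);          /* enters atomic context */
        vunmap(addr);                   /* might_sleep() warns here */
        spin_unlock(&demo_lock);
}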
