Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 13a54953a273..107454312d5e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -752,7 +752,6 @@ struct vmap_block_queue {
 struct vmap_block {
 	spinlock_t lock;
 	struct vmap_area *va;
-	struct vmap_block_queue *vbq;
 	unsigned long free, dirty;
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 	struct list_head free_list;
@@ -830,7 +829,6 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	radix_tree_preload_end();
 
 	vbq = &get_cpu_var(vmap_block_queue);
-	vb->vbq = vbq;
 	spin_lock(&vbq->lock);
 	list_add_rcu(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
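
These first two hunks drop the vbq back-pointer from struct vmap_block; new_vmap_block() still locates the per-CPU queue via get_cpu_var(), it just no longer caches it in the block. A minimal sketch of the pattern any remaining user would follow, re-deriving the queue from per-CPU data rather than reading a cached vb->vbq (illustration only, not part of the patch):

	struct vmap_block_queue *vbq;

	vbq = &get_cpu_var(vmap_block_queue);	/* pins this CPU, disables preemption */
	spin_lock(&vbq->lock);
	/* ... add/remove entries on vbq->free ... */
	spin_unlock(&vbq->lock);
	put_cpu_var(vmap_block_queue);		/* re-enables preemption */
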
@@ -1018,15 +1016,16 @@ void vm_unmap_aliases(void)
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-		int i;
+		int i, j;
 
 		spin_lock(&vb->lock);
 		i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
-		while (i < VMAP_BBMAP_BITS) {
+		if (i < VMAP_BBMAP_BITS) {
 			unsigned long s, e;
-			int j;
-			j = find_next_zero_bit(vb->dirty_map,
-					VMAP_BBMAP_BITS, i);
+
+			j = find_last_bit(vb->dirty_map,
+					VMAP_BBMAP_BITS);
+			j = j + 1; /* need exclusive index */
 
 			s = vb->va->va_start + (i << PAGE_SHIFT);
 			e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1036,10 +1035,6 @@ void vm_unmap_aliases(void)
 				start = s;
 			if (e > end)
 				end = e;
-
-			i = j;
-			i = find_next_bit(vb->dirty_map,
-					VMAP_BBMAP_BITS, i);
 		}
 		spin_unlock(&vb->lock);
 	}
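
Net effect of the two vm_unmap_aliases() hunks: instead of walking alternating dirty/clean runs with find_next_zero_bit()/find_next_bit(), the loop now computes a single conservative flush span from the first to the last dirty bit. A standalone sketch of that arithmetic, using hypothetical single-word stand-ins for the kernel's find_first_bit()/find_last_bit() and assuming a 64-bit unsigned long with GCC builtins:

	#include <stdio.h>

	/* Toy single-word stand-ins for find_first_bit()/find_last_bit(). */
	static int first_bit(unsigned long map) { return __builtin_ctzl(map); }
	static int last_bit(unsigned long map) { return 63 - __builtin_clzl(map); }

	int main(void)
	{
		/* Pages 3, 4 and 9 are dirty. */
		unsigned long dirty_map = (1UL << 3) | (1UL << 4) | (1UL << 9);
		int i = first_bit(dirty_map);
		int j = last_bit(dirty_map) + 1;	/* exclusive, as in the patch */

		/* The old loop flushed [3,5) and [9,10) separately; the new
		 * code flushes one conservative span that also covers the
		 * clean pages 5-8. */
		printf("flush pages [%d, %d)\n", i, j);	/* prints [3, 10) */
		return 0;
	}
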
@@ -1263,7 +1258,7 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
-	unsigned long end = addr + area->size - PAGE_SIZE;
+	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
 	err = vmap_page_range(addr, end, prot, *pages);
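
Here and in the hunks below, the open-coded `area->size - PAGE_SIZE` is replaced by the get_vm_area_size() helper, which returns the usable size of an area minus its trailing guard page. For reference, the helper introduced in include/linux/vmalloc.h by this series reads:

	static inline size_t get_vm_area_size(const struct vm_struct *area)
	{
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	}
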
@@ -1558,7 +1553,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	unsigned int nr_pages, array_size, i;
 	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 
-	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
+	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
 
 	area->nr_pages = nr_pages;
@@ -1990,7 +1985,7 @@ long vread(char *buf, char *addr, unsigned long count)
 
 		vm = va->vm;
 		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + vm->size - PAGE_SIZE)
+		if (addr >= vaddr + get_vm_area_size(vm))
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2000,7 +1995,7 @@ long vread(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + vm->size - PAGE_SIZE - addr;
+		n = vaddr + get_vm_area_size(vm) - addr;
 		if (n > count)
 			n = count;
 		if (!(vm->flags & VM_IOREMAP))
@@ -2072,7 +2067,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 
 		vm = va->vm;
 		vaddr = (char *) vm->addr;
-		if (addr >= vaddr + vm->size - PAGE_SIZE)
+		if (addr >= vaddr + get_vm_area_size(vm))
 			continue;
 		while (addr < vaddr) {
 			if (count == 0)
@@ -2081,7 +2076,7 @@ long vwrite(char *buf, char *addr, unsigned long count)
 			addr++;
 			count--;
 		}
-		n = vaddr + vm->size - PAGE_SIZE - addr;
+		n = vaddr + get_vm_area_size(vm) - addr;
 		if (n > count)
 			n = count;
 		if (!(vm->flags & VM_IOREMAP)) {