Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 47 +++++++++++++++++++++++++----------------------
 1 file changed, 25 insertions(+), 22 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1133dd3aafcf..86897ee792d6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -160,13 +160,15 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end, int node)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end,
+				int node, gfp_t gfp_mask)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
 	unsigned long addr;
 
+	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
 		int bit = fls(size);
 
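This hunk makes __get_vm_area_node() static and threads the caller's gfp_mask down to it, so the flags passed to __vmalloc() also govern the internal bookkeeping allocation (see the next hunk). The new BUG_ON(in_interrupt()) makes the existing rule explicit: this path still may not run in interrupt context, GFP_ATOMIC or not. A minimal, hypothetical caller (not part of this patch) showing what the plumbing enables:

#include <linux/vmalloc.h>
#include <linux/gfp.h>

/*
 * Illustration only: with gfp_mask threaded through, a GFP_ATOMIC
 * request no longer triggers a hidden GFP_KERNEL kmalloc for the
 * struct vm_struct, which previously caused "sleeping function called
 * from invalid context" warnings for atomic (non-interrupt) callers.
 */
static void *atomic_scratch_alloc(unsigned long size)
{
	return __vmalloc(size, GFP_ATOMIC | __GFP_HIGHMEM, PAGE_KERNEL);
}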
@@ -179,16 +181,13 @@ struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 	}
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
+	if (unlikely(!size))
+		return NULL;
 
-	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
-		return NULL;
-	}
-
 	/*
 	 * We always allocate a guard page.
 	 */
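Two fixes in one hunk: the zero-size check now runs before the kmalloc_node() call, so a degenerate request returns NULL without allocating and immediately freeing a vm_struct, and the header allocation uses the caller's flags filtered through GFP_LEVEL_MASK. A sketch of the masking intent (the exact bits in GFP_LEVEL_MASK are version-specific; treat the comment as an assumption, not a definition):

/*
 * Sketch only: GFP_LEVEL_MASK keeps the "may we sleep / may we do
 * I/O" style bits and drops modifiers a slab allocation cannot
 * honour, e.g. __GFP_HIGHMEM from vmalloc_user()'s page flags.
 */
gfp_t slab_flags = gfp_mask & GFP_LEVEL_MASK;

area = kmalloc_node(sizeof(*area), slab_flags, node);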
@@ -236,7 +235,7 @@ out:
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1);
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
 
 /**
@@ -253,9 +252,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+				   int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+				  gfp_mask);
 }
 
 /* Caller must hold vmlist_lock */
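Because get_vm_area_node() is exported, this is an interface change: every caller now supplies a gfp_mask, with __get_vm_area() above passing GFP_KERNEL to preserve the old behaviour. A hypothetical caller after this patch (function name is illustrative; assumes <linux/vmalloc.h>):

static int example_setup(void)
{
	struct vm_struct *area;

	/* node == -1 means no node preference. */
	area = get_vm_area_node(4 * PAGE_SIZE, VM_MAP, -1, GFP_KERNEL);
	if (!area)
		return -ENOMEM;
	return 0;
}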
@@ -487,7 +488,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area_node(size, VM_ALLOC, node);
+	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
 	if (!area)
 		return NULL;
 
@@ -528,11 +529,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
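The old code dereferenced area unconditionally: on allocation failure __vmalloc() returns NULL, __find_vm_area(NULL) matches nothing, and area->flags oopses. Guarding the lookup on ret closes that hole. Why the lookup can return NULL, simplified from this file's own __find_vm_area():

static struct vm_struct *__find_vm_area(void *addr)
{
	struct vm_struct *tmp;

	/* Caller must hold vmlist_lock. */
	for (tmp = vmlist; tmp != NULL; tmp = tmp->next)
		if (tmp->addr == addr)
			break;

	return tmp;	/* NULL when nothing matches, e.g. addr == NULL */
}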
@@ -601,11 +603,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
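vmalloc_32_user() gets the identical guard. For context, VM_USERMAP is the flag remap_vmalloc_range() insists on before mapping such a buffer into userspace; a hypothetical driver mmap handler (all names illustrative) ties the two together:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *mydrv_buf;	/* assumed to come from vmalloc_user() at init */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/*
	 * remap_vmalloc_range() returns -EINVAL for areas lacking
	 * VM_USERMAP, which is exactly the flag vmalloc_user() sets.
	 */
	return remap_vmalloc_range(vma, mydrv_buf, 0);
}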