path: root/mm/vmalloc.c
author     James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 13:06:44 -0500
committer  James Bottomley <jejb@mulgrave.il.steeleye.com>  2006-11-22 13:06:44 -0500
commit     0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree       dcced72d230d69fd0c5816ac6dd03ab84799a93e /mm/vmalloc.c
parent     e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent     f26b90440cd74c78fe10c9bd5160809704a9627c (diff)

Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c | 54
1 file changed, 30 insertions(+), 24 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 750ab6ed13fc..86897ee792d6 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -160,13 +160,15 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end, int node)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+				unsigned long start, unsigned long end,
+				int node, gfp_t gfp_mask)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
 	unsigned long addr;
 
+	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
 		int bit = fls(size);
 
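The new BUG_ON(in_interrupt()) makes the constraint explicit at the entry point: now that callers choose the gfp_mask, someone could be tempted to call this with GFP_ATOMIC from interrupt context, but the function manipulates the vmlist and may block in kmalloc_node(), so such a call is a hard bug regardless of the mask. A user-space analogue of failing loudly at the boundary (in_irq_context merely stands in for the kernel's in_interrupt()):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool in_irq_context;             /* stand-in for in_interrupt() */

static void must_be_sleep_capable(void)
{
        assert(!in_irq_context);        /* fail loudly at the entry point */
        /* ... work that may block ... */
        puts("called from a valid context");
}

int main(void)
{
        must_be_sleep_capable();
        return 0;
}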
@@ -179,16 +181,13 @@ struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 	}
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
+	if (unlikely(!size))
+		return NULL;
 
-	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask & GFP_LEVEL_MASK, node);
 	if (unlikely(!area))
 		return NULL;
 
-	if (unlikely(!size)) {
-		kfree (area);
-		return NULL;
-	}
-
 	/*
 	 * We always allocate a guard page.
 	 */
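The two hunks above form the first half of the fix: the zero-size check moves ahead of the vm_struct allocation, so there is no longer an allocate-then-kfree dance on a degenerate size, and the vm_struct itself is now allocated with the caller's gfp_mask (filtered through GFP_LEVEL_MASK) instead of a hard-coded GFP_KERNEL. A user-space sketch of the reordering; the names here are illustrative, not the kernel's:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct fake_vm_struct { unsigned long size; };

static struct fake_vm_struct *fake_get_area(unsigned long size)
{
        struct fake_vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size)                      /* reject before allocating ...    */
                return NULL;

        area = malloc(sizeof(*area));   /* ... so failure needs no free()  */
        if (!area)
                return NULL;

        area->size = size;
        return area;
}

int main(void)
{
        struct fake_vm_struct *a = fake_get_area(100);

        printf("size 0   -> %p\n", (void *)fake_get_area(0));
        printf("size 100 -> %p\n", (void *)a);
        free(a);
        return 0;
}

Checking before allocating keeps the error path trivial: there is nothing to unwind.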
@@ -236,7 +235,7 @@ out:
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1);
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
 
 /**
@@ -253,9 +252,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+				int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+				gfp_mask);
 }
 
 /* Caller must hold vmlist_lock */
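Here the exported wrapper get_vm_area_node() grows the matching gfp_t parameter and passes it straight through, while __get_vm_area() keeps its old behaviour by passing GFP_KERNEL explicitly. A hypothetical caller updated for the new signature (a fragment against a 2.6.19-era tree; example_reserve() and example_area are invented for illustration):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct vm_struct *example_area;         /* hypothetical */

static int example_reserve(int nid)
{
        /* the mask now also covers the internal vm_struct allocation */
        example_area = get_vm_area_node(4 * PAGE_SIZE, VM_ALLOC, nid,
                                        GFP_KERNEL);
        return example_area ? 0 : -ENOMEM;
}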
@@ -428,8 +429,11 @@ void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	if (array_size > PAGE_SIZE) {
 		pages = __vmalloc_node(array_size, gfp_mask, PAGE_KERNEL, node);
 		area->flags |= VM_VPAGES;
-	} else
-		pages = kmalloc_node(array_size, (gfp_mask & ~__GFP_HIGHMEM), node);
+	} else {
+		pages = kmalloc_node(array_size,
+				(gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO)),
+				node);
+	}
 	area->pages = pages;
 	if (!area->pages) {
 		remove_vm_area(area->addr);
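Besides the brace style, the kmalloc_node() fallback for the page-pointer array now masks out __GFP_ZERO along with __GFP_HIGHMEM: the array is zeroed explicitly elsewhere in the surrounding function, and neither flag belongs in a slab allocation here. A trivial, runnable model of the mask filtering (the flag values below are illustrative, not the kernel's):

#include <stdio.h>

#define __GFP_HIGHMEM 0x0002u           /* illustrative values only */
#define __GFP_ZERO    0x8000u
#define GFP_KERNEL    0x0010u

int main(void)
{
        unsigned int gfp_mask  = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO;
        /* strip flags meant for the page allocator or handled elsewhere */
        unsigned int slab_mask = gfp_mask & ~(__GFP_HIGHMEM | __GFP_ZERO);

        printf("page allocator mask: %#06x\n", gfp_mask);
        printf("slab mask:           %#06x\n", slab_mask);
        return 0;
}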
@@ -484,7 +488,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area_node(size, VM_ALLOC, node);
+	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
 	if (!area)
 		return NULL;
 
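With this hunk the caller's gfp_mask is threaded through the whole chain, __vmalloc() → __vmalloc_node() → get_vm_area_node() → __get_vm_area_node(), instead of being dropped at the get_vm_area_node() boundary with GFP_KERNEL silently substituted. A hypothetical caller that benefits (fragment only, assuming a 2.6.19-era tree; nofs_buffer() is invented for illustration):

#include <linux/vmalloc.h>
#include <linux/gfp.h>
#include <asm/pgtable.h>

/* hypothetical fs-path helper: the GFP_NOFS intent now reaches every
 * allocation __vmalloc() performs internally, not only the pages */
static void *nofs_buffer(unsigned long len)
{
        return __vmalloc(len, GFP_NOFS | __GFP_HIGHMEM, PAGE_KERNEL);
}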
@@ -525,11 +529,12 @@ void *vmalloc_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_user);
@@ -598,11 +603,12 @@ void *vmalloc_32_user(unsigned long size)
 	void *ret;
 
 	ret = __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
-	write_lock(&vmlist_lock);
-	area = __find_vm_area(ret);
-	area->flags |= VM_USERMAP;
-	write_unlock(&vmlist_lock);
-
+	if (ret) {
+		write_lock(&vmlist_lock);
+		area = __find_vm_area(ret);
+		area->flags |= VM_USERMAP;
+		write_unlock(&vmlist_lock);
+	}
 	return ret;
 }
 EXPORT_SYMBOL(vmalloc_32_user);
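The last two hunks apply the same guard to vmalloc_user() and vmalloc_32_user(): __vmalloc() can return NULL, and the old code passed that NULL straight to __find_vm_area() and then dereferenced the result. A self-contained user-space model of the fixed flow (find_area() and the_area are invented stand-ins for the vmlist lookup; the flag value mirrors the 2.6-era VM_USERMAP bit):

#include <stdio.h>
#include <stdlib.h>

#define VM_USERMAP 0x8u                 /* mirrors the kernel flag value */

struct area { unsigned int flags; };

static struct area the_area;            /* stand-in for the vmlist entry */

static struct area *find_area(void *addr)
{
        return addr ? &the_area : NULL;
}

/* model of the fixed vmalloc_user()/vmalloc_32_user() flow: only tag
 * the area when the underlying allocation actually succeeded */
static void *alloc_user_style(size_t size)
{
        void *ret = malloc(size);        /* stand-in for __vmalloc() */

        if (ret) {                       /* the guard added by these hunks */
                struct area *a = find_area(ret);
                a->flags |= VM_USERMAP;
        }
        return ret;
}

int main(void)
{
        void *p = alloc_user_style(64);

        printf("flags=%#x\n", the_area.flags);
        free(p);
        return 0;
}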