author		Giridhar Pemmasani <pgiri@yahoo.com>	2006-10-28 13:38:34 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-10-28 14:30:52 -0400
commit		52fd24ca1db3a741f144bbc229beefe044202cac (patch)
tree		bb3959b403c4bfec138b61e7943e17a76dc6cad6 /mm/vmalloc.c
parent		6a2aae06cc1e87e9712a26a639f6a2f3442e2027 (diff)
[PATCH] __vmalloc with GFP_ATOMIC causes 'sleeping from invalid context'
If __vmalloc is called to allocate memory with GFP_ATOMIC in atomic
context, the chain of calls results in __get_vm_area_node allocating
memory for vm_struct with GFP_KERNEL, causing the 'sleeping from invalid
context' warning.  This patch fixes it by passing the gfp flags along so
__get_vm_area_node allocates memory for vm_struct with the same flags.

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
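For illustration only (not part of the commit): a caller along the lines
below is what hits the warning this patch addresses.  The lock and function
names are made up, and the sketch assumes the three-argument
__vmalloc(size, gfp_mask, prot) prototype of this kernel era.  Before the
patch, the GFP_ATOMIC passed here was not propagated to the vm_struct
allocation, which still used GFP_KERNEL and could sleep under the spinlock.

#include <linux/vmalloc.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* hypothetical lock */

static void *demo_alloc(unsigned long size)
{
	void *buf;

	spin_lock(&demo_lock);		/* atomic context: sleeping is invalid */
	/*
	 * GFP_ATOMIC covers the page allocations, but before this patch
	 * the vm_struct itself was still kmalloc'ed with GFP_KERNEL.
	 */
	buf = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
	spin_unlock(&demo_lock);

	return buf;
}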
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 1133dd3aafcf..6d381df7c9b3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -160,13 +160,15 @@ int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
 	return err;
 }
 
-struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-				unsigned long start, unsigned long end, int node)
+static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
+					unsigned long start, unsigned long end,
+					int node, gfp_t gfp_mask)
 {
 	struct vm_struct **p, *tmp, *area;
 	unsigned long align = 1;
 	unsigned long addr;
 
+	BUG_ON(in_interrupt());
 	if (flags & VM_IOREMAP) {
 		int bit = fls(size);
 
@@ -180,7 +182,7 @@ struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
 	addr = ALIGN(start, align);
 	size = PAGE_ALIGN(size);
 
-	area = kmalloc_node(sizeof(*area), GFP_KERNEL, node);
+	area = kmalloc_node(sizeof(*area), gfp_mask, node);
 	if (unlikely(!area))
 		return NULL;
 
@@ -236,7 +238,7 @@ out:
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 				unsigned long start, unsigned long end)
 {
-	return __get_vm_area_node(size, flags, start, end, -1);
+	return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
 }
 
 /**
@@ -253,9 +255,11 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags, int node)
+struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
+				   int node, gfp_t gfp_mask)
 {
-	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node);
+	return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
+				  gfp_mask);
 }
 
 /* Caller must hold vmlist_lock */
@@ -487,7 +491,7 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
 	if (!size || (size >> PAGE_SHIFT) > num_physpages)
 		return NULL;
 
-	area = get_vm_area_node(size, VM_ALLOC, node);
+	area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
 	if (!area)
 		return NULL;
 