author		Ingo Molnar <mingo@elte.hu>	2009-03-03 20:29:19 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-03-03 20:29:19 -0500
commit		91d75e209bd59695f0708d66964d928d45b3b2f3 (patch)
tree		32cab1359d951e4193bebb181a0f0319824a2b95 /mm/vmalloc.c
parent		9976b39b5031bbf76f715893cf080b6a17683881 (diff)
parent		8b0e5860cb099d7958d13b00ffbc35ad02735700 (diff)
Merge branch 'x86/core' into core/percpu
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fb6f59935fb2..af58324c361a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -333,6 +333,7 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 	unsigned long addr;
 	int purged = 0;
 
+	BUG_ON(!size);
 	BUG_ON(size & ~PAGE_MASK);
 
 	va = kmalloc_node(sizeof(struct vmap_area),
@@ -344,6 +345,9 @@ retry:
 	addr = ALIGN(vstart, align);
 
 	spin_lock(&vmap_area_lock);
+	if (addr + size - 1 < addr)
+		goto overflow;
+
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
 	if (n) {
@@ -375,6 +379,8 @@ retry:
 
 	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		if (addr + size - 1 < addr)
+			goto overflow;
 
 		n = rb_next(&first->rb_node);
 		if (n)
@@ -385,6 +391,7 @@ retry:
 	}
 found:
 	if (addr + size > vend) {
+overflow:
 		spin_unlock(&vmap_area_lock);
 		if (!purged) {
 			purge_vmap_area_lazy();
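
Note on the overflow checks added above: addr is unsigned, so when the requested range does not fit below the top of the address space, addr + size wraps past zero and the sum (minus one) compares below addr. A minimal standalone sketch of the same idiom, with made-up values rather than anything from this commit:

#include <stdio.h>

/* Same wraparound idiom as the checks added to alloc_vmap_area() above:
 * if addr + size overflows an unsigned long, the sum wraps and ends up
 * numerically below addr. The values below are purely illustrative. */
static int range_overflows(unsigned long addr, unsigned long size)
{
        return addr + size - 1 < addr;
}

int main(void)
{
        unsigned long addr = (unsigned long)-4096;      /* 4 KiB below the top of the space */

        printf("%d\n", range_overflows(addr, 4096));    /* 0: range fits exactly */
        printf("%d\n", range_overflows(addr, 8192));    /* 1: range wraps past zero */
        return 0;
}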
@@ -508,6 +515,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	static DEFINE_SPINLOCK(purge_lock);
 	LIST_HEAD(valist);
 	struct vmap_area *va;
+	struct vmap_area *n_va;
 	int nr = 0;
 
 	/*
@@ -547,7 +555,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 
 	if (nr) {
 		spin_lock(&vmap_area_lock);
-		list_for_each_entry(va, &valist, purge_list)
+		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 			__free_vmap_area(va);
 		spin_unlock(&vmap_area_lock);
 	}
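
Note on the list_for_each_entry_safe() conversion above: the plain macro reads the next pointer out of the current entry after the loop body has run, so freeing the entry in the body (as __free_vmap_area() does here) would dereference freed memory. The _safe variant loads the successor into a second cursor first. A small kernel-style sketch of the pattern, using a hypothetical struct rather than vmap_area:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical element type; only the embedded list_head matters here. */
struct demo_item {
        struct list_head node;
        int val;
};

/* Free every element on the list. list_for_each_entry_safe() caches the
 * successor in 'next' before the body runs, so kfree(it) is safe; the
 * non-safe macro would read it->node.next after 'it' was freed. */
static void demo_drain(struct list_head *head)
{
        struct demo_item *it, *next;

        list_for_each_entry_safe(it, next, head, node) {
                list_del(&it->node);
                kfree(it);
        }
}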
@@ -1347,6 +1355,7 @@ EXPORT_SYMBOL(vfree);
 void vunmap(const void *addr)
 {
 	BUG_ON(in_interrupt());
+	might_sleep();
 	__vunmap(addr, 0);
 }
 EXPORT_SYMBOL(vunmap);
@@ -1366,6 +1375,8 @@ void *vmap(struct page **pages, unsigned int count,
 {
 	struct vm_struct *area;
 
+	might_sleep();
+
 	if (count > num_physpages)
 		return NULL;
 
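
Note on the might_sleep() annotations added to vunmap() and vmap(): with CONFIG_DEBUG_ATOMIC_SLEEP enabled, the annotation warns whenever these functions are entered from a context that is not allowed to sleep, even on runs where they happen not to block. A hedged sketch of the kind of caller bug this surfaces; the lock and function below are hypothetical and not part of this commit:

#include <linux/spinlock.h>
#include <linux/vmalloc.h>

static DEFINE_SPINLOCK(demo_lock);

/* Calling vunmap() with a spinlock held is a bug, since the unmap path
 * may sleep. The new might_sleep() makes a debug kernel report it here
 * every time, instead of only on the occasions where it actually blocks. */
static void demo_buggy_teardown(void *mapping)
{
        spin_lock(&demo_lock);
        vunmap(mapping);        /* "sleeping function called from invalid context" warning */
        spin_unlock(&demo_lock);
}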