Diffstat (limited to 'mm/vmalloc.c')

 mm/vmalloc.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index ba6b0f5f7fac..30f826d484f0 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -324,14 +324,14 @@ static struct vmap_area *alloc_vmap_area(unsigned long size,
 
 	BUG_ON(size & ~PAGE_MASK);
 
-	addr = ALIGN(vstart, align);
-
 	va = kmalloc_node(sizeof(struct vmap_area),
 			gfp_mask & GFP_RECLAIM_MASK, node);
 	if (unlikely(!va))
 		return ERR_PTR(-ENOMEM);
 
 retry:
+	addr = ALIGN(vstart, align);
+
 	spin_lock(&vmap_area_lock);
 	/* XXX: could have a last_hole cache */
 	n = vmap_area_root.rb_node;
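A note on the hunk above: moving `addr = ALIGN(vstart, align);` below the `retry:` label means every pass of the allocator, including the retry taken after a purge has freed address space, rescans from the bottom of the [vstart, vend) range instead of resuming where the previous pass ran out. A standalone sketch of the failure mode, deliberately simplified to a single busy span; all names and numbers here are illustrative, not from mm/vmalloc.c:

#include <assert.h>

static unsigned long vstart = 0x1000, vend = 0x5000;
static unsigned long busy_start = 0x1000, busy_end = 0x5000; /* one busy span */
static int purged;

static void purge(void)
{
	busy_end = 0x2000;	/* frees [0x2000, 0x5000) */
	purged = 1;
}

static unsigned long alloc_range(unsigned long size)
{
	unsigned long addr;
retry:
	addr = vstart;			/* reset on every attempt */
	if (addr >= busy_start && addr < busy_end)
		addr = busy_end;	/* step over the busy span */
	if (addr + size <= vend)
		return addr;
	if (!purged) {			/* out of space: purge, then rescan */
		purge();
		goto retry;
	}
	return 0;
}

int main(void)
{
	assert(alloc_range(0x1000) == 0x2000);
	return 0;
}

If the reset sat above the label, the retry would resume at the old cursor (0x5000 here) and fail even though the purge opened a hole lower down.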
@@ -362,7 +362,7 @@ retry:
 		goto found;
 	}
 
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		addr = ALIGN(first->va_end + PAGE_SIZE, align);
 
 		n = rb_next(&first->rb_node);
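A note on the hunk above: the candidate range is `[addr, addr + size)`, half-open, so a candidate that ends exactly at `first->va_start` does not actually collide with `first`. The old `>=` test treated that exact fit as a collision and bumped `addr` past the area (plus a guard page), leaving an unusable gap. A minimal illustration, with helper names of our own invention:

#include <assert.h>

static int collides_old(unsigned long addr, unsigned long size,
			unsigned long va_start)
{
	return addr + size >= va_start;		/* off by one */
}

static int collides_new(unsigned long addr, unsigned long size,
			unsigned long va_start)
{
	return addr + size > va_start;		/* [addr, addr+size) is half-open */
}

int main(void)
{
	/* candidate [0x1000, 0x2000) against an area starting at 0x2000 */
	assert(collides_old(0x1000, 0x1000, 0x2000));	/* wrongly rejected */
	assert(!collides_new(0x1000, 0x1000, 0x2000));	/* exact fit accepted */
	return 0;
}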
@@ -522,13 +522,24 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 }
 
 /*
+ * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
+ * is already purging.
+ */
+static void try_purge_vmap_area_lazy(void)
+{
+	unsigned long start = ULONG_MAX, end = 0;
+
+	__purge_vmap_area_lazy(&start, &end, 0, 0);
+}
+
+/*
  * Kick off a purge of the outstanding lazy areas.
  */
 static void purge_vmap_area_lazy(void)
 {
 	unsigned long start = ULONG_MAX, end = 0;
 
-	__purge_vmap_area_lazy(&start, &end, 0, 0);
+	__purge_vmap_area_lazy(&start, &end, 1, 0);
 }
 
 /*
@@ -539,7 +550,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
-		purge_vmap_area_lazy();
+		try_purge_vmap_area_lazy();
 }
 
 static struct vmap_area *find_vmap_area(unsigned long addr)
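The purge hunks split the old entry point in two. The diff does not show `__purge_vmap_area_lazy()`'s parameter list beyond the first two arguments, but the call sites suggest the third argument selects synchronous behaviour: `purge_vmap_area_lazy()` now passes 1 (block until the purge completes, which the allocation path needs before it retries), while the new `try_purge_vmap_area_lazy()` passes 0 (skip the purge entirely if somebody is already purging, which is good enough for the opportunistic call from `free_unmap_vmap_area()`). A standalone sketch of that split, using a pthread mutex in place of the kernel's locking; everything below is illustrative rather than the actual mm/vmalloc.c code:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_purge(void)
{
	puts("purging lazily-freed areas");	/* stand-in for the real walk */
}

/* cf. purge_vmap_area_lazy(): wait for any purger ahead of us */
static void purge_blocking(void)
{
	pthread_mutex_lock(&purge_lock);
	do_purge();
	pthread_mutex_unlock(&purge_lock);
}

/* cf. try_purge_vmap_area_lazy(): don't bother if somebody is purging */
static void try_purge(void)
{
	if (pthread_mutex_trylock(&purge_lock) != 0)
		return;
	do_purge();
	pthread_mutex_unlock(&purge_lock);
}

int main(void)
{
	try_purge();		/* opportunistic: may do nothing under contention */
	purge_blocking();	/* guaranteed: the caller needs the space now */
	return 0;
}

The trylock variant keeps the free path cheap under contention; the blocking variant guarantees the allocation-failure path actually reclaims space before it rescans.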
