Diffstat (limited to 'mm/slob.c')

-rw-r--r-- | mm/slob.c | 45
1 file changed, 28 insertions(+), 17 deletions(-)
@@ -127,9 +127,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
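Reviewer note: the old slob_page() read like an accessor but was really a predicate on the SlobPage page flag (PageSlobPage()), so the is_ prefix makes call sites such as if (is_slob_page(sp)) self-describing. The rename also frees the slob_page name for the address-to-descriptor helper introduced in the next hunk:

	static inline int is_slob_page(struct slob_page *sp);		/* predicate */
	static inline struct slob_page *slob_page(const void *addr);	/* lookup, added below */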
@@ -144,6 +144,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
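The new slob_page() helper centralizes the virt_to_page()-plus-cast idiom that the later hunks remove from slob_alloc(), slob_free(), kfree() and ksize(). Before and after at a call site (taken from the hunks below):

	sp = (struct slob_page *)virt_to_page(block);	/* before: cast open-coded everywhere */
	sp = slob_page(block);				/* after: one helper owns the cast */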
@@ -231,7 +236,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
 
@@ -248,12 +253,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
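slob_free_pages() is the trivial counterpart of slob_new_pages() (renamed from slob_new_page() in the previous hunk; the plural fits, since an order argument can mean several contiguous pages). A hypothetical round trip through the pair, assuming the file's convention that node == -1 means no NUMA preference:

	void *b = slob_new_pages(GFP_KERNEL, 2, -1);	/* four contiguous pages, any node */
	if (b)
		slob_free_pages(b, 2);			/* order must match the allocation */

The aligned = NULL change in slob_page_alloc() is a style fix: aligned is a slob_t pointer, so NULL is the correct initializer rather than plain 0.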
@@ -350,10 +360,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
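Same sparse-driven cleanup here: slob_alloc() returns a pointer, so the failure path now returns NULL rather than 0 (silencing sparse's "Using plain integer as NULL pointer" warning), and the freshly allocated page is looked up through the new slob_page() helper instead of the open-coded cast.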
@@ -385,7 +395,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -394,10 +404,11 @@ static void slob_free(void *block, int size)
 		/* Go directly to page allocator. Do not pass slob allocator */
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
+		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
 		free_page((unsigned long)b);
-		goto out;
+		return;
 	}
 
 	if (!slob_page_free(sp)) {
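This is the one hunk that changes behavior rather than spelling. When freeing a block empties its page, the page is unlinked from the free list under slob_lock, but the lock is now dropped before the page itself goes back to the page allocator; the old early exit jumped to an out label (which presumably unlocked at the end of slob_free()), so a plain return is required once the unlock happens inline. A minimal sketch of the new flow; the emptiness test is reconstructed from the surrounding file and is not part of this hunk:

	spin_lock_irqsave(&slob_lock, flags);
	if (sp->units + units == SLOB_UNITS(PAGE_SIZE)) {	/* condition not shown in this diff */
		if (slob_page_free(sp))
			clear_slob_page_free(sp);		/* unlink under the lock */
		spin_unlock_irqrestore(&slob_lock, flags);
		clear_slob_page(sp);				/* page-allocator work, now unlocked */
		free_slob_page(sp);
		free_page((unsigned long)b);
		return;
	}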
@@ -466,7 +477,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 	void *ret;
 
-	lockdep_trace_alloc(flags);
+	lockdep_trace_alloc(gfp);
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
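The lockdep_trace_alloc() change is a real fix, not a rename: __kmalloc_node()'s gfp-flags parameter is named gfp (see the hunk header), and no flags variable is in scope, so the old line could not even compile once lockdep's reclaim annotations were enabled. Passing gfp hands lockdep the actual allocation flags.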
@@ -485,7 +496,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, order, node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
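Small nit in this hunk: the new call recomputes get_order(size) even though the order local two lines up already holds that value. get_order() is a pure computation, so this is only a readability wart, but reusing order would have been tidier.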
@@ -508,8 +519,8 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
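kfree() can recover the size because the small-allocation path in __kmalloc_node() prepends a header word: the block handed to the caller sits align bytes past the slob allocation, with the requested size stashed at the front. A sketch of that allocation side, reconstructed from context (it is not part of this diff):

	unsigned int *m = slob_alloc(size + align, gfp, align, node);
	if (!m)
		return NULL;
	*m = size;			/* header: remember the requested size */
	return (void *)m + align;	/* caller sees only the aligned block */

So *m + align in kfree() rebuilds exactly the size that slob_alloc() handed out.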
@@ -529,8 +540,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
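ksize() reads the same header but reports the usable size rounded up to whole SLOB units. Assuming the common configuration where SLOB_UNIT is 2 bytes on 4 KiB pages, a kmalloc(33) block would report ksize() == 34.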
@@ -593,7 +604,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 						  SLOB_UNITS(c->size) * SLOB_UNIT,
 						  flags, node);
 	} else {
-		b = slob_new_page(flags, get_order(c->size), node);
+		b = slob_new_pages(flags, get_order(c->size), node);
 		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
 					  _RET_IP_, b, c->size,
 					  PAGE_SIZE << get_order(c->size),
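Rename aside, note what the kmemtrace hook records here: the requested size c->size versus the size actually allocated, PAGE_SIZE << get_order(c->size). For example, with 4 KiB pages a 5000-byte cache object needs get_order(5000) == 1, so 8192 bytes are charged for a 5000-byte request.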
@@ -612,7 +623,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
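With this last hunk the free paths mirror the allocation paths exactly: sub-page objects go back through slob_free(), larger ones through slob_free_pages(), matching the slob_new_pages() call in kmem_cache_alloc_node() above. Behavior is unchanged, since slob_free_pages() is a direct wrapper around free_pages().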