Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	80
1 file changed, 59 insertions(+), 21 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..4dd6516447f2 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -126,9 +127,9 @@ static LIST_HEAD(free_slob_medium);
 static LIST_HEAD(free_slob_large);
 
 /*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
  */
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
 {
 	return PageSlobPage((struct page *)sp);
 }
@@ -143,6 +144,11 @@ static inline void clear_slob_page(struct slob_page *sp)
 	__ClearPageSlobPage((struct page *)sp);
 }
 
+static inline struct slob_page *slob_page(const void *addr)
+{
+	return (struct slob_page *)virt_to_page(addr);
+}
+
 /*
  * slob_page_free: true for pages on free_slob_pages list.
  */
@@ -230,7 +236,7 @@ static int slob_last(slob_t *s)
 	return !((unsigned long)slob_next(s) & ~PAGE_MASK);
 }
 
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
 	void *page;
 
@@ -247,12 +253,17 @@ static void *slob_new_page(gfp_t gfp, int order, int node)
 	return page_address(page);
 }
 
+static void slob_free_pages(void *b, int order)
+{
+	free_pages((unsigned long)b, order);
+}
+
 /*
  * Allocate a slob block within a given slob_page sp.
  */
 static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
 {
-	slob_t *prev, *cur, *aligned = 0;
+	slob_t *prev, *cur, *aligned = NULL;
 	int delta = 0, units = SLOB_UNITS(size);
 
 	for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
@@ -349,10 +360,10 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	/* Not enough space: must allocate a new page */
 	if (!b) {
-		b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+		b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
 		if (!b)
-			return 0;
-		sp = (struct slob_page *)virt_to_page(b);
+			return NULL;
+		sp = slob_page(b);
 		set_slob_page(sp);
 
 		spin_lock_irqsave(&slob_lock, flags);
@@ -384,7 +395,7 @@ static void slob_free(void *block, int size)
 		return;
 	BUG_ON(!size);
 
-	sp = (struct slob_page *)virt_to_page(block);
+	sp = slob_page(block);
 	units = SLOB_UNITS(size);
 
 	spin_lock_irqsave(&slob_lock, flags);
@@ -393,10 +404,11 @@ static void slob_free(void *block, int size)
 		/* Go directly to page allocator. Do not pass slob allocator */
 		if (slob_page_free(sp))
 			clear_slob_page_free(sp);
+		spin_unlock_irqrestore(&slob_lock, flags);
 		clear_slob_page(sp);
 		free_slob_page(sp);
 		free_page((unsigned long)b);
-		goto out;
+		return;
 	}
 
 	if (!slob_page_free(sp)) {
@@ -463,27 +475,40 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
+
+	lockdep_trace_alloc(gfp);
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -494,13 +519,15 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -513,8 +540,8 @@ size_t ksize(const void *block)
 	if (unlikely(block == ZERO_SIZE_PTR))
 		return 0;
 
-	sp = (struct slob_page *)virt_to_page(block);
-	if (slob_page(sp)) {
+	sp = slob_page(block);
+	if (is_slob_page(sp)) {
 		int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
 		unsigned int *m = (unsigned int *)(block - align);
 		return SLOB_UNITS(*m) * SLOB_UNIT;
@@ -570,10 +597,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
-		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
+		b = slob_new_pages(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -587,7 +623,7 @@ static void __kmem_cache_free(void *b, int size)
 	if (size < PAGE_SIZE)
 		slob_free(b, size);
 	else
-		free_pages((unsigned long)b, get_order(size));
+		slob_free_pages(b, get_order(size));
 }
 
 static void kmem_rcu_free(struct rcu_head *head)
@@ -609,6 +645,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
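Aside: the recurring pattern in this patch is that SLOB's page-level allocations and frees now funnel through the new slob_new_pages()/slob_free_pages() wrappers, and each allocation and free site gains a kmemtrace_mark_* call recording both the requested size and the size actually handed out. Below is a minimal userspace sketch of that trace-at-the-allocation-boundary idea; the names demo_new_pages, demo_free_pages, trace_alloc, and trace_free are hypothetical stand-ins for the kernel helpers and the kmemtrace hooks, and for brevity the trace calls live inside the wrappers here, whereas the patch places them in the callers.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for kmemtrace_mark_alloc_node()/kmemtrace_mark_free(). */
static void trace_alloc(void *p, size_t bytes)
{
	fprintf(stderr, "alloc %p (%zu bytes)\n", p, bytes);
}

static void trace_free(void *p)
{
	fprintf(stderr, "free  %p\n", p);
}

/* Analogue of slob_new_pages(): allocate 2^order "pages", trace in one place. */
static void *demo_new_pages(size_t page_size, int order)
{
	size_t bytes = page_size << order;
	void *p = malloc(bytes);

	if (p)
		trace_alloc(p, bytes);
	return p;
}

/* Analogue of slob_free_pages(): the single choke point for traced frees. */
static void demo_free_pages(void *p)
{
	trace_free(p);
	free(p);
}

int main(void)
{
	void *p = demo_new_pages(4096, 1);	/* order-1: two 4 KiB pages */

	if (p)
		demo_free_pages(p);
	return 0;
}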