Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 37 +++++++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 52bc8a2bd9ef..f9cc24688232 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -570,10 +584,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -609,6 +632,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
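
For a concrete sense of what the new hooks record, here is a sketch of the event emitted by the small-object path of __kmalloc_node() above for a hypothetical kmalloc_node(100, GFP_KERNEL, 0), assuming the computed align works out to 8 on the target architecture (the request size, gfp flags, node and align value are illustrative assumptions, not part of the patch):

	/* sketch only: the arguments the hook receives in that case */
	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
				  _RET_IP_,	/* caller of __kmalloc_node() */
				  ret,		/* object address returned */
				  100,		/* bytes requested */
				  108,		/* bytes actually used: size + align */
				  GFP_KERNEL, 0);

Passing the requested size and the effective size (size + align on the small path, PAGE_SIZE << order on the large path, SLOB_UNITS(c->size) * SLOB_UNIT for caches) as separate arguments is what lets kmemtrace later report per-allocation overhead; each alloc event is paired with a kmemtrace_mark_free() on the corresponding free path, keyed by the object pointer.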