author    Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>  2008-08-10 13:14:07 -0400
committer Pekka Enberg <penberg@cs.helsinki.fi>  2008-12-29 08:34:05 -0500
commit    3eae2cb24a96509e0a38cc48dc1538a2826f4e33 (patch)
tree      d59f2c69d8b377c12222deba8b07d065ade73818
parent    36555751c6751a5bdfd6d7bdf0648343bb1ef0de (diff)
kmemtrace: SLOB hooks.

This adds hooks for the SLOB allocator, to allow tracing with kmemtrace.

We also convert some inline functions to __always_inline to make sure
_RET_IP_, which expands to __builtin_return_address(0), always works
as expected.

Acked-by: Matt Mackall <mpm@selenic.com>
Signed-off-by: Eduard - Gabriel Munteanu <eduard.munteanu@linux360.ro>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
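The __always_inline conversion matters because _RET_IP_ is evaluated inside
the out-of-line allocator (e.g. __kmalloc_node), so it names that function's
immediate caller; if a thin wrapper like kmalloc() were emitted out of line,
every trace would blame the wrapper instead of the real allocation site. As a
rough illustration, here is a hypothetical standalone userspace sketch (not
part of the patch; demo_kmalloc and demo_kmalloc_node are made-up names):

/* Hypothetical demo of the __always_inline rationale: the call-site
 * address is captured in the out-of-line function, so forcing the thin
 * wrapper inline makes the recorded caller the real allocation site. */
#include <stdio.h>

#define _RET_IP_ ((unsigned long)__builtin_return_address(0))

static __attribute__((noinline)) void *demo_kmalloc_node(unsigned long size)
{
	/* In the patch, this out-of-line body is where
	 * kmemtrace_mark_alloc_node() records _RET_IP_. */
	printf("alloc of %lu bytes from call site %#lx\n", size, _RET_IP_);
	return NULL;
}

/* Were this a plain "static inline", the compiler would be free to emit
 * it out of line, and the traced call site would land in the wrapper. */
static inline __attribute__((always_inline)) void *demo_kmalloc(unsigned long size)
{
	return demo_kmalloc_node(size);
}

int main(void)
{
	demo_kmalloc(32);	/* the reported call site is in main() */
	return 0;
}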
 include/linux/slob_def.h |  9 +++++----
 mm/slob.c                | 37 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 59a3fa476ab9..0ec00b39d006 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -3,14 +3,15 @@
 
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
-static inline void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
+					      gfp_t flags)
 {
 	return kmem_cache_alloc_node(cachep, flags, -1);
 }
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node);
 
-static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __kmalloc_node(size, flags, node);
 }
@@ -23,12 +24,12 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  * kmalloc is the normal method of allocating memory
  * in the kernel.
  */
-static inline void *kmalloc(size_t size, gfp_t flags)
+static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
 	return __kmalloc_node(size, flags, -1);
 }
 
-static inline void *__kmalloc(size_t size, gfp_t flags)
+static __always_inline void *__kmalloc(size_t size, gfp_t flags)
 {
 	return kmalloc(size, flags);
 }
diff --git a/mm/slob.c b/mm/slob.c
index cb675d126791..55de44ae5d30 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,6 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
+#include <linux/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
@@ -463,27 +464,38 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 {
 	unsigned int *m;
 	int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+	void *ret;
 
 	if (size < PAGE_SIZE - align) {
 		if (!size)
 			return ZERO_SIZE_PTR;
 
 		m = slob_alloc(size + align, gfp, align, node);
+
 		if (!m)
 			return NULL;
 		*m = size;
-		return (void *)m + align;
+		ret = (void *)m + align;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, size + align, gfp, node);
 	} else {
-		void *ret;
+		unsigned int order = get_order(size);
 
-		ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+		ret = slob_new_page(gfp | __GFP_COMP, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
 			page->private = size;
 		}
-		return ret;
+
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
+					  _RET_IP_, ret,
+					  size, PAGE_SIZE << order, gfp, node);
 	}
+
+	return ret;
 }
 EXPORT_SYMBOL(__kmalloc_node);
 
@@ -501,6 +513,8 @@ void kfree(const void *block)
 		slob_free(m, *m + align);
 	} else
 		put_page(&sp->page);
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, _RET_IP_, block);
 }
 EXPORT_SYMBOL(kfree);
 
@@ -569,10 +583,19 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
 
-	if (c->size < PAGE_SIZE)
+	if (c->size < PAGE_SIZE) {
 		b = slob_alloc(c->size, flags, c->align, node);
-	else
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  SLOB_UNITS(c->size) * SLOB_UNIT,
+					  flags, node);
+	} else {
 		b = slob_new_page(flags, get_order(c->size), node);
+		kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_CACHE,
+					  _RET_IP_, b, c->size,
+					  PAGE_SIZE << get_order(c->size),
+					  flags, node);
+	}
 
 	if (c->ctor)
 		c->ctor(b);
@@ -608,6 +631,8 @@ void kmem_cache_free(struct kmem_cache *c, void *b)
 	} else {
 		__kmem_cache_free(b, c->size);
 	}
+
+	kmemtrace_mark_free(KMEMTRACE_TYPE_CACHE, _RET_IP_, b);
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
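Note how every allocation hook above records both the size the caller asked
for and the size SLOB actually handed out (size + align for small objects,
PAGE_SIZE << order for whole pages), so a tracer can measure internal
fragmentation; free hooks record only the type, call site and pointer. The
stub below is a hypothetical userspace mock-up with the same shape as the
calls in this patch; the real hooks in <linux/kmemtrace.h> log these events
for later analysis rather than printing them, and the exact kernel signatures
may differ.

/* Illustration only: stand-ins for the two hooks as invoked above. */
#include <stdio.h>

enum kmemtrace_type_id {
	KMEMTRACE_TYPE_KMALLOC,		/* kmalloc()/kfree() events */
	KMEMTRACE_TYPE_CACHE,		/* kmem_cache_*() events */
};

static void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
				      unsigned long call_site, const void *ptr,
				      unsigned long bytes_req,
				      unsigned long bytes_alloc,
				      unsigned long gfp_flags, int node)
{
	printf("alloc type=%d site=%#lx ptr=%p req=%lu got=%lu gfp=%#lx node=%d\n",
	       (int)type_id, call_site, ptr, bytes_req, bytes_alloc,
	       gfp_flags, node);
}

static void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
				unsigned long call_site, const void *ptr)
{
	printf("free  type=%d site=%#lx ptr=%p\n",
	       (int)type_id, call_site, ptr);
}

int main(void)
{
	char obj[16];

	/* What the small-object path of __kmalloc_node() would record
	 * for a 12-byte request with a 4-byte SLOB size header. */
	kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC, 0xc0123456UL,
				  (void *)obj, 12, 12 + 4, 0, -1);
	kmemtrace_mark_free(KMEMTRACE_TYPE_KMALLOC, 0xc0123456UL, (void *)obj);
	return 0;
}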