 Documentation/vm/Makefile                                        |  2
 include/linux/slab_def.h                                         | 33
 include/linux/slub_def.h                                         | 55
 mm/slab.c                                                        | 38
 mm/slub.c                                                        | 30
 tools/slub/slabinfo.c (renamed from Documentation/vm/slabinfo.c) |  6
 6 files changed, 89 insertions, 75 deletions
diff --git a/Documentation/vm/Makefile b/Documentation/vm/Makefile
index 9dcff328b964..3fa4d0668864 100644
--- a/Documentation/vm/Makefile
+++ b/Documentation/vm/Makefile
@@ -2,7 +2,7 @@
 obj- := dummy.o
 
 # List of programs to build
-hostprogs-y := slabinfo page-types hugepage-mmap hugepage-shm map_hugetlb
+hostprogs-y := page-types hugepage-mmap hugepage-shm map_hugetlb
 
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 791a502f6906..83203ae9390b 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -138,11 +138,12 @@ void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
+extern void *kmem_cache_alloc_trace(size_t size,
+				    struct kmem_cache *cachep, gfp_t flags);
 extern size_t slab_buffer_size(struct kmem_cache *cachep);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
 	return kmem_cache_alloc(cachep, flags);
 }
@@ -179,10 +180,7 @@ found:
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_notrace(cachep, flags);
-
-		trace_kmalloc(_THIS_IP_, ret,
-			      size, slab_buffer_size(cachep), flags);
+		ret = kmem_cache_alloc_trace(size, cachep, flags);
 
 		return ret;
 	}
@@ -194,14 +192,16 @@ extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-					   gfp_t flags,
-					   int nodeid);
+extern void *kmem_cache_alloc_node_trace(size_t size,
+					 struct kmem_cache *cachep,
+					 gfp_t flags,
+					 int nodeid);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-			      gfp_t flags,
-			      int nodeid)
+kmem_cache_alloc_node_trace(size_t size,
+			    struct kmem_cache *cachep,
+			    gfp_t flags,
+			    int nodeid)
 {
 	return kmem_cache_alloc_node(cachep, flags, nodeid);
 }
@@ -210,7 +210,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	if (__builtin_constant_p(size)) {
 		int i = 0;
@@ -234,13 +233,7 @@ found:
 #endif
 			cachep = malloc_sizes[i].cs_cachep;
 
-		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, slab_buffer_size(cachep),
-				   flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index e4f5ed180b9b..8b6e8ae5d5ca 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,9 +10,8 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemleak.h>
 
-#include <trace/events/kmem.h>
+#include <linux/kmemleak.h>
 
 enum stat_item {
 	ALLOC_FASTPATH,		/* Allocation from cpu slab */
@@ -216,31 +215,40 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
 void *__kmalloc(size_t size, gfp_t flags);
 
+static __always_inline void *
+kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+{
+	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
+	kmemleak_alloc(ret, size, 1, flags);
+	return ret;
+}
+
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
+extern void *
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size);
+extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order);
 #else
 static __always_inline void *
-kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	return kmem_cache_alloc(s, gfpflags);
 }
+
+static __always_inline void *
+kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+{
+	return kmalloc_order(size, flags, order);
+}
 #endif
 
 static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);
-
-	kmemleak_alloc(ret, size, 1, flags);
-	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);
-
-	return ret;
+	return kmalloc_order_trace(size, flags, order);
 }
 
 static __always_inline void *kmalloc(size_t size, gfp_t flags)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size)) {
 		if (size > SLUB_MAX_SIZE)
 			return kmalloc_large(size, flags);
@@ -251,11 +259,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_notrace(s, flags);
-
-		trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);
-
-		return ret;
+		return kmem_cache_alloc_trace(s, flags, size);
 	}
 	}
 	return __kmalloc(size, flags);
@@ -266,14 +270,14 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node);
 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
 
 #ifdef CONFIG_TRACING
-extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
-					   gfp_t gfpflags,
-					   int node);
+extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+					 gfp_t gfpflags,
+					 int node, size_t size);
 #else
 static __always_inline void *
-kmem_cache_alloc_node_notrace(struct kmem_cache *s,
-			      gfp_t gfpflags,
-			      int node)
+kmem_cache_alloc_node_trace(struct kmem_cache *s,
+			    gfp_t gfpflags,
+			    int node, size_t size)
 {
 	return kmem_cache_alloc_node(s, gfpflags, node);
 }
@@ -281,8 +285,6 @@ kmem_cache_alloc_node_notrace(struct kmem_cache *s,
 
 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
-	void *ret;
-
 	if (__builtin_constant_p(size) &&
 		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
 			struct kmem_cache *s = kmalloc_slab(size);
@@ -290,12 +292,7 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 		if (!s)
 			return ZERO_SIZE_PTR;
 
-		ret = kmem_cache_alloc_node_notrace(s, flags, node);
-
-		trace_kmalloc_node(_THIS_IP_, ret,
-				   size, s->size, flags, node);
-
-		return ret;
+		return kmem_cache_alloc_node_trace(s, flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
diff --git a/mm/slab.c b/mm/slab.c
index e9f92987954a..264037449f08 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3653,11 +3653,18 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
+void *
+kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
 {
-	return __cache_alloc(cachep, flags, __builtin_return_address(0));
+	void *ret;
+
+	ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
+
+	trace_kmalloc(_RET_IP_, ret,
+		      size, slab_buffer_size(cachep), flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -3675,31 +3682,32 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
-				    gfp_t flags,
-				    int nodeid)
+void *kmem_cache_alloc_node_trace(size_t size,
+				  struct kmem_cache *cachep,
+				  gfp_t flags,
+				  int nodeid)
 {
-	return __cache_alloc_node(cachep, flags, nodeid,
+	void *ret;
+
+	ret = __cache_alloc_node(cachep, flags, nodeid,
 				  __builtin_return_address(0));
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, slab_buffer_size(cachep),
+			   flags, nodeid);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 
 static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
 {
 	struct kmem_cache *cachep;
-	void *ret;
 
 	cachep = kmem_find_general_cachep(size, flags);
 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 		return cachep;
-	ret = kmem_cache_alloc_node_notrace(cachep, flags, node);
-
-	trace_kmalloc_node((unsigned long) caller, ret,
-			   size, cachep->buffer_size, flags, node);
-
-	return ret;
+	return kmem_cache_alloc_node_trace(size, cachep, flags, node);
 }
 
 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
diff --git a/mm/slub.c b/mm/slub.c
index a2fe1727ed85..008cd743a36a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -28,6 +28,8 @@
 #include <linux/math64.h>
 #include <linux/fault-inject.h>
 
+#include <trace/events/kmem.h>
+
 /*
  * Lock order:
  *   1. slab_lock(page)
@@ -1774,11 +1776,21 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+{
+	void *ret = slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
+	return ret;
+}
+EXPORT_SYMBOL(kmem_cache_alloc_trace);
+
+void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-	return slab_alloc(s, gfpflags, NUMA_NO_NODE, _RET_IP_);
+	void *ret = kmalloc_order(size, flags, order);
+	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_notrace);
+EXPORT_SYMBOL(kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_NUMA
@@ -1794,13 +1806,17 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 
 #ifdef CONFIG_TRACING
-void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
-				    gfp_t gfpflags,
-				    int node)
+void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
+				  gfp_t gfpflags,
+				  int node, size_t size)
 {
-	return slab_alloc(s, gfpflags, node, _RET_IP_);
+	void *ret = slab_alloc(s, gfpflags, node, _RET_IP_);
+
+	trace_kmalloc_node(_RET_IP_, ret,
+			   size, s->size, gfpflags, node);
+	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node_notrace);
+EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
 #endif
 #endif
 
diff --git a/Documentation/vm/slabinfo.c b/tools/slub/slabinfo.c
index 92e729f4b676..516551c9f172 100644
--- a/Documentation/vm/slabinfo.c
+++ b/tools/slub/slabinfo.c
@@ -607,7 +607,7 @@ static int debug_opt_scan(char *opt)
 	}
 
 	for ( ; *opt; opt++)
-                switch (*opt) {
+		switch (*opt) {
 		case 'F' : case 'f':
 			if (sanity)
 				return 0;
@@ -1127,7 +1127,7 @@ static void read_slab_dir(void)
 			continue;
 		switch (de->d_type) {
 		   case DT_LNK:
-                        alias->name = strdup(de->d_name);
+			alias->name = strdup(de->d_name);
 			count = readlink(de->d_name, buffer, sizeof(buffer));
 
 			if (count < 0)
@@ -1143,7 +1143,7 @@ static void read_slab_dir(void)
 		   case DT_DIR:
 			if (chdir(de->d_name))
 				fatal("Unable to access slab %s\n", slab->name);
-                        slab->name = strdup(de->d_name);
+			slab->name = strdup(de->d_name);
 			slab->alias = 0;
 			slab->refs = 0;
 			slab->aliases = get_obj("aliases");