author     Christoph Lameter <clameter@sgi.com>                    2007-05-06 17:49:42 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-07 15:12:54 -0400
commit     77c5e2d01af871f4bfbe08feefa3d5118cb1001b
tree       434fb61e1570f0d8bab479508181e034fc6b6c45
parent     b49af68ff9fc5d6e0d96704a1843968b91cc73c6
SLUB: fix object tracking
Object tracking did not work the right way for several call chains. Fix this up by adding a new parameter to slab_alloc and slab_free that specifies the caller address explicitly.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 mm/slub.c | 57
 1 file changed, 20 insertions(+), 37 deletions(-)
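For context on why the caller address is now passed down explicitly: __builtin_return_address(0) evaluates to the return address of the function it appears in, so a tracking call buried inside a shared helper records whichever wrapper invoked that helper rather than the code that originally requested the allocation (everything routed through __kmalloc() or kmem_cache_zalloc(), for instance, gets attributed to those wrappers). Capturing the address in each exported entry point and handing it down as an argument avoids that. Below is a minimal user-space sketch of the same pattern, assuming only a GCC-compatible compiler for __builtin_return_address(); the names do_alloc and tracked_* are invented for illustration and are not kernel APIs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Core allocator: takes the caller address as an explicit argument
 * instead of computing __builtin_return_address(0) itself, so the
 * recorded address survives any number of wrapper layers.
 */
static void *do_alloc(size_t size, void *caller)
{
	void *p = malloc(size);

	if (p)
		printf("allocated %zu bytes for caller %p\n", size, caller);
	return p;
}

/*
 * Thin public wrappers: each captures its own return address, i.e.
 * the location that called the wrapper, and forwards it unchanged,
 * mirroring what kmem_cache_alloc()/__kmalloc() do after this patch.
 */
void *tracked_alloc(size_t size)
{
	return do_alloc(size, __builtin_return_address(0));
}

void *tracked_zalloc(size_t size)
{
	/*
	 * Had do_alloc() called __builtin_return_address(0) itself,
	 * every allocation made through this wrapper would be
	 * attributed to tracked_zalloc() instead of to its caller.
	 */
	void *p = do_alloc(size, __builtin_return_address(0));

	if (p)
		memset(p, 0, size);
	return p;
}

int main(void)
{
	free(tracked_alloc(32));
	free(tracked_zalloc(64));
	return 0;
}

Built with gcc, both printed caller addresses should point into main(), which is exactly the information SLAB_STORE_USER tracking is meant to record.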
diff --git a/mm/slub.c b/mm/slub.c
index 347c11e80d8e..cfc5301afe42 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -297,9 +297,6 @@ static void set_track(struct kmem_cache *s, void *object,
 	memset(p, 0, sizeof(struct track));
 }
 
-#define set_tracking(__s, __o, __a) set_track(__s, __o, __a, \
-			__builtin_return_address(0))
-
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	if (s->flags & SLAB_STORE_USER) {
@@ -1163,8 +1160,8 @@ static void flush_all(struct kmem_cache *s)
  * Fastpath is not possible if we need to get a new slab or have
  * debugging enabled (which means all slabs are marked with PageError)
  */
-static __always_inline void *slab_alloc(struct kmem_cache *s,
-				gfp_t gfpflags, int node)
+static void *slab_alloc(struct kmem_cache *s,
+				gfp_t gfpflags, int node, void *addr)
 {
 	struct page *page;
 	void **object;
@@ -1238,20 +1235,20 @@ debug:
 	if (!alloc_object_checks(s, page, object))
 		goto another_slab;
 	if (s->flags & SLAB_STORE_USER)
-		set_tracking(s, object, TRACK_ALLOC);
+		set_track(s, object, TRACK_ALLOC, addr);
 	goto have_object;
 }
 
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
-	return slab_alloc(s, gfpflags, -1);
+	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 #ifdef CONFIG_NUMA
 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
-	return slab_alloc(s, gfpflags, node);
+	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_alloc_node);
 #endif
@@ -1262,7 +1259,8 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
  *
  * No special cachelines need to be read
  */
-static void slab_free(struct kmem_cache *s, struct page *page, void *x)
+static void slab_free(struct kmem_cache *s, struct page *page,
+				void *x, void *addr)
 {
 	void *prior;
 	void **object = (void *)x;
@@ -1314,20 +1312,20 @@ slab_empty:
 	return;
 
 debug:
-	if (free_object_checks(s, page, x))
-		goto checks_ok;
-	goto out_unlock;
+	if (!free_object_checks(s, page, x))
+		goto out_unlock;
+	if (s->flags & SLAB_STORE_USER)
+		set_track(s, x, TRACK_FREE, addr);
+	goto checks_ok;
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page * page;
+	struct page *page;
 
 	page = virt_to_head_page(x);
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, x, TRACK_FREE);
-	slab_free(s, page, x);
+	slab_free(s, page, x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kmem_cache_free);
 
@@ -2018,7 +2016,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc(s, flags);
+		return slab_alloc(s, flags, -1, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc);
@@ -2029,7 +2027,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 	struct kmem_cache *s = get_slab(size, flags);
 
 	if (s)
-		return kmem_cache_alloc_node(s, flags, node);
+		return slab_alloc(s, flags, node, __builtin_return_address(0));
 	return NULL;
 }
 EXPORT_SYMBOL(__kmalloc_node);
@@ -2075,12 +2073,9 @@ void kfree(const void *x)
 		return;
 
 	page = virt_to_head_page(x);
-
 	s = page->slab;
 
-	if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
-		set_tracking(s, (void *)x, TRACK_FREE);
-	slab_free(s, page, (void *)x);
+	slab_free(s, page, (void *)x, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(kfree);
 
@@ -2289,7 +2284,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
 {
 	void *x;
 
-	x = kmem_cache_alloc(s, flags);
+	x = slab_alloc(s, flags, -1, __builtin_return_address(0));
 	if (x)
 		memset(x, 0, s->objsize);
 	return x;
@@ -2497,34 +2492,22 @@ static void resiliency_test(void) {};
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc(s, gfpflags);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, -1, caller);
 }
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 					int node, void *caller)
 {
 	struct kmem_cache *s = get_slab(size, gfpflags);
-	void *object;
 
 	if (!s)
 		return NULL;
 
-	object = kmem_cache_alloc_node(s, gfpflags, node);
-
-	if (object && (s->flags & SLAB_STORE_USER))
-		set_track(s, object, TRACK_ALLOC, caller);
-
-	return object;
+	return slab_alloc(s, gfpflags, node, caller);
 }
 
 #ifdef CONFIG_SYSFS