aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2012-05-30 13:54:46 -0400
committerPekka Enberg <penberg@kernel.org>2012-08-16 02:45:04 -0400
commit19c7ff9ecd89441096dab6a56f926f7df8ba850a (patch)
tree61bc27b47eb4867252b4d6aaa9c0ae99994b7556 /mm/slub.c
parent455ce9eb1cfa083da0def023094190aeb133855a (diff)
slub: Take node lock during object free checks
Only applies to scenarios where debugging is on: Validation of slabs can currently occur while debugging information is updated from the fast paths of the allocator. This results in various races where we get false reports about slab metadata not being in order. This patch makes the fast paths take the node lock so that serialization with slab validation will occur. Causes additional slowdown in debug scenarios. Reported-by: Waiman Long <Waiman.Long@hp.com> Signed-off-by: Christoph Lameter <cl@linux.com> Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c30
1 file changed, 18 insertions, 12 deletions
diff --git a/mm/slub.c b/mm/slub.c
index c83fe96f5e42..e131084e87a3 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1069,13 +1069,13 @@ bad:
1069 return 0; 1069 return 0;
1070} 1070}
1071 1071
1072static noinline int free_debug_processing(struct kmem_cache *s, 1072static noinline struct kmem_cache_node *free_debug_processing(
1073 struct page *page, void *object, unsigned long addr) 1073 struct kmem_cache *s, struct page *page, void *object,
1074 unsigned long addr, unsigned long *flags)
1074{ 1075{
1075 unsigned long flags; 1076 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1076 int rc = 0;
1077 1077
1078 local_irq_save(flags); 1078 spin_lock_irqsave(&n->list_lock, *flags);
1079 slab_lock(page); 1079 slab_lock(page);
1080 1080
1081 if (!check_slab(s, page)) 1081 if (!check_slab(s, page))
@@ -1113,15 +1113,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
1113 set_track(s, object, TRACK_FREE, addr); 1113 set_track(s, object, TRACK_FREE, addr);
1114 trace(s, page, object, 0); 1114 trace(s, page, object, 0);
1115 init_object(s, object, SLUB_RED_INACTIVE); 1115 init_object(s, object, SLUB_RED_INACTIVE);
1116 rc = 1;
1117out: 1116out:
1118 slab_unlock(page); 1117 slab_unlock(page);
1119 local_irq_restore(flags); 1118 /*
1120 return rc; 1119 * Keep node_lock to preserve integrity
1120 * until the object is actually freed
1121 */
1122 return n;
1121 1123
1122fail: 1124fail:
1125 slab_unlock(page);
1126 spin_unlock_irqrestore(&n->list_lock, *flags);
1123 slab_fix(s, "Object at 0x%p not freed", object); 1127 slab_fix(s, "Object at 0x%p not freed", object);
1124 goto out; 1128 return NULL;
1125} 1129}
1126 1130
1127static int __init setup_slub_debug(char *str) 1131static int __init setup_slub_debug(char *str)
@@ -1214,8 +1218,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
1214static inline int alloc_debug_processing(struct kmem_cache *s, 1218static inline int alloc_debug_processing(struct kmem_cache *s,
1215 struct page *page, void *object, unsigned long addr) { return 0; } 1219 struct page *page, void *object, unsigned long addr) { return 0; }
1216 1220
1217static inline int free_debug_processing(struct kmem_cache *s, 1221static inline struct kmem_cache_node *free_debug_processing(
1218 struct page *page, void *object, unsigned long addr) { return 0; } 1222 struct kmem_cache *s, struct page *page, void *object,
1223 unsigned long addr, unsigned long *flags) { return NULL; }
1219 1224
1220static inline int slab_pad_check(struct kmem_cache *s, struct page *page) 1225static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1221 { return 1; } 1226 { return 1; }
@@ -2452,7 +2457,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2452 2457
2453 stat(s, FREE_SLOWPATH); 2458 stat(s, FREE_SLOWPATH);
2454 2459
2455 if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr)) 2460 if (kmem_cache_debug(s) &&
2461 !(n = free_debug_processing(s, page, x, addr, &flags)))
2456 return; 2462 return;
2457 2463
2458 do { 2464 do {