author		Andrey Ryabinin <a.ryabinin@samsung.com>	2014-08-06 19:04:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:14 -0400
commit		02e72cc61713185013d958baba508288ba2a0157 (patch)
tree		34881ac42d6205fb30aeebb343983d7807a310da /mm/slub.c
parent		c07b8183cbb86d34007e5a3935e0ec89f5bb83c6 (diff)
mm: slub: SLUB_DEBUG=n: use the same alloc/free hooks as for SLUB_DEBUG=y
There are two versions of the alloc/free hooks now - one for
CONFIG_SLUB_DEBUG=y and another one for CONFIG_SLUB_DEBUG=n.
I see no reason why calls to other debugging subsystems (LOCKDEP,
DEBUG_ATOMIC_SLEEP, KMEMCHECK and FAILSLAB) should be hidden under
SLUB_DEBUG. All of these features should work regardless of the
SLUB_DEBUG config, as each of them already has its own Kconfig option.
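The reason a single set of hooks can serve both configs is that each
debugging subsystem already compiles its entry points down to no-op stubs
when its own Kconfig option is off. A minimal sketch of that pattern,
modelled on include/linux/kmemleak.h rather than quoted from it:

/*
 * Each debug subsystem ships a real implementation and a no-op stub,
 * selected by its own Kconfig option, so a shared hook that calls it
 * produces no code at all in production builds.
 */
#ifdef CONFIG_DEBUG_KMEMLEAK
extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
			   gfp_t gfp);
#else
static inline void kmemleak_alloc(const void *ptr, size_t size,
				  int min_count, gfp_t gfp)
{
}
#endif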
This also fixes failslab for the CONFIG_SLUB_DEBUG=n configuration. It
simply did not work before, because the should_failslab() call was in a
hook hidden under "#ifdef CONFIG_SLUB_DEBUG #else".
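For reference, the allocation fast path acts on the hook's return value;
a simplified sketch of the call site in slab_alloc_node() (surrounding
code elided, not verbatim):

static __always_inline void *slab_alloc_node(struct kmem_cache *s,
		gfp_t gfpflags, int node, unsigned long addr)
{
	/*
	 * A non-zero return from the hook (i.e. from should_failslab())
	 * fails the allocation - the whole point of FAILSLAB fault
	 * injection. With SLUB_DEBUG=n the old stub returned 0
	 * unconditionally, so injection never triggered.
	 */
	if (slab_pre_alloc_hook(s, gfpflags))
		return NULL;

	/* ... proceed with the per-cpu freelist fast path ... */
}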
Note: there is one hidden change in the allocation path for SLUB_DEBUG=n
with all other debugging features disabled. The might_sleep_if() call
can generate some code even if DEBUG_ATOMIC_SLEEP=n. For
PREEMPT_VOLUNTARY=y, might_sleep() inserts a _cond_resched() call, but I
think that should be ok.
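A simplified sketch of how might_sleep_if() expands (condensed from
include/linux/kernel.h of this era; not a verbatim quote):

/*
 * With DEBUG_ATOMIC_SLEEP=n the __might_sleep() debug check disappears,
 * but PREEMPT_VOLUNTARY=y still turns might_resched() into a voluntary
 * preemption point - the "concealed change" mentioned above.
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
# define might_resched() _cond_resched()
#else
# define might_resched() do { } while (0)
#endif

#ifndef CONFIG_DEBUG_ATOMIC_SLEEP
# define might_sleep() do { might_resched(); } while (0)
#endif

#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)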
Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	97
1 file changed, 36 insertions(+), 61 deletions(-)
@@ -940,60 +940,6 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
 }
 
 /*
- * Hooks for other subsystems that check memory allocations. In a typical
- * production configuration these hooks all should produce no code at all.
- */
-static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
-{
-	kmemleak_alloc(ptr, size, 1, flags);
-}
-
-static inline void kfree_hook(const void *x)
-{
-	kmemleak_free(x);
-}
-
-static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
-{
-	flags &= gfp_allowed_mask;
-	lockdep_trace_alloc(flags);
-	might_sleep_if(flags & __GFP_WAIT);
-
-	return should_failslab(s->object_size, flags, s->flags);
-}
-
-static inline void slab_post_alloc_hook(struct kmem_cache *s,
-					gfp_t flags, void *object)
-{
-	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
-}
-
-static inline void slab_free_hook(struct kmem_cache *s, void *x)
-{
-	kmemleak_free_recursive(x, s->flags);
-
-	/*
-	 * Trouble is that we may no longer disable interrupts in the fast path
-	 * So in order to make the debug calls that expect irqs to be
-	 * disabled we need to disable interrupts temporarily.
-	 */
-#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
-	{
-		unsigned long flags;
-
-		local_irq_save(flags);
-		kmemcheck_slab_free(s, x, s->object_size);
-		debug_check_no_locks_freed(x, s->object_size);
-		local_irq_restore(flags);
-	}
-#endif
-	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(x, s->object_size);
-}
-
-/*
  * Tracking of fully allocated slabs for debugging purposes.
  */
 static void add_full(struct kmem_cache *s,
@@ -1277,6 +1223,12 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node,
 static inline void dec_slabs_node(struct kmem_cache *s, int node,
 							int objects) {}
 
+#endif /* CONFIG_SLUB_DEBUG */
+
+/*
+ * Hooks for other subsystems that check memory allocations. In a typical
+ * production configuration these hooks all should produce no code at all.
+ */
 static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 {
 	kmemleak_alloc(ptr, size, 1, flags);
@@ -1288,21 +1240,44 @@ static inline void kfree_hook(const void *x)
 }
 
 static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
-{ return 0; }
+{
+	flags &= gfp_allowed_mask;
+	lockdep_trace_alloc(flags);
+	might_sleep_if(flags & __GFP_WAIT);
 
-static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
-							void *object)
+	return should_failslab(s->object_size, flags, s->flags);
+}
+
+static inline void slab_post_alloc_hook(struct kmem_cache *s,
+					gfp_t flags, void *object)
 {
-	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
-						flags & gfp_allowed_mask);
+	flags &= gfp_allowed_mask;
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
+	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
 }
 
 static inline void slab_free_hook(struct kmem_cache *s, void *x)
 {
 	kmemleak_free_recursive(x, s->flags);
-}
 
-#endif /* CONFIG_SLUB_DEBUG */
+	/*
+	 * Trouble is that we may no longer disable interrupts in the fast path
+	 * So in order to make the debug calls that expect irqs to be
+	 * disabled we need to disable interrupts temporarily.
+	 */
+#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
+	{
+		unsigned long flags;
+
+		local_irq_save(flags);
+		kmemcheck_slab_free(s, x, s->object_size);
+		debug_check_no_locks_freed(x, s->object_size);
+		local_irq_restore(flags);
+	}
+#endif
+	if (!(s->flags & SLAB_DEBUG_OBJECTS))
+		debug_check_no_obj_freed(x, s->object_size);
+}
 
 /*
  * Slab allocation and freeing