author		Akinobu Mita <akinobu.mita@gmail.com>	2007-05-06 17:49:58 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:55 -0400
commit		824ebef122153a03925ae0ed211b4e8568d1c8db (patch)
tree		2a9ce9acc187b6a6985064c773cbf850ad405a07 /mm/slab.c
parent		f0f3980b21508bd573eff1746d469436f50a903d (diff)
fault injection: fix failslab with CONFIG_NUMA
Currently failslab injects failures into ____cache_alloc().  But with
CONFIG_NUMA enabled this is not enough to make the actual slab allocator
functions (kmalloc, kmem_cache_alloc, ...) return NULL.

This patch moves the fault injection hook into __cache_alloc() and
__cache_alloc_node().  These sit earlier in the call path than
____cache_alloc() and make it possible to inject failures into the slab
allocators with CONFIG_NUMA.
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
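For context, the reason the old hook location was ineffective on NUMA: under
CONFIG_NUMA a NULL coming out of ____cache_alloc() is treated as a local-node
shortage and the allocator falls back to other nodes, so an injected failure
never reached the caller; the kmem_cache_alloc_node() path, meanwhile, does
not necessarily go through ____cache_alloc() at all.  The sketch below is a
simplified paraphrase of the NUMA-side __do_cache_alloc() from kernels of
this era (function names are real, details trimmed), not the exact source,
showing how the injected NULL was swallowed:

#ifdef CONFIG_NUMA
/*
 * Simplified sketch, not the exact mm/slab.c source: the off-node
 * fallback "rescues" a NULL coming out of ____cache_alloc(), which is
 * exactly where the old fault injection hook lived.
 */
static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
	void *objp;

	objp = ____cache_alloc(cache, flags);	/* old hook returned NULL here */
	if (!objp)
		/* local node looks exhausted: try other nodes, hiding the failure */
		objp = ____cache_alloc_node(cache, flags, numa_node_id());

	return objp;
}
#endif	/* CONFIG_NUMA */

Hooking __cache_alloc() and __cache_alloc_node() instead puts the check ahead
of any per-node fallback, so both kmem_cache_alloc() and
kmem_cache_alloc_node() see the injected NULL.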
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
@@ -3170,7 +3170,7 @@ static int __init failslab_debugfs(void)
 	struct dentry *dir;
 	int err;
 
 	err = init_fault_attr_dentries(&failslab.attr, "failslab");
 	if (err)
 		return err;
 	dir = failslab.attr.dentries.dir;
@@ -3208,9 +3208,6 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 
 	check_irq_off();
 
-	if (should_failslab(cachep, flags))
-		return NULL;
-
 	ac = cpu_cache_get(cachep);
 	if (likely(ac->avail)) {
 		STATS_INC_ALLOCHIT(cachep);
@@ -3402,6 +3399,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
+	if (should_failslab(cachep, flags))
+		return NULL;
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 
@@ -3472,6 +3472,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
+	if (should_failslab(cachep, flags))
+		return NULL;
+
 	cache_alloc_debugcheck_before(cachep, flags);
 	local_irq_save(save_flags);
 	objp = __do_cache_alloc(cachep, flags);
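For reference, failslab is driven through the generic fault injection
framework: booting with failslab=<interval>,<probability>,<space>,<times>, or
adjusting the probability, interval, times and ignore-gfp-wait entries that
init_fault_attr_dentries() creates under debugfs, arms the should_failslab()
check.  With this fix an injected failure is visible to
kmalloc()/kmem_cache_alloc() callers on CONFIG_NUMA kernels as well.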