author		Ingo Molnar <mingo@elte.hu>	2008-12-31 02:19:48 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-31 02:19:48 -0500
commit		818fa7f3908c7bd6c0045e9d94dc23a899ef6144
tree		ad3435c3f57c8222ad61709b716168932f13be6c /mm/slab.c
parent		3fd4bc015ef879a7d2b955ce97fb125e3a51ba7e
parent		5fdf7e5975a0b0f6a0370655612c5dca3fd6311b
Merge branch 'tracing/kmemtrace' into tracing/kmemtrace2
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	81
1 file changed, 9 insertions(+), 72 deletions(-)
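
In short: this merge drops the SLAB-private failslab fault-injection boilerplate in favor of a thin slab_should_failslab() wrapper around a shared should_failslab() helper (the bulk of the 72 deleted lines), stops masking GFP_THISNODE off in alloc_slabmgmt(), removes the slabp->inuse < 0 half of a BUG_ON (inuse is unsigned, so that half of the test could never fire), and adds a kmem_cache_name() caveat to the kmem_cache_create() kerneldoc. A sketch of the extracted helper follows the large hunk below.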
diff --git a/mm/slab.c b/mm/slab.c
index 7f72bb386a09..83075f36df7b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2132,6 +2132,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
@@ -2618,7 +2620,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -3006,7 +3008,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
@@ -3115,79 +3117,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-	struct fault_attr attr;
-
-	u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-	struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-	return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (cachep == &cache_cache)
-		return 0;
-	if (flags & __GFP_NOFAIL)
-		return 0;
-	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-		return 0;
+		return false;
 
-	return should_fail(&failslab.attr, obj_size(cachep));
+	return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-	struct dentry *dir;
-	int err;
-
-	err = init_fault_attr_dentries(&failslab.attr, "failslab");
-	if (err)
-		return err;
-	dir = failslab.attr.dentries.dir;
-
-	failslab.ignore_gfp_wait_file =
-		debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				      &failslab.ignore_gfp_wait);
-
-	if (!failslab.ignore_gfp_wait_file) {
-		err = -ENOMEM;
-		debugfs_remove(failslab.ignore_gfp_wait_file);
-		cleanup_fault_attr_dentries(&failslab.attr);
-	}
-
-	return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
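
For orientation (not part of this hunk): the shared should_failslab() that the new wrapper calls is not defined in mm/slab.c; the deleted fault_attr machinery presumably moved with it into a common file (mm/failslab.c in mainline of this era). A minimal sketch reconstructed from the lines removed above — the file name and exact signature are assumptions inferred from the new call site should_failslab(obj_size(cachep), flags); only the logic is taken from this diff:

/*
 * Sketch of the extracted helper, assumed to live in mm/failslab.c.
 * Reconstructed from the code deleted above; not part of this patch.
 */
#include <linux/fault-inject.h>

static struct {
	struct fault_attr attr;
	u32 ignore_gfp_wait;
} failslab = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
};

bool should_failslab(size_t size, gfp_t flags)
{
	/* __GFP_NOFAIL allocations must never be failed. */
	if (flags & __GFP_NOFAIL)
		return false;

	/* By default, only fail atomic (!__GFP_WAIT) allocations. */
	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
		return false;

	return should_fail(&failslab.attr, size);
}

static int __init setup_failslab(char *str)
{
	return setup_fault_attr(&failslab.attr, str);
}
__setup("failslab=", setup_failslab);

The cache_cache check stays behind in the mm/slab.c wrapper, which is the natural split: cache_cache is SLAB-internal, while the GFP-based policy is allocator-agnostic and can be shared with SLUB.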
@@ -3390,7 +3327,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3466,7 +3403,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
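
Usage note (from the generic fault-injection interface, not this diff): failslab is driven either by the failslab=<interval>,<probability>,<space>,<times> boot parameter parsed via setup_fault_attr(), or at runtime through debugfs under /sys/kernel/debug/failslab/ when CONFIG_FAULT_INJECTION_DEBUG_FS is enabled. The ignore-gfp-wait knob seen in the deleted code restricts injected failures to atomic (!__GFP_WAIT) allocations, which is why it defaults to 1.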