author		David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
committer	David Woodhouse <David.Woodhouse@intel.com>	2009-01-05 04:50:33 -0500
commit		353816f43d1fb340ff2d9a911dd5d0799c09f6a5 (patch)
tree		517290fd884d286fe2971137ac89f89e3567785a /mm/slab.c
parent		160bbab3000dafccbe43688e48208cecf4deb879 (diff)
parent		fe0bdec68b77020281dc814805edfe594ae89e0f (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
arch/arm/mach-pxa/corgi.c
arch/arm/mach-pxa/poodle.c
arch/arm/mach-pxa/spitz.c
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	91
1 file changed, 14 insertions, 77 deletions
@@ -2123,6 +2123,8 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
  *
  * @name must be valid until the cache is destroyed. This implies that
  * the module calling this has to destroy the cache before getting unloaded.
+ * Note that kmem_cache_name() is not guaranteed to return the same pointer,
+ * therefore applications must manage it themselves.
  *
  * The flags are
  *
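
The comment added above documents a lifetime rule rather than a code change: the name string passed to kmem_cache_create() must outlive the cache, and kmem_cache_name() may not hand back the original pointer. A minimal sketch of how a module would typically honour this; the widget names are hypothetical and not part of this patch:

	#include <linux/module.h>
	#include <linux/slab.h>

	struct widget { int id; };

	static struct kmem_cache *widget_cachep;

	static int __init widget_init(void)
	{
		/* The string literal lives in the module image, so it remains
		 * valid for as long as the module (and hence the cache) exists. */
		widget_cachep = kmem_cache_create("widget_cache",
						  sizeof(struct widget), 0,
						  SLAB_HWCACHE_ALIGN, NULL);
		return widget_cachep ? 0 : -ENOMEM;
	}

	static void __exit widget_exit(void)
	{
		/* Destroy the cache before the module (and the name string) goes away. */
		kmem_cache_destroy(widget_cachep);
	}

	module_init(widget_init);
	module_exit(widget_exit);
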
@@ -2155,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well. Please see cpuup_callback
+	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
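
The updated comment is only a rename (cpu_online_map became cpu_online_mask), but the locking pattern it refers to is worth spelling out: the CPU-hotplug reference and cache_chain_mutex are taken together so the set of online CPUs cannot change while the cache chain is modified. A sketch of the pattern, not a copy of the function:

	get_online_cpus();		/* pin the set of online CPUs */
	mutex_lock(&cache_chain_mutex);	/* serialise against cpuup_callback */

	/* ... create the cache and link it into the cache chain ... */

	mutex_unlock(&cache_chain_mutex);
	put_online_cpus();		/* allow CPU hotplug again */
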
@@ -2609,7 +2611,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	if (OFF_SLAB(cachep)) {
 		/* Slab management obj is off-slab. */
 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
-					      local_flags & ~GFP_THISNODE, nodeid);
+					      local_flags, nodeid);
 		if (!slabp)
 			return NULL;
 	} else {
@@ -2997,7 +2999,7 @@ retry:
 		 * there must be at least one object available for
 		 * allocation.
 		 */
-		BUG_ON(slabp->inuse < 0 || slabp->inuse >= cachep->num);
+		BUG_ON(slabp->inuse >= cachep->num);
 
 		while (slabp->inuse < cachep->num && batchcount--) {
 			STATS_INC_ALLOCED(cachep);
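
The dropped half of the BUG_ON() is not a behaviour change: slabp->inuse is an unsigned count, so the `< 0` comparison can never be true and the compiler discards it. In miniature (hypothetical snippet, not from the patch):

	unsigned int inuse = 0;

	BUG_ON(inuse < 0);		/* dead code: an unsigned value is never negative */
	BUG_ON(inuse >= cachep->num);	/* the only check that can actually fire */
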
@@ -3106,79 +3108,14 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-#ifdef CONFIG_FAILSLAB
-
-static struct failslab_attr {
-
-	struct fault_attr attr;
-
-	u32 ignore_gfp_wait;
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-	struct dentry *ignore_gfp_wait_file;
-#endif
-
-} failslab = {
-	.attr = FAULT_ATTR_INITIALIZER,
-	.ignore_gfp_wait = 1,
-};
-
-static int __init setup_failslab(char *str)
-{
-	return setup_fault_attr(&failslab.attr, str);
-}
-__setup("failslab=", setup_failslab);
-
-static int should_failslab(struct kmem_cache *cachep, gfp_t flags)
+static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
 {
 	if (cachep == &cache_cache)
-		return 0;
-	if (flags & __GFP_NOFAIL)
-		return 0;
-	if (failslab.ignore_gfp_wait && (flags & __GFP_WAIT))
-		return 0;
+		return false;
 
-	return should_fail(&failslab.attr, obj_size(cachep));
+	return should_failslab(obj_size(cachep), flags);
 }
 
-#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
-
-static int __init failslab_debugfs(void)
-{
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
-	struct dentry *dir;
-	int err;
-
-	err = init_fault_attr_dentries(&failslab.attr, "failslab");
-	if (err)
-		return err;
-	dir = failslab.attr.dentries.dir;
-
-	failslab.ignore_gfp_wait_file =
-		debugfs_create_bool("ignore-gfp-wait", mode, dir,
-				    &failslab.ignore_gfp_wait);
-
-	if (!failslab.ignore_gfp_wait_file) {
-		err = -ENOMEM;
-		debugfs_remove(failslab.ignore_gfp_wait_file);
-		cleanup_fault_attr_dentries(&failslab.attr);
-	}
-
-	return err;
-}
-
-late_initcall(failslab_debugfs);
-
-#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
-
-#else /* CONFIG_FAILSLAB */
-
-static inline int should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	return 0;
-}
-
-#endif /* CONFIG_FAILSLAB */
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
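
The slab-local fault-injection machinery (the failslab_attr structure, the `failslab=` boot parameter and the debugfs knob) is deleted here because it moved into common code shared with the other allocators; slab.c keeps only the cache_cache exemption and delegates to the generic should_failslab(). Based on the logic removed above, the shared helper presumably looks roughly like the sketch below; the exact contents of mm/failslab.c are an assumption, not shown in this diff:

	/* Sketch of the common helper the slab-specific code was folded into. */
	bool should_failslab(size_t size, gfp_t gfpflags)
	{
		if (gfpflags & __GFP_NOFAIL)
			return false;	/* never fail allocations that may not fail */

		if (failslab.ignore_gfp_wait && (gfpflags & __GFP_WAIT))
			return false;	/* by default only fail atomic allocations */

		return should_fail(&failslab.attr, size);
	}
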
@@ -3381,7 +3318,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3457,7 +3394,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	if (should_failslab(cachep, flags))
+	if (slab_should_failslab(cachep, flags))
 		return NULL;
 
 	cache_alloc_debugcheck_before(cachep, flags);
@@ -3686,9 +3623,9 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 EXPORT_SYMBOL(__kmalloc_node);
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-		int node, void *caller)
+		int node, unsigned long caller)
 {
-	return __do_kmalloc_node(size, flags, node, caller);
+	return __do_kmalloc_node(size, flags, node, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
 #else
@@ -3730,9 +3667,9 @@ void *__kmalloc(size_t size, gfp_t flags)
 }
 EXPORT_SYMBOL(__kmalloc);
 
-void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
+void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 {
-	return __do_kmalloc(size, flags, caller);
+	return __do_kmalloc(size, flags, (void *)caller);
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
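
The last two hunks change the tracked-caller variants to take the caller address as an unsigned long rather than a void pointer; internally the value is cast back because __do_kmalloc() and __do_kmalloc_node() keep their old signatures. Call sites would then pass the return address as an integer, e.g. via _RET_IP_ (which is (unsigned long)__builtin_return_address(0)). A hedged sketch of how the wrapper macros in the slab headers would line up with the new signatures; the exact header contents are not part of this diff:

	#define kmalloc_track_caller(size, flags) \
		__kmalloc_track_caller(size, flags, _RET_IP_)

	#define kmalloc_node_track_caller(size, flags, node) \
		__kmalloc_node_track_caller(size, flags, node, _RET_IP_)
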