author    | Christoph Lameter <cl@linux.com>        | 2012-07-06 16:25:13 -0400
committer | Pekka Enberg <penberg@kernel.org>       | 2012-07-09 05:13:42 -0400
commit    | 20cea9683ecc6dd75a80c0dd02dc69c64e95be75 (patch)
tree      | c52994730d2d280f9300197cc4f561b15e3dd4b2
parent    | 18004c5d4084d965aa1396392706b8688306427a (diff)
mm, sl[aou]b: Move kmem_cache_create mutex handling to common code

Move the mutex handling into the common kmem_cache_create() function.

Then we can also move more checks out of SLAB's kmem_cache_create()
into the common code.
Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
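
The net effect of the patch below: the common kmem_cache_create() in mm/slab_common.c now takes get_online_cpus() and slab_mutex exactly once, and the allocator-specific __kmem_cache_create() implementations run with slab_mutex already held. A simplified sketch of that resulting shape (not the verbatim kernel code; the CONFIG_DEBUG_VM duplicate-name scan and its oops: error path are elided):

/*
 * Simplified sketch of the common entry point after this change.
 * get_online_cpus() keeps cpu_online_mask stable and slab_mutex
 * serializes cache creation for all three allocators.
 */
struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		size_t align, unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s;

	get_online_cpus();
	mutex_lock(&slab_mutex);

	/* CONFIG_DEBUG_VM sanity checks (duplicate names etc.) go here */

	s = __kmem_cache_create(name, size, align, flags, ctor);

	mutex_unlock(&slab_mutex);
	put_online_cpus();

	return s;
}

SLAB's open-coded slab_is_available() lock/unlock pair and SLUB's internal locking become unnecessary once the caller holds the mutex, which is exactly what the hunks below remove.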
-rw-r--r-- | mm/slab.c        | 52
-rw-r--r-- | mm/slab_common.c | 41
-rw-r--r-- | mm/slub.c        | 28
3 files changed, 54 insertions, 67 deletions
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2228,55 +2228,10 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 {
 	size_t left_over, slab_size, ralign;
-	struct kmem_cache *cachep = NULL, *pc;
+	struct kmem_cache *cachep = NULL;
 	gfp_t gfp;
 
-	/*
-	 * Sanity checks... these are all serious usage bugs.
-	 */
-	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
-	    size > KMALLOC_MAX_SIZE) {
-		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
-				name);
-		BUG();
-	}
-
-	/*
-	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_mask as well. Please see cpuup_callback
-	 */
-	if (slab_is_available()) {
-		get_online_cpus();
-		mutex_lock(&slab_mutex);
-	}
-
-	list_for_each_entry(pc, &slab_caches, list) {
-		char tmp;
-		int res;
-
-		/*
-		 * This happens when the module gets unloaded and doesn't
-		 * destroy its slab cache and no-one else reuses the vmalloc
-		 * area of the module. Print a warning.
-		 */
-		res = probe_kernel_address(pc->name, tmp);
-		if (res) {
-			printk(KERN_ERR
-			       "SLAB: cache with size %d has lost its name\n",
-			       pc->size);
-			continue;
-		}
-
-		if (!strcmp(pc->name, name)) {
-			printk(KERN_ERR
-			       "kmem_cache_create: duplicate cache %s\n", name);
-			dump_stack();
-			goto oops;
-		}
-	}
-
 #if DEBUG
-	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
 #if FORCED_DEBUG
 	/*
 	 * Enable redzoning and last user accounting, except for caches with
@@ -2495,11 +2450,6 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/* cache setup completed, link it into the list */
 	list_add(&cachep->list, &slab_caches);
-oops:
-	if (slab_is_available()) {
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-	}
 	return cachep;
 }
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 50e1ff10bff9..12637cee1f95 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -11,7 +11,8 @@
 #include <linux/memory.h>
 #include <linux/compiler.h>
 #include <linux/module.h>
-
+#include <linux/cpu.h>
+#include <linux/uaccess.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
@@ -61,8 +62,46 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 	}
 #endif
 
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+
+#ifdef CONFIG_DEBUG_VM
+	list_for_each_entry(s, &slab_caches, list) {
+		char tmp;
+		int res;
+
+		/*
+		 * This happens when the module gets unloaded and doesn't
+		 * destroy its slab cache and no-one else reuses the vmalloc
+		 * area of the module. Print a warning.
+		 */
+		res = probe_kernel_address(s->name, tmp);
+		if (res) {
+			printk(KERN_ERR
+			       "Slab cache with size %d has lost its name\n",
+			       s->object_size);
+			continue;
+		}
+
+		if (!strcmp(s->name, name)) {
+			printk(KERN_ERR "kmem_cache_create(%s): Cache name"
+				" already exists.\n",
+				name);
+			dump_stack();
+			s = NULL;
+			goto oops;
+		}
+	}
+
+	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
+#endif
+
 	s = __kmem_cache_create(name, size, align, flags, ctor);
 
+oops:
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+
 #ifdef CONFIG_DEBUG_VM
 out:
 #endif
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3911,7 +3911,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *s;
 	char *n;
 
-	mutex_lock(&slab_mutex);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
@@ -3924,37 +3923,36 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
-			goto err;
+			return NULL;
 		}
-		mutex_unlock(&slab_mutex);
 		return s;
 	}
 
 	n = kstrdup(name, GFP_KERNEL);
 	if (!n)
-		goto err;
+		return NULL;
 
 	s = kmalloc(kmem_size, GFP_KERNEL);
 	if (s) {
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
+			int r;
+
 			list_add(&s->list, &slab_caches);
 			mutex_unlock(&slab_mutex);
-			if (sysfs_slab_add(s)) {
-				mutex_lock(&slab_mutex);
-				list_del(&s->list);
-				kfree(n);
-				kfree(s);
-				goto err;
-			}
-			return s;
+			r = sysfs_slab_add(s);
+			mutex_lock(&slab_mutex);
+
+			if (!r)
+				return s;
+
+			list_del(&s->list);
+			kmem_cache_close(s);
 		}
 		kfree(s);
 	}
 	kfree(n);
-err:
-	mutex_unlock(&slab_mutex);
-	return s;
+	return NULL;
 }
 
 #ifdef CONFIG_SMP