author	Christoph Lameter <cl@linux.com>	2012-09-04 19:18:33 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 05:00:36 -0400
commit	db265eca77000c5dafc5608975afe8dafb2a02d5 (patch)
tree	1c33709fa115e8e814e515f4ee535314ed090ab1
parent	12c3667fb780e20360ad0bde32dfb3591ef609ad (diff)
mm/sl[aou]b: Move duping of slab name to slab_common.c
Duping of the slab name currently has to be done by each slab allocator. Moving this code to slab_common.c avoids duplicate implementations.

With this patch we have common string handling for all slab allocators: strings passed to kmem_cache_create() are copied internally, so subsystems can create slab caches from temporary strings.

Slabs allocated in the early stages of bootstrap are never freed (nor can they be, since they are essential to slab allocator operation), so we do not have to worry about duping names during bootstrap.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
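As an illustration of the new contract, here is a hypothetical caller (not part of this commit; the example_* identifiers are made up for the sketch): a minimal module that builds its cache name in a stack buffer, which is only safe because kmem_cache_create() now duplicates the string with kstrdup() before storing it.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *example_cache;

static int __init example_init(void)
{
	char name[32];	/* temporary string: only valid during this call */

	snprintf(name, sizeof(name), "example-%u", 0);
	/* Safe after this patch: the common layer copies the name. */
	example_cache = kmem_cache_create(name, 256, 0,
					  SLAB_HWCACHE_ALIGN, NULL);
	return example_cache ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	/* kmem_cache_destroy() now also frees the duplicated name. */
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The point is that callers no longer need to guarantee the lifetime of the name string: the common layer takes its own copy on create and frees it on destroy.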
-rw-r--r--	mm/slab_common.c	30
-rw-r--r--	mm/slub.c	21
2 files changed, 23 insertions(+), 28 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 7df814e8fbea..f18c06fd97c7 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -100,6 +100,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 {
 	struct kmem_cache *s = NULL;
 	int err = 0;
+	char *n;
 
 	get_online_cpus();
 	mutex_lock(&slab_mutex);
@@ -108,16 +109,26 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		goto out_locked;
 
 
-	s = __kmem_cache_create(name, size, align, flags, ctor);
-	if (!s)
-		err = -ENOSYS; /* Until __kmem_cache_create returns code */
+	n = kstrdup(name, GFP_KERNEL);
+	if (!n) {
+		err = -ENOMEM;
+		goto out_locked;
+	}
+
+	s = __kmem_cache_create(n, size, align, flags, ctor);
+
+	if (s) {
+		/*
+		 * Check if the slab has actually been created and if it was a
+		 * real instatiation. Aliases do not belong on the list
+		 */
+		if (s->refcount == 1)
+			list_add(&s->list, &slab_caches);
 
-	/*
-	 * Check if the slab has actually been created and if it was a
-	 * real instatiation. Aliases do not belong on the list
-	 */
-	if (s && s->refcount == 1)
-		list_add(&s->list, &slab_caches);
+	} else {
+		kfree(n);
+		err = -ENOSYS; /* Until __kmem_cache_create returns code */
+	}
 
 out_locked:
 	mutex_unlock(&slab_mutex);
@@ -153,6 +164,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		if (s->flags & SLAB_DESTROY_BY_RCU)
 			rcu_barrier();
 
+		kfree(s->name);
 		kmem_cache_free(kmem_cache, s);
 	} else {
 		list_add(&s->list, &slab_caches);
diff --git a/mm/slub.c b/mm/slub.c
index e5e09873f5ec..91c9a2fe6760 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -210,10 +210,7 @@ static void sysfs_slab_remove(struct kmem_cache *);
 static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
 static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
 							{ return 0; }
-static inline void sysfs_slab_remove(struct kmem_cache *s)
-{
-	kfree(s->name);
-}
+static inline void sysfs_slab_remove(struct kmem_cache *s) { }
 
 #endif
 
@@ -3929,7 +3926,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
-	char *n;
 
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
@@ -3948,13 +3944,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		return s;
 	}
 
-	n = kstrdup(name, GFP_KERNEL);
-	if (!n)
-		return NULL;
-
 	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 	if (s) {
-		if (kmem_cache_open(s, n,
+		if (kmem_cache_open(s, name,
 				size, align, flags, ctor)) {
 			int r;
 
@@ -3969,7 +3961,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		}
 		kmem_cache_free(kmem_cache, s);
 	}
-	kfree(n);
 	return NULL;
 }
 
@@ -5200,13 +5191,6 @@ static ssize_t slab_attr_store(struct kobject *kobj,
 	return err;
 }
 
-static void kmem_cache_release(struct kobject *kobj)
-{
-	struct kmem_cache *s = to_slab(kobj);
-
-	kfree(s->name);
-}
-
 static const struct sysfs_ops slab_sysfs_ops = {
 	.show = slab_attr_show,
 	.store = slab_attr_store,
@@ -5214,7 +5198,6 @@ static const struct sysfs_ops slab_sysfs_ops = {
5214 5198
5215static struct kobj_type slab_ktype = { 5199static struct kobj_type slab_ktype = {
5216 .sysfs_ops = &slab_sysfs_ops, 5200 .sysfs_ops = &slab_sysfs_ops,
5217 .release = kmem_cache_release
5218}; 5201};
5219 5202
5220static int uevent_filter(struct kset *kset, struct kobject *kobj) 5203static int uevent_filter(struct kset *kset, struct kobject *kobj)