Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 54 +++++++++++++++++++++++++-----------------------------
 1 file changed, 25 insertions(+), 29 deletions(-)
@@ -36,13 +36,13 @@
 
 /*
  * Lock order:
- *   1. slub_lock (Global Semaphore)
+ *   1. slab_mutex (Global Mutex)
  *   2. node->list_lock
  *   3. slab_lock(page) (Only on some arches and for debugging)
  *
- *   slub_lock
+ *   slab_mutex
  *
- *   The role of the slub_lock is to protect the list of all the slabs
+ *   The role of the slab_mutex is to protect the list of all the slabs
  *   and to synchronize major metadata changes to slab cache structures.
  *
  *   The slab_lock is only used for debugging and on arches that do not
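For reference, the conversion is a mechanical mapping from the rwsem API onto the plain mutex API; former read-side and write-side paths collapse onto the same exclusive lock. A minimal sketch of the two idioms, with illustrative lock names (not the kernel sources themselves):

    #include <linux/mutex.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(old_rwsem);  /* before: reader/writer semaphore */
    static DEFINE_MUTEX(new_mutex);   /* after: plain exclusive mutex */

    static void rwsem_idiom(void)
    {
            down_read(&old_rwsem);    /* shared: list walkers */
            up_read(&old_rwsem);
            down_write(&old_rwsem);   /* exclusive: create/destroy */
            up_write(&old_rwsem);
    }

    static void mutex_idiom(void)
    {
            mutex_lock(&new_mutex);   /* every caller is exclusive now */
            mutex_unlock(&new_mutex);
    }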
@@ -183,10 +183,6 @@ static int kmem_size = sizeof(struct kmem_cache);
 static struct notifier_block slab_notifier;
 #endif
 
-/* A list of all slab caches on the system */
-static DECLARE_RWSEM(slub_lock);
-static LIST_HEAD(slab_caches);
-
 /*
  * Tracking user of a slab.
  */
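The DECLARE_RWSEM() and LIST_HEAD() definitions disappear from mm/slub.c, yet slab_caches is still walked throughout the hunks below, so both objects must now be defined somewhere shared by the slab allocators, which this diffstat-limited view does not show. A hedged sketch of what the shared declarations would look like (file and placement assumed, not confirmed by this diff):

    /* In a common header shared by the allocators (placement assumed): */
    extern struct mutex slab_mutex;
    extern struct list_head slab_caches;

    /* ...with a single definition site in common code (also assumed): */
    DEFINE_MUTEX(slab_mutex);
    LIST_HEAD(slab_caches);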
@@ -3177,11 +3173,11 @@ static inline int kmem_cache_close(struct kmem_cache *s)
  */
 void kmem_cache_destroy(struct kmem_cache *s)
 {
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 	s->refcount--;
 	if (!s->refcount) {
 		list_del(&s->list);
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		if (kmem_cache_close(s)) {
 			printk(KERN_ERR "SLUB %s: %s called for cache that "
 				"still has objects.\n", s->name, __func__);
@@ -3191,7 +3187,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
 		rcu_barrier();
 		sysfs_slab_remove(s);
 	} else
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 }
 EXPORT_SYMBOL(kmem_cache_destroy);
 
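The two hunks above keep kmem_cache_destroy()'s existing shape: slab_mutex covers only the refcount decrement and the list unlink, and is dropped before the potentially slow teardown (flushing remaining slabs, rcu_barrier(), sysfs removal). A reduced sketch of the pattern, not the full function:

    void destroy_pattern(struct kmem_cache *s)
    {
            mutex_lock(&slab_mutex);
            s->refcount--;
            if (!s->refcount) {
                    list_del(&s->list);          /* unlink while protected */
                    mutex_unlock(&slab_mutex);   /* teardown runs unlocked */
                    /* ... kmem_cache_close(s), rcu_barrier(),
                     *     sysfs_slab_remove(s) ... */
            } else {
                    mutex_unlock(&slab_mutex);   /* still referenced */
            }
    }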
@@ -3253,7 +3249,7 @@ static struct kmem_cache *__init create_kmalloc_cache(const char *name,
 
 	/*
 	 * This function is called with IRQs disabled during early-boot on
-	 * single CPU so there's no need to take slub_lock here.
+	 * single CPU so there's no need to take slab_mutex here.
 	 */
 	if (!kmem_cache_open(s, name, size, ARCH_KMALLOC_MINALIGN,
 								flags, NULL))
@@ -3538,10 +3534,10 @@ static int slab_mem_going_offline_callback(void *arg)
 {
 	struct kmem_cache *s;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list)
 		kmem_cache_shrink(s);
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 
 	return 0;
 }
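This is the one semantic change in the conversion: callers like this one formerly took slub_lock with down_read() and could walk the cache list concurrently, whereas mutex_lock() serializes them. Since these are rare hotplug and boot paths, the lost read-side parallelism is presumably immaterial. The walk idiom itself is unchanged (illustrative wrapper name):

    static void shrink_all_caches(void)
    {
            struct kmem_cache *s;

            mutex_lock(&slab_mutex);             /* was: down_read() */
            list_for_each_entry(s, &slab_caches, list)
                    kmem_cache_shrink(s);
            mutex_unlock(&slab_mutex);           /* was: up_read() */
    }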
@@ -3562,7 +3558,7 @@ static void slab_mem_offline_callback(void *arg)
 	if (offline_node < 0)
 		return;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		n = get_node(s, offline_node);
 		if (n) {
@@ -3578,7 +3574,7 @@ static void slab_mem_offline_callback(void *arg)
 			kmem_cache_free(kmem_cache_node, n);
 		}
 	}
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 }
 
 static int slab_mem_going_online_callback(void *arg)
@@ -3601,7 +3597,7 @@ static int slab_mem_going_online_callback(void *arg)
 	 * allocate a kmem_cache_node structure in order to bring the node
 	 * online.
 	 */
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	list_for_each_entry(s, &slab_caches, list) {
 		/*
 		 * XXX: kmem_cache_alloc_node will fallback to other nodes
@@ -3617,7 +3613,7 @@ static int slab_mem_going_online_callback(void *arg)
 		s->node[nid] = n;
 	}
 out:
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	return ret;
 }
 
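The online callback attaches a freshly allocated kmem_cache_node to every cache while holding slab_mutex, so no cache can be created or destroyed mid-walk; the allocation may sleep, which is fine under a mutex (it would not be under a spinlock). A reduced sketch with an assumed init helper:

    static int node_online_pattern(int nid)
    {
            struct kmem_cache *s;
            int ret = 0;

            mutex_lock(&slab_mutex);
            list_for_each_entry(s, &slab_caches, list) {
                    struct kmem_cache_node *n;

                    /* process context; a sleeping allocation is allowed */
                    n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
                    if (!n) {
                            ret = -ENOMEM;
                            goto out;
                    }
                    init_kmem_cache_node(n);     /* assumed helper */
                    s->node[nid] = n;
            }
    out:
            mutex_unlock(&slab_mutex);
            return ret;
    }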
@@ -3915,7 +3911,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	struct kmem_cache *s;
 	char *n;
 
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
 		s->refcount++;
@@ -3930,7 +3926,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 			s->refcount--;
 			goto err;
 		}
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		return s;
 	}
 
@@ -3943,9 +3939,9 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 		if (kmem_cache_open(s, n,
 				size, align, flags, ctor)) {
 			list_add(&s->list, &slab_caches);
-			up_write(&slub_lock);
+			mutex_unlock(&slab_mutex);
 			if (sysfs_slab_add(s)) {
-				down_write(&slub_lock);
+				mutex_lock(&slab_mutex);
 				list_del(&s->list);
 				kfree(n);
 				kfree(s);
@@ -3957,7 +3953,7 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	}
 	kfree(n);
 err:
-	up_write(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	return s;
 }
 
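Note the unlock/relock dance in the hunks above: the new cache is published on slab_caches first, the mutex is dropped, and sysfs_slab_add() runs unlocked, presumably because sysfs registration can itself allocate memory and must not recurse into the allocator while slab_mutex is held. On failure the mutex is re-taken just long enough to unpublish. A reduced sketch of the pattern:

    static struct kmem_cache *publish_pattern(struct kmem_cache *s)
    {
            mutex_lock(&slab_mutex);
            /* ... open/initialize the cache ... */
            list_add(&s->list, &slab_caches);    /* publish */
            mutex_unlock(&slab_mutex);

            if (sysfs_slab_add(s)) {
                    mutex_lock(&slab_mutex);     /* re-take to unpublish */
                    list_del(&s->list);
                    mutex_unlock(&slab_mutex);
                    kfree(s);
                    return NULL;
            }
            return s;
    }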
@@ -3978,13 +3974,13 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		down_read(&slub_lock);
+		mutex_lock(&slab_mutex);
 		list_for_each_entry(s, &slab_caches, list) {
 			local_irq_save(flags);
 			__flush_cpu_slab(s, cpu);
 			local_irq_restore(flags);
 		}
-		up_read(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		break;
 	default:
 		break;
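In the CPU-down path, slab_mutex serializes the list walk while interrupts are disabled only around each per-CPU flush, keeping the IRQ-off sections short; the mutex itself may sleep and therefore cannot be taken with IRQs disabled. Sketch (illustrative wrapper name):

    static void flush_dead_cpu(int cpu)
    {
            struct kmem_cache *s;
            unsigned long flags;

            mutex_lock(&slab_mutex);             /* may sleep: IRQs still on */
            list_for_each_entry(s, &slab_caches, list) {
                    local_irq_save(flags);       /* short IRQ-off window */
                    __flush_cpu_slab(s, cpu);
                    local_irq_restore(flags);
            }
            mutex_unlock(&slab_mutex);
    }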
@@ -5360,11 +5356,11 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
-	down_write(&slub_lock);
+	mutex_lock(&slab_mutex);
 
 	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
 	if (!slab_kset) {
-		up_write(&slub_lock);
+		mutex_unlock(&slab_mutex);
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}
@@ -5389,7 +5385,7 @@ static int __init slab_sysfs_init(void)
 		kfree(al);
 	}
 
-	up_write(&slub_lock);
+	mutex_unlock(&slab_mutex);
 	resiliency_test();
 	return 0;
 }
@@ -5415,7 +5411,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 {
 	loff_t n = *pos;
 
-	down_read(&slub_lock);
+	mutex_lock(&slab_mutex);
 	if (!n)
 		print_slabinfo_header(m);
 
@@ -5429,7 +5425,7 @@ static void *s_next(struct seq_file *m, void *p, loff_t *pos)
 
 static void s_stop(struct seq_file *m, void *p)
 {
-	up_read(&slub_lock);
+	mutex_unlock(&slab_mutex);
 }
 
 static int s_show(struct seq_file *m, void *p)
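The /proc/slabinfo iterator acquires slab_mutex in s_start() and releases it in s_stop(), holding it across every s_show() call in between; the seq_file core guarantees stop is invoked for each start, which keeps the pairing balanced even on error. A minimal skeleton using the generic seq_list helpers (the real code keeps its own cursor):

    static void *s_start(struct seq_file *m, loff_t *pos)
    {
            mutex_lock(&slab_mutex);             /* held until s_stop() */
            return seq_list_start(&slab_caches, *pos);
    }

    static void *s_next(struct seq_file *m, void *p, loff_t *pos)
    {
            return seq_list_next(p, &slab_caches, pos);
    }

    static void s_stop(struct seq_file *m, void *p)
    {
            mutex_unlock(&slab_mutex);           /* seq_file always pairs this */
    }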