Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  46
-rw-r--r--  mm/slub.c  23
2 files changed, 34 insertions, 35 deletions
diff --git a/mm/slab.c b/mm/slab.c
index b03b2e46b806..ff31261fd24f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,11 +304,11 @@ struct kmem_list3 {
 /*
  * Need this for bootstrapping a per node allocator.
  */
-#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
+#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
-#define SIZE_AC 1
-#define SIZE_L3 (1 + MAX_NUMNODES)
+#define SIZE_AC MAX_NUMNODES
+#define SIZE_L3 (2 * MAX_NUMNODES)
 
 static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_list3 *l3, int tofree);
@@ -1410,6 +1410,22 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 }
 
 /*
+ * For setting up all the kmem_list3s for cache whose buffer_size is same as
+ * size of kmem_list3.
+ */
+static void __init set_up_list3s(struct kmem_cache *cachep, int index)
+{
+	int node;
+
+	for_each_online_node(node) {
+		cachep->nodelists[node] = &initkmem_list3[index + node];
+		cachep->nodelists[node]->next_reap = jiffies +
+		    REAPTIMEOUT_LIST3 +
+		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+	}
+}
+
+/*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
  */
@@ -1432,6 +1448,7 @@ void __init kmem_cache_init(void)
 		if (i < MAX_NUMNODES)
 			cache_cache.nodelists[i] = NULL;
 	}
+	set_up_list3s(&cache_cache, CACHE_CACHE);
 
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
@@ -1587,10 +1604,9 @@ void __init kmem_cache_init(void)
 	{
 		int nid;
 
-		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
-
 		for_each_online_node(nid) {
+			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], nid);
+
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
 				  &initkmem_list3[SIZE_AC + nid], nid);
 
@@ -1960,22 +1976,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-/*
- * For setting up all the kmem_list3s for cache whose buffer_size is same as
- * size of kmem_list3.
- */
-static void __init set_up_list3s(struct kmem_cache *cachep, int index)
-{
-	int node;
-
-	for_each_online_node(node) {
-		cachep->nodelists[node] = &initkmem_list3[index + node];
-		cachep->nodelists[node]->next_reap = jiffies +
-		    REAPTIMEOUT_LIST3 +
-		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-	}
-}
-
 static void __kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
@@ -2099,7 +2099,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
 			g_cpucache_up = PARTIAL_L3;
 		} else {
 			int node;
-			for_each_node_state(node, N_NORMAL_MEMORY) {
+			for_each_online_node(node) {
 				cachep->nodelists[node] =
 				    kmalloc_node(sizeof(struct kmem_list3),
 						GFP_KERNEL, node);
diff --git a/mm/slub.c b/mm/slub.c
index 474945ecd89d..5cc4b7dddb50 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3962,7 +3962,7 @@ static struct kset_uevent_ops slab_uevent_ops = {
 	.filter = uevent_filter,
 };
 
-static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
+static struct kset *slab_kset;
 
 #define ID_STR_LENGTH 64
 
@@ -4015,7 +4015,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
 		 * This is typically the case for debug situations. In that
 		 * case we can catch duplicate names easily.
 		 */
-		sysfs_remove_link(&slab_subsys.kobj, s->name);
+		sysfs_remove_link(&slab_kset->kobj, s->name);
 		name = s->name;
 	} else {
 		/*
@@ -4025,12 +4025,12 @@ static int sysfs_slab_add(struct kmem_cache *s)
 		name = create_unique_id(s);
 	}
 
-	kobj_set_kset_s(s, slab_subsys);
-	kobject_set_name(&s->kobj, name);
-	kobject_init(&s->kobj);
-	err = kobject_add(&s->kobj);
-	if (err)
+	s->kobj.kset = slab_kset;
+	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
+	if (err) {
+		kobject_put(&s->kobj);
 		return err;
+	}
 
 	err = sysfs_create_group(&s->kobj, &slab_attr_group);
 	if (err)
@@ -4070,9 +4070,8 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
 		/*
 		 * If we have a leftover link then remove it.
 		 */
-		sysfs_remove_link(&slab_subsys.kobj, name);
-		return sysfs_create_link(&slab_subsys.kobj,
-					 &s->kobj, name);
+		sysfs_remove_link(&slab_kset->kobj, name);
+		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
 	}
 
 	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
@@ -4091,8 +4090,8 @@ static int __init slab_sysfs_init(void)
 	struct kmem_cache *s;
 	int err;
 
-	err = subsystem_register(&slab_subsys);
-	if (err) {
+	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
+	if (!slab_kset) {
 		printk(KERN_ERR "Cannot register slab subsystem.\n");
 		return -ENOSYS;
 	}