path: root/mm/slab.c
author	Christoph Lameter <cl@linux.com>	2013-01-10 14:12:17 -0500
committer	Pekka Enberg <penberg@kernel.org>	2013-02-01 05:32:06 -0500
commit	6744f087ba2a49f6d6935d9daa0b20a0f03567b5 (patch)
tree	0f208fc00f89179d5b1fd5ae32556663d9d4abac /mm/slab.c
parent	e33660165c901d18e7d3df2290db070d3e4b46df (diff)
slab: Common name for the per node structures
Rename the structure used for the per node structures in slab to have a
name that expresses that fact.

Acked-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
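Context for the diff below: the structure being renamed is SLAB's per node bookkeeping record, one instance per NUMA node per cache, holding the three slab lists and the lock that protects them. A minimal sketch of the renamed definition follows; the three list heads and list_lock are visible in the hunks below, while the remaining fields are recalled from the 3.8-era mm/slab.c and should be treated as illustrative rather than authoritative.

/*
 * Per node bookkeeping, renamed from kmem_list3 to kmem_cache_node by
 * this patch.  Fields beyond the three lists and list_lock are not
 * shown in the hunks below and are reproduced here from memory of
 * the 3.8-era mm/slab.c.
 */
struct kmem_cache_node {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;	/* objects free across all slabs */
	unsigned int free_limit;	/* cap before slabs are reaped */
	unsigned int colour_next;	/* per-node cache coloring */
	spinlock_t list_lock;		/* protects the three lists above */
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* objects freed on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};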
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	87
1 file changed, 43 insertions(+), 44 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 2a7132ec4ff6..7c0da4c86973 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -288,7 +288,7 @@ struct arraycache_init {
 /*
  * The slab lists for all objects.
  */
-struct kmem_list3 {
+struct kmem_cache_node {
 	struct list_head slabs_partial;	/* partial list first, better asm code */
 	struct list_head slabs_full;
 	struct list_head slabs_free;
@@ -306,13 +306,13 @@ struct kmem_list3 {
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
+static struct kmem_cache_node __initdata initkmem_list3[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
 #define SIZE_AC MAX_NUMNODES
 #define SIZE_L3 (2 * MAX_NUMNODES)
 
 static int drain_freelist(struct kmem_cache *cache,
-			struct kmem_list3 *l3, int tofree);
+			struct kmem_cache_node *l3, int tofree);
 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
 			int node);
 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
@@ -329,9 +329,9 @@ EXPORT_SYMBOL(kmalloc_dma_caches);
 static int slab_early_init = 1;
 
 #define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
-#define INDEX_L3 kmalloc_index(sizeof(struct kmem_list3))
+#define INDEX_L3 kmalloc_index(sizeof(struct kmem_cache_node))
 
-static void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_cache_node *parent)
 {
 	INIT_LIST_HEAD(&parent->slabs_full);
 	INIT_LIST_HEAD(&parent->slabs_partial);
@@ -546,7 +546,7 @@ static void slab_set_lock_classes(struct kmem_cache *cachep,
 				int q)
 {
 	struct array_cache **alc;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	int r;
 
 	l3 = cachep->nodelists[q];
@@ -591,7 +591,7 @@ static void init_node_lock_keys(int q)
 		return;
 
 	for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) {
-		struct kmem_list3 *l3;
+		struct kmem_cache_node *l3;
 		struct kmem_cache *cache = kmalloc_caches[i];
 
 		if (!cache)
@@ -608,9 +608,8 @@ static void init_node_lock_keys(int q)
 
 static void on_slab_lock_classes_node(struct kmem_cache *cachep, int q)
 {
-	struct kmem_list3 *l3;
-	l3 = cachep->nodelists[q];
-	if (!l3)
+
+	if (!cachep->nodelists[q])
 		return;
 
 	slab_set_lock_classes(cachep, &on_slab_l3_key,
@@ -901,7 +900,7 @@ static inline bool is_slab_pfmemalloc(struct slab *slabp)
 static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
 						struct array_cache *ac)
 {
-	struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
+	struct kmem_cache_node *l3 = cachep->nodelists[numa_mem_id()];
 	struct slab *slabp;
 	unsigned long flags;
 
@@ -934,7 +933,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 
 	/* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
 	if (unlikely(is_obj_pfmemalloc(objp))) {
-		struct kmem_list3 *l3;
+		struct kmem_cache_node *l3;
 
 		if (gfp_pfmemalloc_allowed(flags)) {
 			clear_obj_pfmemalloc(&objp);
@@ -1106,7 +1105,7 @@ static void free_alien_cache(struct array_cache **ac_ptr)
 static void __drain_alien_cache(struct kmem_cache *cachep,
 				struct array_cache *ac, int node)
 {
-	struct kmem_list3 *rl3 = cachep->nodelists[node];
+	struct kmem_cache_node *rl3 = cachep->nodelists[node];
 
 	if (ac->avail) {
 		spin_lock(&rl3->list_lock);
@@ -1127,7 +1126,7 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
 /*
  * Called from cache_reap() to regularly drain alien caches round robin.
  */
-static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
+static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *l3)
 {
 	int node = __this_cpu_read(slab_reap_node);
 
@@ -1162,7 +1161,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	struct array_cache *alien = NULL;
 	int node;
 
@@ -1207,8 +1206,8 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 static int init_cache_nodelists_node(int node)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3;
-	const int memsize = sizeof(struct kmem_list3);
+	struct kmem_cache_node *l3;
+	const int memsize = sizeof(struct kmem_cache_node);
 
 	list_for_each_entry(cachep, &slab_caches, list) {
 		/*
@@ -1244,7 +1243,7 @@ static int init_cache_nodelists_node(int node)
 static void __cpuinit cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3 = NULL;
+	struct kmem_cache_node *l3 = NULL;
 	int node = cpu_to_mem(cpu);
 	const struct cpumask *mask = cpumask_of_node(node);
 
@@ -1309,7 +1308,7 @@ free_array_cache:
 static int __cpuinit cpuup_prepare(long cpu)
 {
 	struct kmem_cache *cachep;
-	struct kmem_list3 *l3 = NULL;
+	struct kmem_cache_node *l3 = NULL;
 	int node = cpu_to_mem(cpu);
 	int err;
 
@@ -1463,7 +1462,7 @@ static int __meminit drain_cache_nodelists_node(int node)
 	int ret = 0;
 
 	list_for_each_entry(cachep, &slab_caches, list) {
-		struct kmem_list3 *l3;
+		struct kmem_cache_node *l3;
 
 		l3 = cachep->nodelists[node];
 		if (!l3)
@@ -1516,15 +1515,15 @@ out:
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+static void __init init_list(struct kmem_cache *cachep, struct kmem_cache_node *list,
 				int nodeid)
 {
-	struct kmem_list3 *ptr;
+	struct kmem_cache_node *ptr;
 
-	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
+	ptr = kmalloc_node(sizeof(struct kmem_cache_node), GFP_NOWAIT, nodeid);
 	BUG_ON(!ptr);
 
-	memcpy(ptr, list, sizeof(struct kmem_list3));
+	memcpy(ptr, list, sizeof(struct kmem_cache_node));
 	/*
 	 * Do not assume that spinlocks can be initialized via memcpy:
 	 */
@@ -1556,7 +1555,7 @@ static void __init set_up_list3s(struct kmem_cache *cachep, int index)
  */
 static void setup_nodelists_pointer(struct kmem_cache *cachep)
 {
-	cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
+	cachep->nodelists = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
 }
 
 /*
@@ -1613,7 +1612,7 @@ void __init kmem_cache_init(void)
 	 */
 	create_boot_cache(kmem_cache, "kmem_cache",
 		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
-				  nr_node_ids * sizeof(struct kmem_list3 *),
+				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN);
 	list_add(&kmem_cache->list, &slab_caches);
 
@@ -1787,7 +1786,7 @@ __initcall(cpucache_init);
 static noinline void
 slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
 {
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	struct slab *slabp;
 	unsigned long flags;
 	int node;
@@ -2279,7 +2278,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 		int node;
 		for_each_online_node(node) {
 			cachep->nodelists[node] =
-			    kmalloc_node(sizeof(struct kmem_list3),
+			    kmalloc_node(sizeof(struct kmem_cache_node),
 						gfp, node);
 			BUG_ON(!cachep->nodelists[node]);
 			kmem_list3_init(cachep->nodelists[node]);
@@ -2547,7 +2546,7 @@ static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
 #define check_spinlock_acquired_node(x, y) do { } while(0)
 #endif
 
-static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
 			struct array_cache *ac,
 			int force, int node);
 
@@ -2567,7 +2566,7 @@ static void do_drain(void *arg)
 
 static void drain_cpu_caches(struct kmem_cache *cachep)
 {
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	int node;
 
 	on_each_cpu(do_drain, cachep, 1);
@@ -2592,7 +2591,7 @@ static void drain_cpu_caches(struct kmem_cache *cachep)
  * Returns the actual number of slabs released.
  */
 static int drain_freelist(struct kmem_cache *cache,
-			struct kmem_list3 *l3, int tofree)
+			struct kmem_cache_node *l3, int tofree)
 {
 	struct list_head *p;
 	int nr_freed;
@@ -2630,7 +2629,7 @@ out:
 static int __cache_shrink(struct kmem_cache *cachep)
 {
 	int ret = 0, i = 0;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 
 	drain_cpu_caches(cachep);
 
@@ -2672,7 +2671,7 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
 	int i;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	int rc = __cache_shrink(cachep);
 
 	if (rc)
@@ -2869,7 +2868,7 @@ static int cache_grow(struct kmem_cache *cachep,
 	struct slab *slabp;
 	size_t offset;
 	gfp_t local_flags;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 
 	/*
 	 * Be lazy and only check for valid flags here, keeping it out of the
@@ -3059,7 +3058,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 							bool force_refill)
 {
 	int batchcount;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	struct array_cache *ac;
 	int node;
 
@@ -3391,7 +3390,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 {
 	struct list_head *entry;
 	struct slab *slabp;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	void *obj;
 	int x;
 
@@ -3586,7 +3585,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 			      int node)
 {
 	int i;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 
 	for (i = 0; i < nr_objects; i++) {
 		void *objp;
@@ -3632,7 +3631,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
 	int batchcount;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	int node = numa_mem_id();
 
 	batchcount = ac->batchcount;
@@ -3924,7 +3923,7 @@ EXPORT_SYMBOL(kfree);
 static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 {
 	int node;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	struct array_cache *new_shared;
 	struct array_cache **new_alien = NULL;
 
@@ -3969,7 +3968,7 @@ static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
 			free_alien_cache(new_alien);
 			continue;
 		}
-		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
+		l3 = kmalloc_node(sizeof(struct kmem_cache_node), gfp, node);
 		if (!l3) {
 			free_alien_cache(new_alien);
 			kfree(new_shared);
@@ -4165,7 +4164,7 @@ skip_setup:
  * necessary. Note that the l3 listlock also protects the array_cache
  * if drain_array() is used on the shared array.
  */
-static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
+static void drain_array(struct kmem_cache *cachep, struct kmem_cache_node *l3,
 			 struct array_cache *ac, int force, int node)
 {
 	int tofree;
@@ -4204,7 +4203,7 @@ static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
 static void cache_reap(struct work_struct *w)
 {
 	struct kmem_cache *searchp;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	int node = numa_mem_id();
 	struct delayed_work *work = to_delayed_work(w);
 
@@ -4268,7 +4267,7 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
 	const char *name;
 	char *error = NULL;
 	int node;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 
 	active_objs = 0;
 	num_slabs = 0;
@@ -4482,7 +4481,7 @@ static int leaks_show(struct seq_file *m, void *p)
 {
 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
 	struct slab *slabp;
-	struct kmem_list3 *l3;
+	struct kmem_cache_node *l3;
 	const char *name;
 	unsigned long *n = m->private;
 	int node;