Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  349
1 file changed, 134 insertions, 215 deletions
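
The patch below replaces the per-cpu pointer table at the end of struct kmem_cache (cachep->array[NR_CPUS]) with a single percpu allocation, cachep->cpu_cache, accessed through this_cpu_ptr()/per_cpu_ptr() and released with free_percpu(). As a rough sketch of that percpu allocation pattern only, not the patch's actual code (demo_cpu_cache and demo_alloc_cpu_caches are invented names standing in for array_cache and alloc_kmem_cache_cpus):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical stand-in for struct array_cache; fields trimmed for brevity. */
struct demo_cpu_cache {
	unsigned int avail;
	unsigned int limit;
	void *entry[];		/* object pointers stored after the header */
};

/* Allocate one cache per possible CPU, each with room for 'entries' pointers. */
static struct demo_cpu_cache __percpu *demo_alloc_cpu_caches(int entries)
{
	size_t size = sizeof(struct demo_cpu_cache) + entries * sizeof(void *);
	struct demo_cpu_cache __percpu *caches;
	int cpu;

	caches = __alloc_percpu(size, __alignof__(struct demo_cpu_cache));
	if (!caches)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct demo_cpu_cache *c = per_cpu_ptr(caches, cpu);

		c->avail = 0;
		c->limit = entries;
	}
	return caches;
}

Each CPU then reaches its own copy with this_cpu_ptr(caches), and free_percpu(caches) releases every per-CPU copy at once.
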
diff --git a/mm/slab.c b/mm/slab.c
index 7c52b3890d25..154aac8411c5 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -237,11 +237,10 @@ struct arraycache_init {
 /*
  * Need this for bootstrapping a per node allocator.
  */
-#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
+#define NUM_INIT_LISTS (2 * MAX_NUMNODES)
 static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS];
 #define CACHE_CACHE 0
-#define SIZE_AC MAX_NUMNODES
-#define SIZE_NODE (2 * MAX_NUMNODES)
+#define SIZE_NODE (MAX_NUMNODES)
 
 static int drain_freelist(struct kmem_cache *cache,
 			struct kmem_cache_node *n, int tofree);
@@ -253,7 +252,6 @@ static void cache_reap(struct work_struct *unused);
 
 static int slab_early_init = 1;
 
-#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init))
 #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node))
 
 static void kmem_cache_node_init(struct kmem_cache_node *parent)
@@ -458,9 +456,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 }
 
-static struct arraycache_init initarray_generic =
-    { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
-
 /* internal cache of cache description objs */
 static struct kmem_cache kmem_cache_boot = {
 	.batchcount = 1,
@@ -476,7 +471,7 @@ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
 
 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 {
-	return cachep->array[smp_processor_id()];
+	return this_cpu_ptr(cachep->cpu_cache);
 }
 
 static size_t calculate_freelist_size(int nr_objs, size_t align)
@@ -785,8 +780,8 @@ static inline void *ac_get_obj(struct kmem_cache *cachep,
 	return objp;
 }
 
-static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
-								void *objp)
+static noinline void *__ac_put_obj(struct kmem_cache *cachep,
+			struct array_cache *ac, void *objp)
 {
 	if (unlikely(pfmemalloc_active)) {
 		/* Some pfmemalloc slabs exist, check if this is one */
@@ -984,46 +979,50 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static int __cache_free_alien(struct kmem_cache *cachep, void *objp,
+				int node, int page_node)
 {
-	int nodeid = page_to_nid(virt_to_page(objp));
 	struct kmem_cache_node *n;
 	struct alien_cache *alien = NULL;
 	struct array_cache *ac;
-	int node;
 	LIST_HEAD(list);
 
-	node = numa_mem_id();
-
-	/*
-	 * Make sure we are not freeing a object from another node to the array
-	 * cache on this cpu.
-	 */
-	if (likely(nodeid == node))
-		return 0;
-
 	n = get_node(cachep, node);
 	STATS_INC_NODEFREES(cachep);
-	if (n->alien && n->alien[nodeid]) {
-		alien = n->alien[nodeid];
+	if (n->alien && n->alien[page_node]) {
+		alien = n->alien[page_node];
 		ac = &alien->ac;
 		spin_lock(&alien->lock);
 		if (unlikely(ac->avail == ac->limit)) {
 			STATS_INC_ACOVERFLOW(cachep);
-			__drain_alien_cache(cachep, ac, nodeid, &list);
+			__drain_alien_cache(cachep, ac, page_node, &list);
 		}
 		ac_put_obj(cachep, ac, objp);
 		spin_unlock(&alien->lock);
 		slabs_destroy(cachep, &list);
 	} else {
-		n = get_node(cachep, nodeid);
+		n = get_node(cachep, page_node);
 		spin_lock(&n->list_lock);
-		free_block(cachep, &objp, 1, nodeid, &list);
+		free_block(cachep, &objp, 1, page_node, &list);
 		spin_unlock(&n->list_lock);
 		slabs_destroy(cachep, &list);
 	}
 	return 1;
 }
+
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+{
+	int page_node = page_to_nid(virt_to_page(objp));
+	int node = numa_mem_id();
+	/*
+	 * Make sure we are not freeing a object from another node to the array
+	 * cache on this cpu.
+	 */
+	if (likely(node == page_node))
+		return 0;
+
+	return __cache_free_alien(cachep, objp, node, page_node);
+}
 #endif
 
 /*
@@ -1092,24 +1091,25 @@ static void cpuup_canceled(long cpu)
 		struct alien_cache **alien;
 		LIST_HEAD(list);
 
-		/* cpu is dead; no one can alloc from it. */
-		nc = cachep->array[cpu];
-		cachep->array[cpu] = NULL;
 		n = get_node(cachep, node);
-
 		if (!n)
-			goto free_array_cache;
+			continue;
 
 		spin_lock_irq(&n->list_lock);
 
 		/* Free limit for this kmem_cache_node */
 		n->free_limit -= cachep->batchcount;
-		if (nc)
+
+		/* cpu is dead; no one can alloc from it. */
+		nc = per_cpu_ptr(cachep->cpu_cache, cpu);
+		if (nc) {
 			free_block(cachep, nc->entry, nc->avail, node, &list);
+			nc->avail = 0;
+		}
 
 		if (!cpumask_empty(mask)) {
 			spin_unlock_irq(&n->list_lock);
-			goto free_array_cache;
+			goto free_slab;
 		}
 
 		shared = n->shared;
@@ -1129,9 +1129,9 @@ static void cpuup_canceled(long cpu)
 			drain_alien_cache(cachep, alien);
 			free_alien_cache(alien);
 		}
-free_array_cache:
+
+free_slab:
 		slabs_destroy(cachep, &list);
-		kfree(nc);
 	}
 	/*
 	 * In the previous loop, all the objects were freed to
@@ -1168,32 +1168,23 @@ static int cpuup_prepare(long cpu)
 	 * array caches
 	 */
 	list_for_each_entry(cachep, &slab_caches, list) {
-		struct array_cache *nc;
 		struct array_cache *shared = NULL;
 		struct alien_cache **alien = NULL;
 
-		nc = alloc_arraycache(node, cachep->limit,
-					cachep->batchcount, GFP_KERNEL);
-		if (!nc)
-			goto bad;
 		if (cachep->shared) {
 			shared = alloc_arraycache(node,
 				cachep->shared * cachep->batchcount,
 				0xbaadf00d, GFP_KERNEL);
-			if (!shared) {
-				kfree(nc);
+			if (!shared)
 				goto bad;
-			}
 		}
 		if (use_alien_caches) {
 			alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
 			if (!alien) {
 				kfree(shared);
-				kfree(nc);
 				goto bad;
 			}
 		}
-		cachep->array[cpu] = nc;
 		n = get_node(cachep, node);
 		BUG_ON(!n);
 
@@ -1385,15 +1376,6 @@ static void __init set_up_node(struct kmem_cache *cachep, int index)
 }
 
 /*
- * The memory after the last cpu cache pointer is used for the
- * the node pointer.
- */
-static void setup_node_pointer(struct kmem_cache *cachep)
-{
-	cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids];
-}
-
-/*
  * Initialisation. Called after the page allocator have been initialised and
  * before smp_init().
  */
@@ -1404,7 +1386,6 @@ void __init kmem_cache_init(void)
 	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
 				 sizeof(struct rcu_head));
 	kmem_cache = &kmem_cache_boot;
-	setup_node_pointer(kmem_cache);
 
 	if (num_possible_nodes() == 1)
 		use_alien_caches = 0;
@@ -1412,8 +1393,6 @@ void __init kmem_cache_init(void)
 	for (i = 0; i < NUM_INIT_LISTS; i++)
 		kmem_cache_node_init(&init_kmem_cache_node[i]);
 
-	set_up_node(kmem_cache, CACHE_CACHE);
-
 	/*
 	 * Fragmentation resistance on low memory - only use bigger
 	 * page orders on machines with more than 32MB of memory if
@@ -1448,49 +1427,22 @@ void __init kmem_cache_init(void)
 	 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
 	 */
 	create_boot_cache(kmem_cache, "kmem_cache",
-		offsetof(struct kmem_cache, array[nr_cpu_ids]) +
+		offsetof(struct kmem_cache, node) +
 				  nr_node_ids * sizeof(struct kmem_cache_node *),
 				  SLAB_HWCACHE_ALIGN);
 	list_add(&kmem_cache->list, &slab_caches);
-
-	/* 2+3) create the kmalloc caches */
+	slab_state = PARTIAL;
 
 	/*
-	 * Initialize the caches that provide memory for the array cache and the
-	 * kmem_cache_node structures first. Without this, further allocations will
-	 * bug.
+	 * Initialize the caches that provide memory for the kmem_cache_node
+	 * structures first. Without this, further allocations will bug.
 	 */
-
-	kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
-					kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
-
-	if (INDEX_AC != INDEX_NODE)
-		kmalloc_caches[INDEX_NODE] =
-			create_kmalloc_cache("kmalloc-node",
+	kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node",
 				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
+	slab_state = PARTIAL_NODE;
 
 	slab_early_init = 0;
 
-	/* 4) Replace the bootstrap head arrays */
-	{
-		struct array_cache *ptr;
-
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
-
-		memcpy(ptr, cpu_cache_get(kmem_cache),
-		       sizeof(struct arraycache_init));
-
-		kmem_cache->array[smp_processor_id()] = ptr;
-
-		ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
-
-		BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC])
-		       != &initarray_generic.cache);
-		memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]),
-		       sizeof(struct arraycache_init));
-
-		kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr;
-	}
 	/* 5) Replace the bootstrap kmem_cache_node */
 	{
 		int nid;
@@ -1498,13 +1450,8 @@ void __init kmem_cache_init(void)
 		for_each_online_node(nid) {
 			init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid);
 
-			init_list(kmalloc_caches[INDEX_AC],
-				  &init_kmem_cache_node[SIZE_AC + nid], nid);
-
-			if (INDEX_AC != INDEX_NODE) {
-				init_list(kmalloc_caches[INDEX_NODE],
+			init_list(kmalloc_caches[INDEX_NODE],
 				  &init_kmem_cache_node[SIZE_NODE + nid], nid);
-			}
 		}
 	}
 
@@ -2037,56 +1984,53 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
 	return left_over;
 }
 
+static struct array_cache __percpu *alloc_kmem_cache_cpus(
+		struct kmem_cache *cachep, int entries, int batchcount)
+{
+	int cpu;
+	size_t size;
+	struct array_cache __percpu *cpu_cache;
+
+	size = sizeof(void *) * entries + sizeof(struct array_cache);
+	cpu_cache = __alloc_percpu(size, 0);
+
+	if (!cpu_cache)
+		return NULL;
+
+	for_each_possible_cpu(cpu) {
+		init_arraycache(per_cpu_ptr(cpu_cache, cpu),
+				entries, batchcount);
+	}
+
+	return cpu_cache;
+}
+
 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
 	if (slab_state >= FULL)
 		return enable_cpucache(cachep, gfp);
 
+	cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1);
+	if (!cachep->cpu_cache)
+		return 1;
+
 	if (slab_state == DOWN) {
-		/*
-		 * Note: Creation of first cache (kmem_cache).
-		 * The setup_node is taken care
-		 * of by the caller of __kmem_cache_create
-		 */
-		cachep->array[smp_processor_id()] = &initarray_generic.cache;
-		slab_state = PARTIAL;
+		/* Creation of first cache (kmem_cache). */
+		set_up_node(kmem_cache, CACHE_CACHE);
 	} else if (slab_state == PARTIAL) {
-		/*
-		 * Note: the second kmem_cache_create must create the cache
-		 * that's used by kmalloc(24), otherwise the creation of
-		 * further caches will BUG().
-		 */
-		cachep->array[smp_processor_id()] = &initarray_generic.cache;
-
-		/*
-		 * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is
-		 * the second cache, then we need to set up all its node/,
-		 * otherwise the creation of further caches will BUG().
-		 */
-		set_up_node(cachep, SIZE_AC);
-		if (INDEX_AC == INDEX_NODE)
-			slab_state = PARTIAL_NODE;
-		else
-			slab_state = PARTIAL_ARRAYCACHE;
+		/* For kmem_cache_node */
+		set_up_node(cachep, SIZE_NODE);
 	} else {
-		/* Remaining boot caches */
-		cachep->array[smp_processor_id()] =
-			kmalloc(sizeof(struct arraycache_init), gfp);
+		int node;
 
-		if (slab_state == PARTIAL_ARRAYCACHE) {
-			set_up_node(cachep, SIZE_NODE);
-			slab_state = PARTIAL_NODE;
-		} else {
-			int node;
-			for_each_online_node(node) {
-				cachep->node[node] =
-					kmalloc_node(sizeof(struct kmem_cache_node),
-							gfp, node);
-				BUG_ON(!cachep->node[node]);
-				kmem_cache_node_init(cachep->node[node]);
-			}
+		for_each_online_node(node) {
+			cachep->node[node] = kmalloc_node(
+				sizeof(struct kmem_cache_node), gfp, node);
+			BUG_ON(!cachep->node[node]);
+			kmem_cache_node_init(cachep->node[node]);
 		}
 	}
+
 	cachep->node[numa_mem_id()]->next_reap =
 			jiffies + REAPTIMEOUT_NODE +
 			((unsigned long)cachep) % REAPTIMEOUT_NODE;
@@ -2100,6 +2044,32 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return 0;
 }
 
+unsigned long kmem_cache_flags(unsigned long object_size,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *))
+{
+	return flags;
+}
+
+struct kmem_cache *
+__kmem_cache_alias(const char *name, size_t size, size_t align,
+		   unsigned long flags, void (*ctor)(void *))
+{
+	struct kmem_cache *cachep;
+
+	cachep = find_mergeable(size, align, flags, name, ctor);
+	if (cachep) {
+		cachep->refcount++;
+
+		/*
+		 * Adjust the object sizes so that we clear
+		 * the complete object on kzalloc.
+		 */
+		cachep->object_size = max_t(int, cachep->object_size, size);
+	}
+	return cachep;
+}
+
 /**
  * __kmem_cache_create - Create a cache.
  * @cachep: cache management descriptor
@@ -2183,7 +2153,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 	else
 		gfp = GFP_NOWAIT;
 
-	setup_node_pointer(cachep);
 #if DEBUG
 
 	/*
@@ -2440,8 +2409,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep)
 	if (rc)
 		return rc;
 
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
+	free_percpu(cachep->cpu_cache);
 
 	/* NUMA: free the node structures */
 	for_each_kmem_cache_node(cachep, i, n) {
@@ -3399,7 +3367,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
 	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
 		return;
 
-	if (likely(ac->avail < ac->limit)) {
+	if (ac->avail < ac->limit) {
 		STATS_INC_FREEHIT(cachep);
 	} else {
 		STATS_INC_FREEMISS(cachep);
@@ -3496,7 +3464,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
 	return kmem_cache_alloc_node_trace(cachep, flags, node, size);
 }
 
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
@@ -3509,13 +3476,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
 	return __do_kmalloc_node(size, flags, node, caller);
 }
 EXPORT_SYMBOL(__kmalloc_node_track_caller);
-#else
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
-{
-	return __do_kmalloc_node(size, flags, node, 0);
-}
-EXPORT_SYMBOL(__kmalloc_node);
-#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
 #endif /* CONFIG_NUMA */
 
 /**
@@ -3541,8 +3501,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
 	return ret;
 }
 
-
-#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	return __do_kmalloc(size, flags, _RET_IP_);
@@ -3555,14 +3513,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
 }
 EXPORT_SYMBOL(__kmalloc_track_caller);
 
-#else
-void *__kmalloc(size_t size, gfp_t flags)
-{
-	return __do_kmalloc(size, flags, 0);
-}
-EXPORT_SYMBOL(__kmalloc);
-#endif
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
@@ -3707,72 +3657,45 @@ fail:
 	return -ENOMEM;
 }
 
-struct ccupdate_struct {
-	struct kmem_cache *cachep;
-	struct array_cache *new[0];
-};
-
-static void do_ccupdate_local(void *info)
-{
-	struct ccupdate_struct *new = info;
-	struct array_cache *old;
-
-	check_irq_off();
-	old = cpu_cache_get(new->cachep);
-
-	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
-	new->new[smp_processor_id()] = old;
-}
-
 /* Always called with the slab_mutex held */
 static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
 				int batchcount, int shared, gfp_t gfp)
 {
-	struct ccupdate_struct *new;
-	int i;
+	struct array_cache __percpu *cpu_cache, *prev;
+	int cpu;
 
-	new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
-		      gfp);
-	if (!new)
+	cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount);
+	if (!cpu_cache)
 		return -ENOMEM;
 
-	for_each_online_cpu(i) {
-		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
-						batchcount, gfp);
-		if (!new->new[i]) {
-			for (i--; i >= 0; i--)
-				kfree(new->new[i]);
-			kfree(new);
-			return -ENOMEM;
-		}
-	}
-	new->cachep = cachep;
-
-	on_each_cpu(do_ccupdate_local, (void *)new, 1);
+	prev = cachep->cpu_cache;
+	cachep->cpu_cache = cpu_cache;
+	kick_all_cpus_sync();
 
 	check_irq_on();
 	cachep->batchcount = batchcount;
 	cachep->limit = limit;
 	cachep->shared = shared;
 
-	for_each_online_cpu(i) {
+	if (!prev)
+		goto alloc_node;
+
+	for_each_online_cpu(cpu) {
 		LIST_HEAD(list);
-		struct array_cache *ccold = new->new[i];
 		int node;
 		struct kmem_cache_node *n;
+		struct array_cache *ac = per_cpu_ptr(prev, cpu);
 
-		if (!ccold)
-			continue;
-
-		node = cpu_to_mem(i);
+		node = cpu_to_mem(cpu);
 		n = get_node(cachep, node);
 		spin_lock_irq(&n->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail, node, &list);
+		free_block(cachep, ac->entry, ac->avail, node, &list);
 		spin_unlock_irq(&n->list_lock);
 		slabs_destroy(cachep, &list);
-		kfree(ccold);
 	}
-	kfree(new);
+	free_percpu(prev);
+
+alloc_node:
 	return alloc_kmem_cache_node(cachep, gfp);
 }
 
@@ -4255,19 +4178,15 @@ static const struct seq_operations slabstats_op = {
 
 static int slabstats_open(struct inode *inode, struct file *file)
 {
-	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	int ret = -ENOMEM;
-	if (n) {
-		ret = seq_open(file, &slabstats_op);
-		if (!ret) {
-			struct seq_file *m = file->private_data;
-			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
-			m->private = n;
-			n = NULL;
-		}
-		kfree(n);
-	}
-	return ret;
+	unsigned long *n;
+
+	n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
+	if (!n)
+		return -ENOMEM;
+
+	*n = PAGE_SIZE / (2 * sizeof(unsigned long));
+
+	return 0;
 }
 
 static const struct file_operations proc_slabstats_operations = {
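
The last hunk moves slabstats_open() to __seq_open_private(), which allocates a zeroed private buffer of the requested size, opens the seq_file, and stores the buffer in seq_file->private, returning NULL on allocation failure. A minimal sketch of that helper's usage under assumed names (demo_seq_ops, demo_priv, and demo_open are hypothetical, not part of this patch):

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/seq_file.h>

/* Assumed to exist elsewhere: an ordinary seq_operations table. */
extern const struct seq_operations demo_seq_ops;

struct demo_priv {
	unsigned long threshold;
};

static int demo_open(struct inode *inode, struct file *file)
{
	struct demo_priv *p;

	/*
	 * Allocates a zeroed demo_priv, opens the seq_file and stores the
	 * buffer in seq_file->private; NULL means the allocation failed.
	 */
	p = __seq_open_private(file, &demo_seq_ops, sizeof(*p));
	if (!p)
		return -ENOMEM;

	p->threshold = PAGE_SIZE / (2 * sizeof(unsigned long));
	return 0;
}

static const struct file_operations demo_fops = {
	.open		= demo_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_private,	/* frees the private buffer */
};

Using seq_release_private() as the release hook frees the private buffer when the file is closed.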