Diffstat (limited to 'mm/slab.c')
 -rw-r--r--  mm/slab.c  249
 1 file changed, 162 insertions, 87 deletions
diff --git a/mm/slab.c b/mm/slab.c
index bac0f4fcc216..e49f8f46f46d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -115,6 +115,7 @@
 #include <linux/reciprocal_div.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemcheck.h>
+#include <linux/memory.h>
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
@@ -144,30 +145,6 @@
 #define BYTES_PER_WORD sizeof(void *)
 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef ARCH_KMALLOC_MINALIGN
-/*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
 #ifndef ARCH_KMALLOC_FLAGS
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
@@ -844,7 +821,7 @@ static void init_reap_node(int cpu)
 {
	int node;
 
-	node = next_node(cpu_to_node(cpu), node_online_map);
+	node = next_node(cpu_to_mem(cpu), node_online_map);
	if (node == MAX_NUMNODES)
		node = first_node(node_online_map);
 
@@ -1073,7 +1050,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
	struct array_cache *alien = NULL;
	int node;
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
	/*
	 * Make sure we are not freeing a object from another node to the array
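
The numa_node_id()/cpu_to_node() to numa_mem_id()/cpu_to_mem() conversions that recur throughout this patch make the allocator index its per-node structures by the nearest node that actually has memory, which matters on systems with memoryless nodes. A minimal sketch of the intent follows; pick_slab_list() is a hypothetical helper used only for illustration, not part of the patch.

/*
 * Sketch only: on a memoryless node, numa_node_id() can name a node with no
 * usable pages and hence no kmem_list3, while numa_mem_id() names the
 * nearest node that does have memory.
 */
static struct kmem_list3 *pick_slab_list(struct kmem_cache *cachep)
{
	int node = numa_mem_id();		/* nearest node with memory */

	return cachep->nodelists[node];		/* a node that can own slab pages */
}
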
@@ -1102,11 +1079,57 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 }
 #endif
 
+/*
+ * Allocates and initializes nodelists for a node on each slab cache, used for
+ * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3
+ * will be allocated off-node since memory is not yet online for the new node.
+ * When hotplugging memory or a cpu, existing nodelists are not replaced if
+ * already in use.
+ *
+ * Must hold cache_chain_mutex.
+ */
+static int init_cache_nodelists_node(int node)
+{
+	struct kmem_cache *cachep;
+	struct kmem_list3 *l3;
+	const int memsize = sizeof(struct kmem_list3);
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		/*
+		 * Set up the size64 kmemlist for cpu before we can
+		 * begin anything. Make sure some other cpu on this
+		 * node has not already allocated this
+		 */
+		if (!cachep->nodelists[node]) {
+			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+			if (!l3)
+				return -ENOMEM;
+			kmem_list3_init(l3);
+			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
+
+			/*
+			 * The l3s don't come and go as CPUs come and
+			 * go. cache_chain_mutex is sufficient
+			 * protection here.
+			 */
+			cachep->nodelists[node] = l3;
+		}
+
+		spin_lock_irq(&cachep->nodelists[node]->list_lock);
+		cachep->nodelists[node]->free_limit =
+			(1 + nr_cpus_node(node)) *
+			cachep->batchcount + cachep->num;
+		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+	}
+	return 0;
+}
+
 static void __cpuinit cpuup_canceled(long cpu)
 {
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
+	int node = cpu_to_mem(cpu);
	const struct cpumask *mask = cpumask_of_node(node);
 
	list_for_each_entry(cachep, &cache_chain, next) {
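
The free_limit assignment above lends itself to a worked example; the numbers below are illustrative only and are not taken from the patch.

/*
 * Illustrative arithmetic: for a cache with batchcount = 16 and num = 30
 * objects per slab, on a node with 4 online CPUs:
 *
 *	free_limit = (1 + nr_cpus_node(node)) * batchcount + num
 *	           = (1 + 4) * 16 + 30
 *	           = 110
 *
 * so up to 110 free objects may sit on that node's lists before free_block()
 * starts handing slabs back to the page allocator.
 */
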
@@ -1171,8 +1194,8 @@ static int __cpuinit cpuup_prepare(long cpu)
 {
	struct kmem_cache *cachep;
	struct kmem_list3 *l3 = NULL;
-	int node = cpu_to_node(cpu);
-	const int memsize = sizeof(struct kmem_list3);
+	int node = cpu_to_mem(cpu);
+	int err;
 
	/*
	 * We need to do this right in the beginning since
@@ -1180,35 +1203,9 @@ static int __cpuinit cpuup_prepare(long cpu)
	 * kmalloc_node allows us to add the slab to the right
	 * kmem_list3 and not this cpu's kmem_list3
	 */
-
-	list_for_each_entry(cachep, &cache_chain, next) {
-		/*
-		 * Set up the size64 kmemlist for cpu before we can
-		 * begin anything. Make sure some other cpu on this
-		 * node has not already allocated this
-		 */
-		if (!cachep->nodelists[node]) {
-			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
-			if (!l3)
-				goto bad;
-			kmem_list3_init(l3);
-			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
-			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
-			/*
-			 * The l3s don't come and go as CPUs come and
-			 * go. cache_chain_mutex is sufficient
-			 * protection here.
-			 */
-			cachep->nodelists[node] = l3;
-		}
-
-		spin_lock_irq(&cachep->nodelists[node]->list_lock);
-		cachep->nodelists[node]->free_limit =
-			(1 + nr_cpus_node(node)) *
-			cachep->batchcount + cachep->num;
-		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
-	}
+	err = init_cache_nodelists_node(node);
+	if (err < 0)
+		goto bad;
 
	/*
	 * Now we can go ahead with allocating the shared arrays and
@@ -1324,18 +1321,82 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
		mutex_unlock(&cache_chain_mutex);
		break;
	}
-	return err ? NOTIFY_BAD : NOTIFY_OK;
+	return notifier_from_errno(err);
 }
 
 static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
 };
 
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+/*
+ * Drains freelist for a node on each slab cache, used for memory hot-remove.
+ * Returns -EBUSY if all objects cannot be drained so that the node is not
+ * removed.
+ *
+ * Must hold cache_chain_mutex.
+ */
+static int __meminit drain_cache_nodelists_node(int node)
+{
+	struct kmem_cache *cachep;
+	int ret = 0;
+
+	list_for_each_entry(cachep, &cache_chain, next) {
+		struct kmem_list3 *l3;
+
+		l3 = cachep->nodelists[node];
+		if (!l3)
+			continue;
+
+		drain_freelist(cachep, l3, l3->free_objects);
+
+		if (!list_empty(&l3->slabs_full) ||
+		    !list_empty(&l3->slabs_partial)) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static int __meminit slab_memory_callback(struct notifier_block *self,
+					unsigned long action, void *arg)
+{
+	struct memory_notify *mnb = arg;
+	int ret = 0;
+	int nid;
+
+	nid = mnb->status_change_nid;
+	if (nid < 0)
+		goto out;
+
+	switch (action) {
+	case MEM_GOING_ONLINE:
+		mutex_lock(&cache_chain_mutex);
+		ret = init_cache_nodelists_node(nid);
+		mutex_unlock(&cache_chain_mutex);
+		break;
+	case MEM_GOING_OFFLINE:
+		mutex_lock(&cache_chain_mutex);
+		ret = drain_cache_nodelists_node(nid);
+		mutex_unlock(&cache_chain_mutex);
+		break;
+	case MEM_ONLINE:
+	case MEM_OFFLINE:
+	case MEM_CANCEL_ONLINE:
+	case MEM_CANCEL_OFFLINE:
+		break;
+	}
+out:
+	return ret ? notifier_from_errno(ret) : NOTIFY_OK;
+}
+#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+
 /*
  * swap the static kmem_list3 with kmalloced memory
  */
-static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
+static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
			int nodeid)
 {
	struct kmem_list3 *ptr;
 
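
Replacing "err ? NOTIFY_BAD : NOTIFY_OK" with notifier_from_errno(err), and returning the same encoding from slab_memory_callback(), keeps the original errno recoverable on the calling side. A rough sketch of that consuming side follows, under the assumption that the hotplug core uses the generic notifier helpers as mm/memory_hotplug.c did at the time; it is not part of this patch.

/*
 * Sketch, not from this patch: roughly what the hot-remove path does with
 * the callback's return value.
 */
ret = memory_notify(MEM_GOING_OFFLINE, &arg);	/* runs slab_memory_callback() */
ret = notifier_to_errno(ret);			/* recovers -EBUSY, not just "bad" */
if (ret)
	goto failed_removal;			/* abort the offline cleanly */
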
@@ -1418,7 +1479,7 @@ void __init kmem_cache_init(void)
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */
 
-	node = numa_node_id();
+	node = numa_mem_id();
 
	/* 1) create the cache_cache */
	INIT_LIST_HEAD(&cache_chain);
@@ -1580,6 +1641,14 @@ void __init kmem_cache_init_late(void)
	 */
	register_cpu_notifier(&cpucache_notifier);
 
+#ifdef CONFIG_NUMA
+	/*
+	 * Register a memory hotplug callback that initializes and frees
+	 * nodelists.
+	 */
+	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
+#endif
+
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
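
For context, the callback registered here fires at several points in the hotplug sequence; the summary below restates the memory hotplug notifier protocol as documented in the kernel and is not part of this patch.

/*
 * Event ordering seen by slab_memory_callback() (sketch):
 *
 *   hot-add:    MEM_GOING_ONLINE  -> MEM_ONLINE  (or MEM_CANCEL_ONLINE on failure)
 *   hot-remove: MEM_GOING_OFFLINE -> MEM_OFFLINE (or MEM_CANCEL_OFFLINE on failure)
 *
 * Nodelists are set up at MEM_GOING_ONLINE and drained at MEM_GOING_OFFLINE;
 * the remaining events need no slab-side work, so the callback treats them
 * as no-ops.
 */
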
@@ -2052,7 +2121,7 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
			}
		}
	}
-	cachep->nodelists[numa_node_id()]->next_reap =
+	cachep->nodelists[numa_mem_id()]->next_reap =
			jiffies + REAPTIMEOUT_LIST3 +
			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
 
@@ -2220,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	if (ralign < align) {
		ralign = align;
	}
-	/* disable debug if necessary */
-	if (ralign > __alignof__(unsigned long long))
+	/* disable debug if not aligning with REDZONE_ALIGN */
+	if (ralign & (__alignof__(unsigned long long) - 1))
		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	/*
	 * 4) Store it.
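
The old test turned off red-zoning and user tracking whenever the computed alignment exceeded that of unsigned long long; the new mask test only does so when the alignment is not a multiple of it. Illustrative values, not taken from the patch:

/*
 * Illustrative values:
 *
 *   ralign = 64:  old test: 64 > 8       -> debug disabled
 *                 new test: 64 & 7 == 0  -> red-zoning/user tracking kept
 *
 *   ralign = 4:   old test: 4 > 8        -> debug kept
 *                 new test: 4 & 7 != 0   -> debug disabled (a 64-bit
 *                                           red-zone word cannot be placed
 *                                           at that alignment)
 */
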
@@ -2247,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
	 */
	if (flags & SLAB_RED_ZONE) {
		/* add space for red zone words */
-		cachep->obj_offset += sizeof(unsigned long long);
-		size += 2 * sizeof(unsigned long long);
+		cachep->obj_offset += align;
+		size += align + sizeof(unsigned long long);
	}
	if (flags & SLAB_STORE_USER) {
		/* user store requires one word storage behind the end of
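
Sizing the leading red-zone area by the cache alignment rather than a fixed 8 bytes keeps the object itself aligned. A worked layout with illustrative numbers, not taken from the patch:

/*
 * Illustrative layout for a cache aligned to 64 bytes (align = 64) with
 * SLAB_RED_ZONE set:
 *
 *   old sizing: obj_offset += 8,  size += 16
 *               -> the object starts 8 bytes in and loses 64-byte alignment
 *   new sizing: obj_offset += 64, size += 64 + 8
 *               -> 64 bytes are reserved ahead of the object (the last word
 *                  of which holds the first red-zone marker), the object
 *                  starts on a 64-byte boundary, and one word after it holds
 *                  the second marker
 */
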
@@ -2383,7 +2452,7 @@ static void check_spinlock_acquired(struct kmem_cache *cachep)
 {
 #ifdef CONFIG_SMP
	check_irq_off();
-	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
+	assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
 #endif
 }
 
@@ -2410,7 +2479,7 @@ static void do_drain(void *arg)
 {
	struct kmem_cache *cachep = arg;
	struct array_cache *ac;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
	check_irq_off();
	ac = cpu_cache_get(cachep);
@@ -2943,7 +3012,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 
 retry:
	check_irq_off();
-	node = numa_node_id();
+	node = numa_mem_id();
	ac = cpu_cache_get(cachep);
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
@@ -3147,11 +3216,13 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
 
	if (in_interrupt() || (flags & __GFP_THISNODE))
		return NULL;
-	nid_alloc = nid_here = numa_node_id();
+	nid_alloc = nid_here = numa_mem_id();
+	get_mems_allowed();
	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
-		nid_alloc = cpuset_mem_spread_node();
+		nid_alloc = cpuset_slab_spread_node();
	else if (current->mempolicy)
		nid_alloc = slab_node(current->mempolicy);
+	put_mems_allowed();
	if (nid_alloc != nid_here)
		return ____cache_alloc_node(cachep, flags, nid_alloc);
	return NULL;
@@ -3178,6 +3249,7 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
	if (flags & __GFP_THISNODE)
		return NULL;
 
+	get_mems_allowed();
	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
@@ -3209,7 +3281,7 @@ retry:
		if (local_flags & __GFP_WAIT)
			local_irq_enable();
		kmem_flagcheck(cache, flags);
-		obj = kmem_getpages(cache, local_flags, numa_node_id());
+		obj = kmem_getpages(cache, local_flags, numa_mem_id());
		if (local_flags & __GFP_WAIT)
			local_irq_disable();
		if (obj) {
@@ -3233,6 +3305,7 @@ retry:
			}
		}
	}
+	put_mems_allowed();
	return obj;
 }
 
@@ -3316,6 +3389,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 {
	unsigned long save_flags;
	void *ptr;
+	int slab_node = numa_mem_id();
 
	flags &= gfp_allowed_mask;
 
@@ -3328,7 +3402,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
	local_irq_save(save_flags);
 
	if (nodeid == -1)
-		nodeid = numa_node_id();
+		nodeid = slab_node;
 
	if (unlikely(!cachep->nodelists[nodeid])) {
		/* Node not bootstrapped yet */
@@ -3336,7 +3410,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
		goto out;
	}
 
-	if (nodeid == numa_node_id()) {
+	if (nodeid == slab_node) {
		/*
		 * Use the locally cached objects if possible.
		 * However ____cache_alloc does not allow fallback
@@ -3380,8 +3454,8 @@ __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
	 * We may just have run out of memory on the local node.
	 * ____cache_alloc_node() knows how to locate memory on other nodes
	 */
	if (!objp)
-		objp = ____cache_alloc_node(cache, flags, numa_node_id());
+		objp = ____cache_alloc_node(cache, flags, numa_mem_id());
 
   out:
	return objp;
@@ -3478,7 +3552,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 {
	int batchcount;
	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
 
	batchcount = ac->batchcount;
 #if DEBUG
@@ -3912,7 +3986,7 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
		return -ENOMEM;
 
	for_each_online_cpu(i) {
-		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
+		new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
						batchcount, gfp);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
@@ -3934,9 +4008,9 @@ static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
-		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
-		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
-		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
+		spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
+		free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
+		spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
		kfree(ccold);
	}
	kfree(new);
@@ -4042,7 +4116,7 @@ static void cache_reap(struct work_struct *w)
 {
	struct kmem_cache *searchp;
	struct kmem_list3 *l3;
-	int node = numa_node_id();
+	int node = numa_mem_id();
	struct delayed_work *work = to_delayed_work(w);
 
	if (!mutex_trylock(&cache_chain_mutex))
@@ -4216,10 +4290,11 @@ static int s_show(struct seq_file *m, void *p)
		unsigned long node_frees = cachep->node_frees;
		unsigned long overflows = cachep->node_overflow;
 
-		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
-				reaped, errors, max_freeable, node_allocs,
-				node_frees, overflows);
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
+			   "%4lu %4lu %4lu %4lu %4lu",
+			   allocs, high, grown,
+			   reaped, errors, max_freeable, node_allocs,
+			   node_frees, overflows);
	}
	/* cpu stats */
	{