about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--Documentation/vm/slabinfo.c10
-rw-r--r--init/Kconfig2
-rw-r--r--mm/slub.c44
3 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/Documentation/vm/slabinfo.c b/Documentation/vm/slabinfo.c
index d3ce295bffac..e4230ed16ee7 100644
--- a/Documentation/vm/slabinfo.c
+++ b/Documentation/vm/slabinfo.c
@@ -38,7 +38,7 @@ struct slabinfo {
38 unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill; 38 unsigned long alloc_from_partial, alloc_slab, free_slab, alloc_refill;
39 unsigned long cpuslab_flush, deactivate_full, deactivate_empty; 39 unsigned long cpuslab_flush, deactivate_full, deactivate_empty;
40 unsigned long deactivate_to_head, deactivate_to_tail; 40 unsigned long deactivate_to_head, deactivate_to_tail;
41 unsigned long deactivate_remote_frees; 41 unsigned long deactivate_remote_frees, order_fallback;
42 int numa[MAX_NODES]; 42 int numa[MAX_NODES];
43 int numa_partial[MAX_NODES]; 43 int numa_partial[MAX_NODES];
44} slabinfo[MAX_SLABS]; 44} slabinfo[MAX_SLABS];
@@ -293,7 +293,7 @@ int line = 0;
293void first_line(void) 293void first_line(void)
294{ 294{
295 if (show_activity) 295 if (show_activity)
296 printf("Name Objects Alloc Free %%Fast\n"); 296 printf("Name Objects Alloc Free %%Fast Fallb O\n");
297 else 297 else
298 printf("Name Objects Objsize Space " 298 printf("Name Objects Objsize Space "
299 "Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n"); 299 "Slabs/Part/Cpu O/S O %%Fr %%Ef Flg\n");
@@ -573,11 +573,12 @@ void slabcache(struct slabinfo *s)
573 total_alloc = s->alloc_fastpath + s->alloc_slowpath; 573 total_alloc = s->alloc_fastpath + s->alloc_slowpath;
574 total_free = s->free_fastpath + s->free_slowpath; 574 total_free = s->free_fastpath + s->free_slowpath;
575 575
576 printf("%-21s %8ld %8ld %8ld %3ld %3ld \n", 576 printf("%-21s %8ld %10ld %10ld %3ld %3ld %5ld %1d\n",
577 s->name, s->objects, 577 s->name, s->objects,
578 total_alloc, total_free, 578 total_alloc, total_free,
579 total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0, 579 total_alloc ? (s->alloc_fastpath * 100 / total_alloc) : 0,
580 total_free ? (s->free_fastpath * 100 / total_free) : 0); 580 total_free ? (s->free_fastpath * 100 / total_free) : 0,
581 s->order_fallback, s->order);
581 } 582 }
582 else 583 else
583 printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n", 584 printf("%-21s %8ld %7d %8s %14s %4d %1d %3ld %3ld %s\n",
@@ -1188,6 +1189,7 @@ void read_slab_dir(void)
1188 slab->deactivate_to_head = get_obj("deactivate_to_head"); 1189 slab->deactivate_to_head = get_obj("deactivate_to_head");
1189 slab->deactivate_to_tail = get_obj("deactivate_to_tail"); 1190 slab->deactivate_to_tail = get_obj("deactivate_to_tail");
1190 slab->deactivate_remote_frees = get_obj("deactivate_remote_frees"); 1191 slab->deactivate_remote_frees = get_obj("deactivate_remote_frees");
1192 slab->order_fallback = get_obj("order_fallback");
1191 chdir(".."); 1193 chdir("..");
1192 if (slab->name[0] == ':') 1194 if (slab->name[0] == ':')
1193 alias_targets++; 1195 alias_targets++;
diff --git a/init/Kconfig b/init/Kconfig
index 3e7b257fc05f..6a44defac3ec 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -720,7 +720,7 @@ config VM_EVENT_COUNTERS
720config SLUB_DEBUG 720config SLUB_DEBUG
721 default y 721 default y
722 bool "Enable SLUB debugging support" if EMBEDDED 722 bool "Enable SLUB debugging support" if EMBEDDED
723 depends on SLUB 723 depends on SLUB && SYSFS
724 help 724 help
725 SLUB has extensive debug support features. Disabling these can 725 SLUB has extensive debug support features. Disabling these can
726 result in significant savings in code size. This also disables 726 result in significant savings in code size. This also disables
diff --git a/mm/slub.c b/mm/slub.c
index 32b62623846a..d379b782fc83 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -217,7 +217,7 @@ struct track {
217 217
218enum track_item { TRACK_ALLOC, TRACK_FREE }; 218enum track_item { TRACK_ALLOC, TRACK_FREE };
219 219
220#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG) 220#ifdef CONFIG_SLUB_DEBUG
221static int sysfs_slab_add(struct kmem_cache *); 221static int sysfs_slab_add(struct kmem_cache *);
222static int sysfs_slab_alias(struct kmem_cache *, const char *); 222static int sysfs_slab_alias(struct kmem_cache *, const char *);
223static void sysfs_slab_remove(struct kmem_cache *); 223static void sysfs_slab_remove(struct kmem_cache *);
@@ -814,7 +814,8 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
814 return search == NULL; 814 return search == NULL;
815} 815}
816 816
817static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc) 817static void trace(struct kmem_cache *s, struct page *page, void *object,
818 int alloc)
818{ 819{
819 if (s->flags & SLAB_TRACE) { 820 if (s->flags & SLAB_TRACE) {
820 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n", 821 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
@@ -1267,8 +1268,7 @@ static void add_partial(struct kmem_cache_node *n,
1267 spin_unlock(&n->list_lock); 1268 spin_unlock(&n->list_lock);
1268} 1269}
1269 1270
1270static void remove_partial(struct kmem_cache *s, 1271static void remove_partial(struct kmem_cache *s, struct page *page)
1271 struct page *page)
1272{ 1272{
1273 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); 1273 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1274 1274
@@ -1283,7 +1283,8 @@ static void remove_partial(struct kmem_cache *s,
1283 * 1283 *
1284 * Must hold list_lock. 1284 * Must hold list_lock.
1285 */ 1285 */
1286static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page) 1286static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1287 struct page *page)
1287{ 1288{
1288 if (slab_trylock(page)) { 1289 if (slab_trylock(page)) {
1289 list_del(&page->lru); 1290 list_del(&page->lru);
@@ -1420,8 +1421,8 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1420 * so that the others get filled first. That way the 1421 * so that the others get filled first. That way the
1421 * size of the partial list stays small. 1422 * size of the partial list stays small.
1422 * 1423 *
1423 * kmem_cache_shrink can reclaim any empty slabs from the 1424 * kmem_cache_shrink can reclaim any empty slabs from
1424 * partial list. 1425 * the partial list.
1425 */ 1426 */
1426 add_partial(n, page, 1); 1427 add_partial(n, page, 1);
1427 slab_unlock(page); 1428 slab_unlock(page);
@@ -2909,7 +2910,7 @@ static int slab_mem_going_online_callback(void *arg)
2909 return 0; 2910 return 0;
2910 2911
2911 /* 2912 /*
2912 * We are bringing a node online. No memory is availabe yet. We must 2913 * We are bringing a node online. No memory is available yet. We must
2913 * allocate a kmem_cache_node structure in order to bring the node 2914 * allocate a kmem_cache_node structure in order to bring the node
2914 * online. 2915 * online.
2915 */ 2916 */
@@ -3246,7 +3247,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3246 return slab_alloc(s, gfpflags, node, caller); 3247 return slab_alloc(s, gfpflags, node, caller);
3247} 3248}
3248 3249
3249#if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO) 3250#ifdef CONFIG_SLUB_DEBUG
3250static unsigned long count_partial(struct kmem_cache_node *n, 3251static unsigned long count_partial(struct kmem_cache_node *n,
3251 int (*get_count)(struct page *)) 3252 int (*get_count)(struct page *))
3252{ 3253{
@@ -3275,9 +3276,7 @@ static int count_free(struct page *page)
3275{ 3276{
3276 return page->objects - page->inuse; 3277 return page->objects - page->inuse;
3277} 3278}
3278#endif
3279 3279
3280#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
3281static int validate_slab(struct kmem_cache *s, struct page *page, 3280static int validate_slab(struct kmem_cache *s, struct page *page,
3282 unsigned long *map) 3281 unsigned long *map)
3283{ 3282{
@@ -3812,7 +3811,12 @@ SLAB_ATTR_RO(objs_per_slab);
3812static ssize_t order_store(struct kmem_cache *s, 3811static ssize_t order_store(struct kmem_cache *s,
3813 const char *buf, size_t length) 3812 const char *buf, size_t length)
3814{ 3813{
3815 int order = simple_strtoul(buf, NULL, 10); 3814 unsigned long order;
3815 int err;
3816
3817 err = strict_strtoul(buf, 10, &order);
3818 if (err)
3819 return err;
3816 3820
3817 if (order > slub_max_order || order < slub_min_order) 3821 if (order > slub_max_order || order < slub_min_order)
3818 return -EINVAL; 3822 return -EINVAL;
@@ -4065,10 +4069,16 @@ static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
4065static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s, 4069static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
4066 const char *buf, size_t length) 4070 const char *buf, size_t length)
4067{ 4071{
4068 int n = simple_strtoul(buf, NULL, 10); 4072 unsigned long ratio;
4073 int err;
4074
4075 err = strict_strtoul(buf, 10, &ratio);
4076 if (err)
4077 return err;
4078
4079 if (ratio < 100)
4080 s->remote_node_defrag_ratio = ratio * 10;
4069 4081
4070 if (n < 100)
4071 s->remote_node_defrag_ratio = n * 10;
4072 return length; 4082 return length;
4073} 4083}
4074SLAB_ATTR(remote_node_defrag_ratio); 4084SLAB_ATTR(remote_node_defrag_ratio);
@@ -4425,8 +4435,8 @@ __initcall(slab_sysfs_init);
4425 */ 4435 */
4426#ifdef CONFIG_SLABINFO 4436#ifdef CONFIG_SLABINFO
4427 4437
4428ssize_t slabinfo_write(struct file *file, const char __user * buffer, 4438ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4429 size_t count, loff_t *ppos) 4439 size_t count, loff_t *ppos)
4430{ 4440{
4431 return -EINVAL; 4441 return -EINVAL;
4432} 4442}