Diffstat (limited to 'mm/slub.c')
 mm/slub.c | 82
 1 files changed, 52 insertions, 30 deletions
diff --git a/mm/slub.c b/mm/slub.c
index 0280eee6cf37..c65a4edafc33 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -374,14 +374,8 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
 {
-	struct track *p;
-
-	if (s->offset)
-		p = object + s->offset + sizeof(void *);
-	else
-		p = object + s->inuse;
+	struct track *p = get_track(s, object, alloc);
 
-	p += alloc;
 	if (addr) {
 		p->addr = addr;
 		p->cpu = smp_processor_id();
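
The open-coded lookup removed above duplicates the logic of get_track(), which the hunk header shows already exists just before set_track(); the change simply reuses it. For reference, get_track() at this point in the file reads roughly as follows (a sketch reconstructed from the removed lines; exact whitespace may differ):

static struct track *get_track(struct kmem_cache *s, void *object,
			enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}
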
@@ -1335,7 +1329,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 		n = get_node(s, zone_to_nid(zone));
 
 		if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
-				n->nr_partial > n->min_partial) {
+				n->nr_partial > s->min_partial) {
 			page = get_partial_node(n);
 			if (page)
 				return page;
@@ -1387,7 +1381,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 		slab_unlock(page);
 	} else {
 		stat(c, DEACTIVATE_EMPTY);
-		if (n->nr_partial < n->min_partial) {
+		if (n->nr_partial < s->min_partial) {
 			/*
 			 * Adding an empty slab to the partial slabs in order
 			 * to avoid page allocator overhead. This slab needs
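
Both hunks above are the read side of the min_partial move: the threshold that used to live per node (n->min_partial) now lives in struct kmem_cache (s->min_partial), while the policy itself is unchanged. An empty cpu slab is kept on the node's partial list only while the node holds fewer than min_partial partial slabs; otherwise it goes back to the page allocator. A minimal sketch of that decision, assuming only the field names visible in this diff:

/* Hedged sketch of the keep-or-free test in unfreeze_slab(); the real
 * function also handles list manipulation, statistics and locking. */
static int keep_empty_slab(unsigned long nr_partial_on_node,
			   unsigned long cache_min_partial)
{
	/* Keep the empty slab to avoid page allocator round trips while
	 * the node's partial list is still short. */
	return nr_partial_on_node < cache_min_partial;
}
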
@@ -1724,7 +1718,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	c = get_cpu_slab(s, smp_processor_id());
 	debug_check_no_locks_freed(object, c->objsize);
 	if (!(s->flags & SLAB_DEBUG_OBJECTS))
-		debug_check_no_obj_freed(object, s->objsize);
+		debug_check_no_obj_freed(object, c->objsize);
 	if (likely(page == c->page && c->node >= 0)) {
 		object[c->offset] = c->freelist;
 		c->freelist = object;
@@ -1844,6 +1838,7 @@ static inline int calculate_order(int size)
 	int order;
 	int min_objects;
 	int fraction;
+	int max_objects;
 
 	/*
 	 * Attempt to find best configuration for a slab. This
@@ -1856,6 +1851,9 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
+	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	min_objects = min(min_objects, max_objects);
+
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
@@ -1865,7 +1863,7 @@ static inline int calculate_order(int size)
 				return order;
 			fraction /= 2;
 		}
-		min_objects /= 2;
+		min_objects--;
 	}
 
 	/*
@@ -1928,17 +1926,6 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 {
 	n->nr_partial = 0;
-
-	/*
-	 * The larger the object size is, the more pages we want on the partial
-	 * list to avoid pounding the page allocator excessively.
-	 */
-	n->min_partial = ilog2(s->size);
-	if (n->min_partial < MIN_PARTIAL)
-		n->min_partial = MIN_PARTIAL;
-	else if (n->min_partial > MAX_PARTIAL)
-		n->min_partial = MAX_PARTIAL;
-
 	spin_lock_init(&n->list_lock);
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
@@ -2181,6 +2168,15 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 }
 #endif
 
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+	if (min < MIN_PARTIAL)
+		min = MIN_PARTIAL;
+	else if (min > MAX_PARTIAL)
+		min = MAX_PARTIAL;
+	s->min_partial = min;
+}
+
 /*
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
@@ -2319,6 +2315,11 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
 	if (!calculate_sizes(s, -1))
 		goto error;
 
+	/*
+	 * The larger the object size is, the more pages we want on the partial
+	 * list to avoid pounding the page allocator excessively.
+	 */
+	set_min_partial(s, ilog2(s->size));
 	s->refcount = 1;
 #ifdef CONFIG_NUMA
 	s->remote_node_defrag_ratio = 1000;
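
The default moves along with the comment: kmem_cache_open() now seeds min_partial with ilog2(s->size), and set_min_partial() clamps it into the [MIN_PARTIAL, MAX_PARTIAL] window. A small worked example of that calculation, assuming the MIN_PARTIAL and MAX_PARTIAL values of 5 and 10 that mm/slub.c uses in this period (check the header of the file you are building against):

#include <stdio.h>

#define MIN_PARTIAL 5	/* assumed values, see mm/slub.c */
#define MAX_PARTIAL 10

static unsigned long ilog2_ul(unsigned long x)
{
	unsigned long r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* Mirrors the seeding + clamping done by kmem_cache_open()/set_min_partial(). */
static unsigned long default_min_partial(unsigned long object_size)
{
	unsigned long min = ilog2_ul(object_size);

	if (min < MIN_PARTIAL)
		min = MIN_PARTIAL;
	else if (min > MAX_PARTIAL)
		min = MAX_PARTIAL;
	return min;
}

int main(void)
{
	/* 64-byte objects -> ilog2 = 6 (kept); 8192-byte objects -> ilog2 = 13,
	 * clamped down to MAX_PARTIAL. */
	printf("%lu %lu\n", default_min_partial(64), default_min_partial(8192));
	return 0;
}
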
@@ -2475,7 +2476,7 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 static int __init setup_slub_min_order(char *str)
@@ -2537,7 +2538,7 @@ panic:
 }
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
 
 static void sysfs_add_func(struct work_struct *w)
 {
@@ -2658,7 +2659,7 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
@@ -2686,7 +2687,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, flags, node);
 
 	s = get_slab(size, flags);
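
These __kmalloc() hunks, together with the kmalloc_caches[] sizing and the kmem_cache_init() loops below, raise the cutoff at which kmalloc() falls through to the page allocator from PAGE_SIZE to SLUB_MAX_SIZE, with SLUB_PAGE_SHIFT bounding the cache array accordingly. A rough sketch of the resulting size-to-cache mapping, assuming 4 KiB pages and SLUB_PAGE_SHIFT = PAGE_SHIFT + 2 (the definition this diff appears to rely on), and ignoring the special 96- and 192-byte caches:

#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages, for illustration */
#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)	/* assumed value after this change */

/* Report which power-of-two kmalloc cache would serve a request, or whether
 * it is handed to the page allocator (illustrative only). */
static void which_cache(unsigned long size)
{
	unsigned int shift = 3;		/* smallest general cache: 8 bytes */

	while ((1UL << shift) < size && shift < SLUB_PAGE_SHIFT - 1)
		shift++;

	if ((1UL << shift) >= size)
		printf("%lu -> kmalloc-%lu\n", size, 1UL << shift);
	else
		printf("%lu -> page allocator (kmalloc_large)\n", size);
}

int main(void)
{
	which_cache(6000);	/* now stays in the kmalloc-8192 slab cache */
	which_cache(9000);	/* above SLUB_MAX_SIZE, still kmalloc_large() */
	return 0;
}
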
@@ -2986,7 +2987,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
@@ -3023,7 +3024,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3223,7 +3224,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large(size, gfpflags);
 
 	s = get_slab(size, gfpflags);
@@ -3239,7 +3240,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE))
+	if (unlikely(size > SLUB_MAX_SIZE))
 		return kmalloc_large_node(size, gfpflags, node);
 
 	s = get_slab(size, gfpflags);
@@ -3836,6 +3837,26 @@ static ssize_t order_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR(order);
 
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+				 size_t length)
+{
+	unsigned long min;
+	int err;
+
+	err = strict_strtoul(buf, 10, &min);
+	if (err)
+		return err;
+
+	set_min_partial(s, min);
+	return length;
+}
+SLAB_ATTR(min_partial);
+
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
 	if (s->ctor) {
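
With the min_partial attribute added above (and wired into slab_attrs[] in the next hunk), the threshold becomes tunable per cache at runtime through /sys/kernel/slab/<cache>/min_partial. A minimal userspace sketch of writing it, assuming a SLUB kernel and a cache named "dentry"; a plain shell echo to the same file does the same thing:

#include <stdio.h>

int main(void)
{
	/* Path assumes CONFIG_SLUB sysfs support and an existing "dentry" cache. */
	FILE *f = fopen("/sys/kernel/slab/dentry/min_partial", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Keep empty slabs around while the node has fewer than 8 partial slabs. */
	fprintf(f, "8\n");
	fclose(f);
	return 0;
}
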
@@ -4151,6 +4172,7 @@ static struct attribute *slab_attrs[] = {
 	&object_size_attr.attr,
 	&objs_per_slab_attr.attr,
 	&order_attr.attr,
+	&min_partial_attr.attr,
 	&objects_attr.attr,
 	&objects_partial_attr.attr,
 	&total_objects_attr.attr,