author		Pekka Enberg <penberg@kernel.org>	2011-03-20 12:12:03 -0400
committer	Pekka Enberg <penberg@kernel.org>	2011-03-20 12:12:03 -0400
commit		c53badd0801728feedfcccae04239410b52b0d03 (patch)
tree		016421ec2a618767f01df8cfbf765a4dab3b3629 /mm/slub.c
parent		521cb40b0c44418a4fd36dc633f575813d59a43d (diff)
parent		865d794d1f144d0f93796840d01696cd70647a8a (diff)
Merge branch 'slab/next' into for-linus
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	132
1 file changed, 81 insertions(+), 51 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e15aa7f193c9..e841d8921c22 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -281,11 +281,40 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 	return (p - addr) / s->size;
 }
 
+static inline size_t slab_ksize(const struct kmem_cache *s)
+{
+#ifdef CONFIG_SLUB_DEBUG
+	/*
+	 * Debugging requires use of the padding between object
+	 * and whatever may come after it.
+	 */
+	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
+		return s->objsize;
+
+#endif
+	/*
+	 * If we have the need to store the freelist pointer
+	 * back there or track user information then we can
+	 * only use the space before that information.
+	 */
+	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
+		return s->inuse;
+	/*
+	 * Else we can use all the padding etc for the allocation
+	 */
+	return s->size;
+}
+
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+	return ((PAGE_SIZE << order) - reserved) / size;
+}
+
 static inline struct kmem_cache_order_objects oo_make(int order,
-						unsigned long size)
+						unsigned long size, int reserved)
 {
 	struct kmem_cache_order_objects x = {
-		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + order_objects(order, size, reserved)
 	};
 
 	return x;
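
The hunk above is the core of the change: order_objects() subtracts a per-slab reserved tail before dividing the slab into objects, and oo_make() now takes that reservation into account. As a quick illustration, here is a standalone userspace sketch of the same arithmetic; it is not part of the patch, and the 4096-byte page size and 16-byte rcu_head size are assumed values.

/*
 * Minimal sketch of the order_objects() arithmetic with a reserved
 * tail. Assumptions: PAGE_SIZE is 4096 and 16 bytes are reserved
 * (roughly a two-pointer struct rcu_head on 64-bit).
 */
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096UL

static int order_objects_sketch(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE_SKETCH << order) - reserved) / size;
}

int main(void)
{
	/* 256-byte objects with 16 bytes reserved at the end of each slab. */
	printf("order 0: %d objects per slab\n", order_objects_sketch(0, 256, 16));
	printf("order 1: %d objects per slab\n", order_objects_sketch(1, 256, 16));
	return 0;
}

With 16 bytes reserved, an order-0 slab of 256-byte objects holds 15 objects instead of 16, which is exactly the space later used for the RCU head.
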
@@ -617,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = (PAGE_SIZE << compound_order(page));
+	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -698,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		return 0;
 	}
 
-	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 	if (page->objects > maxobj) {
 		slab_err(s, page, "objects %u > max %u",
 			s->name, page->objects, maxobj);
@@ -748,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		nr++;
 	}
 
-	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+	max_objects = order_objects(compound_order(page), s->size, s->reserved);
 	if (max_objects > MAX_OBJS_PER_PAGE)
 		max_objects = MAX_OBJS_PER_PAGE;
 
@@ -800,7 +829,7 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
 static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object)
 {
 	flags &= gfp_allowed_mask;
-	kmemcheck_slab_alloc(s, flags, object, s->objsize);
+	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
 	kmemleak_alloc_recursive(object, s->objsize, 1, s->flags, flags);
 }
 
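
This hunk switches kmemcheck over to the new slab_ksize() helper, so the size it is told about matches what the caller may actually use rather than the requested object size. A userspace sketch of the same decision tree follows; the flag values and the cache descriptor are simplified stand-ins, not the real kmem_cache layout.

/*
 * Sketch of the slab_ksize() decision tree: which of objsize, inuse
 * or size is reported as usable, depending on cache flags. All names
 * and values here are stand-ins for illustration only.
 */
#include <stdio.h>

#define SKETCH_RED_ZONE		0x1
#define SKETCH_POISON		0x2
#define SKETCH_DESTROY_BY_RCU	0x4
#define SKETCH_STORE_USER	0x8

struct cache_sketch {
	unsigned long flags;
	size_t objsize;	/* what the caller asked for */
	size_t inuse;	/* objsize plus metadata stored inside the object */
	size_t size;	/* full per-object slot including padding */
};

static size_t slab_ksize_sketch(const struct cache_sketch *s)
{
	if (s->flags & (SKETCH_RED_ZONE | SKETCH_POISON))
		return s->objsize;	/* debugging owns the padding */
	if (s->flags & (SKETCH_DESTROY_BY_RCU | SKETCH_STORE_USER))
		return s->inuse;	/* padding holds freelist/track data */
	return s->size;			/* all padding usable by the caller */
}

int main(void)
{
	struct cache_sketch c = { .flags = SKETCH_STORE_USER,
				  .objsize = 100, .inuse = 104, .size = 112 };

	printf("usable size: %zu\n", slab_ksize_sketch(&c));
	return 0;
}
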
@@ -1249,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;
 
-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}
 
 		call_rcu(head, rcu_free_slab);
 	} else
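
When struct rcu_head no longer fits over page->lru, free_slab() now places it in the reserved space at the very end of the slab: page_address(page) + (PAGE_SIZE << order) - s->reserved. A userspace sketch of that placement follows; the 4 KiB "slab" and the two-pointer rcu_head stand-in are assumptions for illustration.

/*
 * Sketch of where the RCU head lands when the reserved tail is used:
 * the last s->reserved bytes of the slab.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_rcu_head {
	void *next;
	void (*func)(struct fake_rcu_head *);
};

int main(void)
{
	size_t slab_size = 4096;	/* stands in for PAGE_SIZE << order */
	size_t reserved = sizeof(struct fake_rcu_head);
	char *slab = malloc(slab_size);
	struct fake_rcu_head *head;

	if (!slab)
		return 1;

	/* Mirrors: head = page_address(page) + offset, offset = size - reserved */
	head = (struct fake_rcu_head *)(slab + slab_size - reserved);
	printf("slab at %p, rcu head at %p (offset %zu)\n",
	       (void *)slab, (void *)head, slab_size - reserved);
	free(slab);
	return 0;
}
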
@@ -1988,13 +2034,13 @@ static int slub_nomerge;
  * the smallest order which will fit the object.
  */
 static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover)
+				int max_order, int fract_leftover, int reserved)
 {
 	int order;
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
@@ -2003,10 +2049,10 @@ static inline int slab_order(int size, int min_objects,
 
 		unsigned long slab_size = PAGE_SIZE << order;
 
-		if (slab_size < min_objects * size)
+		if (slab_size < min_objects * size + reserved)
 			continue;
 
-		rem = slab_size % size;
+		rem = (slab_size - reserved) % size;
 
 		if (rem <= slab_size / fract_leftover)
 			break;
@@ -2016,7 +2062,7 @@ static inline int slab_order(int size, int min_objects,
 	return order;
 }
 
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
 {
 	int order;
 	int min_objects;
@@ -2034,14 +2080,14 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
-	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	max_objects = order_objects(slub_max_order, size, reserved);
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
-					slub_max_order, fraction);
+					slub_max_order, fraction, reserved);
 			if (order <= slub_max_order)
 				return order;
 			fraction /= 2;
@@ -2053,14 +2099,14 @@ static inline int calculate_order(int size)
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
 	 */
-	order = slab_order(size, 1, slub_max_order, 1);
+	order = slab_order(size, 1, slub_max_order, 1, reserved);
 	if (order <= slub_max_order)
 		return order;
 
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = slab_order(size, 1, MAX_ORDER, 1);
+	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
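
slab_order() and calculate_order() now fold the reserved bytes into both the fit check and the leftover calculation. The sketch below reproduces only the inner fitting loop with assumed constants (PAGE_SIZE 4096, maximum order 3) and skips the fls()-based starting order; it is an illustration, not the kernel code path.

/*
 * Sketch of the order-selection loop with the reserved tail folded in:
 * pick the smallest order where the leftover after packing objects
 * (and the reserved bytes) is small enough.
 */
#include <stdio.h>

#define PAGE_SIZE_SKETCH	4096UL
#define MAX_ORDER_SKETCH	3

static int slab_order_sketch(int size, int min_objects, int max_order,
			     int fract_leftover, int reserved)
{
	int order;

	for (order = 0; order <= max_order; order++) {
		unsigned long slab_size = PAGE_SIZE_SKETCH << order;
		unsigned long rem;

		if (slab_size < (unsigned long)min_objects * size + reserved)
			continue;

		rem = (slab_size - reserved) % size;
		if (rem <= slab_size / fract_leftover)
			break;
	}
	return order;	/* caller must still check it against max_order */
}

int main(void)
{
	/* 700-byte objects, at least 8 per slab, 16 bytes reserved. */
	printf("chosen order: %d\n",
	       slab_order_sketch(700, 8, MAX_ORDER_SKETCH, 16, 16));
	return 0;
}
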
@@ -2311,7 +2357,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-		order = calculate_order(size);
+		order = calculate_order(size, s->reserved);
 
 	if (order < 0)
 		return 0;
@@ -2329,8 +2375,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->oo = oo_make(order, size);
-	s->min = oo_make(get_order(size), size);
+	s->oo = oo_make(order, size, s->reserved);
+	s->min = oo_make(get_order(size), size, s->reserved);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
 
@@ -2349,6 +2395,10 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->objsize = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->reserved = 0;
+
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);
 
 	if (!calculate_sizes(s, -1))
 		goto error;
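
kmem_cache_open() only reserves space when the cache is SLAB_DESTROY_BY_RCU and struct rcu_head no longer fits over page->lru; with both at two pointers, need_reserve_slab_rcu is false and nothing changes. The compile-time test can be sketched in userspace with stand-in struct layouts (these are assumptions, not the real kernel structures).

/*
 * Sketch of the need_reserve_slab_rcu test: reserve only if the RCU
 * head outgrows the two-pointer lru list head it used to overlay.
 */
#include <stdio.h>

struct list_head_sketch {
	void *next, *prev;
};

struct rcu_head_sketch {
	void *next;
	void (*func)(struct rcu_head_sketch *);
};

int main(void)
{
	int need_reserve =
		sizeof(struct list_head_sketch) < sizeof(struct rcu_head_sketch);

	/* Prints 0 while both are two pointers wide. */
	printf("need_reserve_slab_rcu (sketch): %d\n", need_reserve);
	return 0;
}
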
@@ -2399,12 +2449,6 @@ unsigned int kmem_cache_size(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
-const char *kmem_cache_name(struct kmem_cache *s)
-{
-	return s->name;
-}
-EXPORT_SYMBOL(kmem_cache_name);
-
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
 							const char *text)
 {
@@ -2696,7 +2740,6 @@ EXPORT_SYMBOL(__kmalloc_node);
 size_t ksize(const void *object)
 {
 	struct page *page;
-	struct kmem_cache *s;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return 0;
@@ -2707,28 +2750,8 @@ size_t ksize(const void *object)
 		WARN_ON(!PageCompound(page));
 		return PAGE_SIZE << compound_order(page);
 	}
-	s = page->slab;
-
-#ifdef CONFIG_SLUB_DEBUG
-	/*
-	 * Debugging requires use of the padding between object
-	 * and whatever may come after it.
-	 */
-	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
-		return s->objsize;
 
-#endif
-	/*
-	 * If we have the need to store the freelist pointer
-	 * back there or track user information then we can
-	 * only use the space before that information.
-	 */
-	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
-		return s->inuse;
-	/*
-	 * Else we can use all the padding etc for the allocation
-	 */
-	return s->size;
+	return slab_ksize(page->slab);
 }
 EXPORT_SYMBOL(ksize);
 
@@ -4017,6 +4040,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
 #ifdef CONFIG_SLUB_DEBUG
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
@@ -4303,6 +4332,7 @@ static struct attribute *slab_attrs[] = {
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
 	&shrink_attr.attr,
+	&reserved_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
 	&total_objects_attr.attr,
 	&slabs_attr.attr,
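
Finally, the last two hunks export the per-cache reservation through sysfs as a read-only attribute. A small sketch of reading it from userspace follows; the kmalloc-64 cache name is only an example, and any directory under /sys/kernel/slab exposes the attribute after this change.

/*
 * Sketch: read the new "reserved" attribute of a SLUB cache. The
 * cache name below is an example, not a requirement of the patch.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/slab/kmalloc-64/reserved", "r");
	int reserved;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &reserved) == 1)
		printf("reserved bytes per slab: %d\n", reserved);
	fclose(f);
	return 0;
}
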