author		Pekka Enberg <penberg@kernel.org>	2011-03-11 11:10:45 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-03-11 11:10:45 -0500
commit		c9149556756d56c68451a4a8735c37e7062fd3d7 (patch)
tree		a2dae56b22adaa9a23c8f92f30c3b3ad3b610850 /mm/slub.c
parent		d71f606f687ef9d0cdddfd3619ca7cb9a0b3fb63 (diff)
parent		5bfe53a77e8a3ffce4a10003c75f464a138e272d (diff)

Merge branch 'slab/rcu' into slab/next

Conflicts:
	mm/slub.c
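The slab/rcu work merged here teaches SLUB to reserve a number of bytes at the end of every slab (s->reserved). Caches created with SLAB_DESTROY_BY_RCU use that reserve to hold their struct rcu_head on configurations where it no longer fits inside page->lru, and all object-count and order calculations are adjusted to account for the reserved tail.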
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	77
1 file changed, 55 insertions(+), 22 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index ea6f0390996f..e841d8921c22 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -305,11 +305,16 @@ static inline size_t slab_ksize(const struct kmem_cache *s)
 	return s->size;
 }
 
+static inline int order_objects(int order, unsigned long size, int reserved)
+{
+	return ((PAGE_SIZE << order) - reserved) / size;
+}
+
 static inline struct kmem_cache_order_objects oo_make(int order,
-				unsigned long size)
+				unsigned long size, int reserved)
 {
 	struct kmem_cache_order_objects x = {
-		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
+		(order << OO_SHIFT) + order_objects(order, size, reserved)
 	};
 
 	return x;
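The new helper centralizes the objects-per-slab arithmetic. As a quick illustration, here is a minimal userspace sketch of the same math (the 4096-byte page and 16-byte reserve are assumptions for illustration, not taken from the patch): reserving tail bytes costs at most one object per slab, and often none when the existing slack absorbs it.

/* Userspace sketch of the order_objects() arithmetic above; not kernel code.
 * PAGE_SIZE and the 16-byte reserve are illustrative assumptions. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

int main(void)
{
	printf("%d\n", order_objects(0, 256, 0));	/* 16 objects */
	printf("%d\n", order_objects(0, 256, 16));	/* 15: the reserve costs one */
	printf("%d\n", order_objects(0, 300, 16));	/* 13: existing slack absorbs it */
	return 0;
}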
@@ -641,7 +646,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
 		return 1;
 
 	start = page_address(page);
-	length = (PAGE_SIZE << compound_order(page));
+	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
 	end = start + length;
 	remainder = length % s->size;
 	if (!remainder)
@@ -722,7 +727,7 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 		return 0;
 	}
 
-	maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 	if (page->objects > maxobj) {
 		slab_err(s, page, "objects %u > max %u",
 			s->name, page->objects, maxobj);
@@ -772,7 +777,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 		nr++;
 	}
 
-	max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+	max_objects = order_objects(compound_order(page), s->size, s->reserved);
 	if (max_objects > MAX_OBJS_PER_PAGE)
 		max_objects = MAX_OBJS_PER_PAGE;
 
@@ -1273,21 +1278,38 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 	__free_pages(page, order);
 }
 
+#define need_reserve_slab_rcu						\
+	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
+
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page;
 
-	page = container_of((struct list_head *)h, struct page, lru);
+	if (need_reserve_slab_rcu)
+		page = virt_to_head_page(h);
+	else
+		page = container_of((struct list_head *)h, struct page, lru);
+
 	__free_slab(page->slab, page);
 }
 
 static void free_slab(struct kmem_cache *s, struct page *page)
 {
 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
-		/*
-		 * RCU free overloads the RCU head over the LRU
-		 */
-		struct rcu_head *head = (void *)&page->lru;
+		struct rcu_head *head;
+
+		if (need_reserve_slab_rcu) {
+			int order = compound_order(page);
+			int offset = (PAGE_SIZE << order) - s->reserved;
+
+			VM_BUG_ON(s->reserved != sizeof(*head));
+			head = page_address(page) + offset;
+		} else {
+			/*
+			 * RCU free overloads the RCU head over the LRU
+			 */
+			head = (void *)&page->lru;
+		}
 
 		call_rcu(head, rcu_free_slab);
 	} else
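need_reserve_slab_rcu is a compile-time constant: the reserve is needed only when struct rcu_head is too large to overlay page->lru. When it is, free_slab() places the head in the reserved bytes at the very end of the slab, and rcu_free_slab() recovers the page from the head's address with virt_to_head_page(). A standalone sketch of the sizeof idiom, with stub types standing in for the kernel's (real layouts vary by architecture and config):

#include <stdio.h>

/* Stub types standing in for the kernel's; real sizes differ by config. */
struct list_head { void *next, *prev; };
struct rcu_head { struct rcu_head *next; void (*func)(struct rcu_head *); };
struct page { struct list_head lru; /* ... */ };

/* Evaluated entirely at compile time; NULL is never dereferenced
 * because sizeof does not evaluate its operand. */
#define need_reserve_slab_rcu \
	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))

int main(void)
{
	/* With these stubs both sides are two pointers wide, so no reserve. */
	printf("need_reserve_slab_rcu = %d\n", (int)need_reserve_slab_rcu);
	return 0;
}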
@@ -2012,13 +2034,13 @@ static int slub_nomerge;
  * the smallest order which will fit the object.
  */
 static inline int slab_order(int size, int min_objects,
-				int max_order, int fract_leftover)
+				int max_order, int fract_leftover, int reserved)
 {
 	int order;
 	int rem;
 	int min_order = slub_min_order;
 
-	if ((PAGE_SIZE << min_order) / size > MAX_OBJS_PER_PAGE)
+	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
 
 	for (order = max(min_order,
@@ -2027,10 +2049,10 @@ static inline int slab_order(int size, int min_objects,
 
 		unsigned long slab_size = PAGE_SIZE << order;
 
-		if (slab_size < min_objects * size)
+		if (slab_size < min_objects * size + reserved)
 			continue;
 
-		rem = slab_size % size;
+		rem = (slab_size - reserved) % size;
 
 		if (rem <= slab_size / fract_leftover)
 			break;
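A worked example of the adjusted waste check (values illustrative; assumes 4096-byte pages): an order is accepted once the leftover after packing whole objects, computed net of the reserve, is at most slab_size / fract_leftover.

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long size = 700, reserved = 16;	/* illustrative values */
	int fract_leftover = 16;	/* first pass in calculate_order() */
	int order;

	for (order = 0; order <= 2; order++) {
		unsigned long slab_size = PAGE_SIZE << order;
		unsigned long rem = (slab_size - reserved) % size;

		/* order 0: rem 580 > 256, rejected; order 1: rem 476 <= 512, accepted */
		printf("order %d: rem %lu, limit %lu -> %s\n", order, rem,
		       slab_size / fract_leftover,
		       rem <= slab_size / fract_leftover ? "accept" : "too wasteful");
	}
	return 0;
}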
@@ -2040,7 +2062,7 @@ static inline int slab_order(int size, int min_objects,
 	return order;
 }
 
-static inline int calculate_order(int size)
+static inline int calculate_order(int size, int reserved)
 {
 	int order;
 	int min_objects;
@@ -2058,14 +2080,14 @@ static inline int calculate_order(int size)
 	min_objects = slub_min_objects;
 	if (!min_objects)
 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
-	max_objects = (PAGE_SIZE << slub_max_order)/size;
+	max_objects = order_objects(slub_max_order, size, reserved);
 	min_objects = min(min_objects, max_objects);
 
 	while (min_objects > 1) {
 		fraction = 16;
 		while (fraction >= 4) {
 			order = slab_order(size, min_objects,
-					slub_max_order, fraction);
+					slub_max_order, fraction, reserved);
 			if (order <= slub_max_order)
 				return order;
 			fraction /= 2;
@@ -2077,14 +2099,14 @@ static inline int calculate_order(int size)
 	 * We were unable to place multiple objects in a slab. Now
 	 * lets see if we can place a single object there.
 	 */
-	order = slab_order(size, 1, slub_max_order, 1);
+	order = slab_order(size, 1, slub_max_order, 1, reserved);
 	if (order <= slub_max_order)
 		return order;
 
 	/*
 	 * Doh this slab cannot be placed using slub_max_order.
 	 */
-	order = slab_order(size, 1, MAX_ORDER, 1);
+	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
 	if (order < MAX_ORDER)
 		return order;
 	return -ENOSYS;
@@ -2335,7 +2357,7 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	if (forced_order >= 0)
 		order = forced_order;
 	else
-		order = calculate_order(size);
+		order = calculate_order(size, s->reserved);
 
 	if (order < 0)
 		return 0;
@@ -2353,8 +2375,8 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
 	/*
 	 * Determine the number of objects per slab
 	 */
-	s->oo = oo_make(order, size);
-	s->min = oo_make(get_order(size), size);
+	s->oo = oo_make(order, size, s->reserved);
+	s->min = oo_make(get_order(size), size, s->reserved);
 	if (oo_objects(s->oo) > oo_objects(s->max))
 		s->max = s->oo;
 
@@ -2373,6 +2395,10 @@ static int kmem_cache_open(struct kmem_cache *s,
 	s->objsize = size;
 	s->align = align;
 	s->flags = kmem_cache_flags(size, flags, name, ctor);
+	s->reserved = 0;
+
+	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
+		s->reserved = sizeof(struct rcu_head);
 
 	if (!calculate_sizes(s, -1))
 		goto error;
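Callers need no changes to pick up the reserve; it is applied during cache creation. A hypothetical module snippet (the cache name and 256-byte object size are made up for illustration):

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *my_cache;	/* hypothetical cache */

static int __init my_init(void)
{
	/* kmem_cache_open() runs inside kmem_cache_create(): on configs
	 * where need_reserve_slab_rcu is true, the new cache gets
	 * s->reserved = sizeof(struct rcu_head); elsewhere it stays 0. */
	my_cache = kmem_cache_create("my_cache", 256, 0,
				     SLAB_DESTROY_BY_RCU, NULL);
	return my_cache ? 0 : -ENOMEM;
}
module_init(my_init);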
@@ -4014,6 +4040,12 @@ static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
 }
 SLAB_ATTR_RO(destroy_by_rcu);
 
+static ssize_t reserved_show(struct kmem_cache *s, char *buf)
+{
+	return sprintf(buf, "%d\n", s->reserved);
+}
+SLAB_ATTR_RO(reserved);
+
 #ifdef CONFIG_SLUB_DEBUG
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
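With reserved_show() wired into slab_attrs[] below, the reserve becomes visible from userspace: reading /sys/kernel/slab/<cache>/reserved reports sizeof(struct rcu_head) for a SLAB_DESTROY_BY_RCU cache on affected configurations, and 0 otherwise.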
@@ -4300,6 +4332,7 @@ static struct attribute *slab_attrs[] = {
 	&reclaim_account_attr.attr,
 	&destroy_by_rcu_attr.attr,
 	&shrink_attr.attr,
+	&reserved_attr.attr,
 #ifdef CONFIG_SLUB_DEBUG
 	&total_objects_attr.attr,
 	&slabs_attr.attr,