author		Wei Yang <weiyang@linux.vnet.ibm.com>	2014-08-06 19:04:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:15 -0400
commit		54266640709a24c9844245d0d9f36b9cb1f31326 (patch)
tree		c3fc54c3c86b0ff0d06f244baadada4f7d0dd2a3 /mm/slub.c
parent		5e804789673114c616816f8387169790afe376b5 (diff)
slub: avoid duplicate creation on the first object
When a kmem_cache is created with a ctor, each object in the kmem_cache is initialized before it is ready for use. In the slub implementation, however, the first object is initialized twice.

This patch removes the duplicate initialization of the first object.

Fix commit 7656c72b ("SLUB: add macros for scanning objects in a slab").

Signed-off-by: Wei Yang <weiyang@linux.vnet.ibm.com>
Acked-by: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
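To see the duplicate, here is a minimal user-space sketch of the old freelist setup loop. Objects are modeled as plain indices and setup_object() is a stand-in for the real slub helper (which invokes the cache's ctor); this illustrates the pattern only, it is not the kernel code:

	#include <stdio.h>

	#define NOBJ 4

	/* Stand-in for slub's setup_object(); in the kernel this runs s->ctor. */
	static void setup_object(int obj)
	{
		printf("ctor runs on object %d\n", obj);
	}

	int main(void)
	{
		/*
		 * Old scheme: 'last' trails 'p' by one object, so the first
		 * two iterations both pass object 0 to setup_object().
		 */
		int last = 0;
		for (int p = 0; p < NOBJ; p++) {
			setup_object(last);	/* p == 0 and p == 1 both hit object 0 */
			last = p;
		}
		setup_object(last);		/* final object, outside the loop */
		return 0;
	}

Running this prints "ctor runs on object 0" twice: NOBJ objects receive NOBJ + 1 setup calls. The patched loop below visits each object exactly once.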
Diffstat (limited to 'mm/slub.c')
-rw-r--r--	mm/slub.c	19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 92d8139c556d..1f1f838326a0 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -283,6 +283,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
 			__p += (__s)->size)
 
+#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
+	for (__p = (__addr), __idx = 1; __idx <= __objects;\
+			__p += (__s)->size, __idx++)
+
 /* Determine object index from a given position */
 static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
 {
@@ -1379,9 +1383,9 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
 	struct page *page;
 	void *start;
-	void *last;
 	void *p;
 	int order;
+	int idx;
 
 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
 
@@ -1402,14 +1406,13 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	if (unlikely(s->flags & SLAB_POISON))
 		memset(start, POISON_INUSE, PAGE_SIZE << order);
 
-	last = start;
-	for_each_object(p, s, start, page->objects) {
-		setup_object(s, page, last);
-		set_freepointer(s, last, p);
-		last = p;
+	for_each_object_idx(p, idx, s, start, page->objects) {
+		setup_object(s, page, p);
+		if (likely(idx < page->objects))
+			set_freepointer(s, p, p + s->size);
+		else
+			set_freepointer(s, p, NULL);
 	}
-	setup_object(s, page, last);
-	set_freepointer(s, last, NULL);
 
 	page->freelist = start;
 	page->inuse = page->objects;
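For contrast, the same toy model rewritten with the patched loop's shape: the 1-based idx makes the last object detectable inside the loop, so each object is set up exactly once and no trailing 'last' pointer is needed. Again, a sketch under the assumptions above (integer objects, printf in place of set_freepointer), not the kernel code:

	#include <stdio.h>

	#define NOBJ 4

	/* Stand-in for slub's setup_object(); in the kernel this runs s->ctor. */
	static void setup_object(int obj)
	{
		printf("ctor runs on object %d\n", obj);
	}

	int main(void)
	{
		/*
		 * New scheme, mirroring for_each_object_idx(): idx runs
		 * 1..NOBJ, each object is set up once, and the final object
		 * (idx == NOBJ) terminates the freelist with NULL.
		 */
		for (int p = 0, idx = 1; idx <= NOBJ; p++, idx++) {
			setup_object(p);
			if (idx < NOBJ)
				printf("freepointer(%d) -> %d\n", p, p + 1);
			else
				printf("freepointer(%d) -> NULL\n", p);
		}
		return 0;
	}

Here "ctor runs on object 0" appears exactly once: NOBJ objects receive exactly NOBJ setup calls, which is the point of the patch.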