author     Christoph Lameter <clameter@sgi.com>                    2007-10-16 04:26:09 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-10-16 12:43:01 -0400
commit     42a9fdbb12ac6c027b4b91ab9b5a60aa3a834489
tree       d319573849af86fa405b93e5b93add0d34305db7
parent     4c93c355d5d563f300df7e61ef753d7a064411e9
SLUB: Optimize cacheline use for zeroing
We touch a cacheline in the kmem_cache structure for zeroing just to get the
object size. However, the hot paths in slab_alloc and slab_free do not
reference any other fields of kmem_cache, so we may pull in that cacheline
solely for this one access.
Add a new field to kmem_cache_cpu that holds the object size. That cacheline
is already touched in the hot paths, so we save one cacheline on every
slab_alloc that zeroes an object.
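As a rough sketch, condensed from the hunks below (surrounding allocator code
omitted, field order as in the patch), the per-cpu structure and the zeroing
path end up as:

	struct kmem_cache_cpu {
		/* ... freelist and other fields as before ... */
		struct page *page;
		int node;
		unsigned int offset;
		unsigned int objsize;	/* object size cached per cpu, added here */
	};

	/* in slab_alloc(), once an object has been obtained: */
	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, c->objsize);	/* c is the kmem_cache_cpu; no kmem_cache access */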
We also need to update the kmem_cache_cpu object size whenever an aliasing
operation changes the objsize of a non-debug slab.
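Condensed from the kmem_cache_create() hunk below, the alias path refreshes
the cached per-cpu copies after the merged objsize may have grown:

	s = find_mergeable(size, align, flags, name, ctor);
	if (s) {
		int cpu;

		s->refcount++;
		/* clear the complete (possibly enlarged) object on kzalloc */
		s->objsize = max(s->objsize, (int)size);

		/* propagate the new size to each per-cpu structure */
		for_each_online_cpu(cpu)
			get_cpu_slab(s, cpu)->objsize = s->objsize;
		/* ... */
	}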
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/slub_def.h |  1
-rw-r--r--  mm/slub.c                | 14
2 files changed, 13 insertions, 2 deletions
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index f74716b59ce2..d65159d1d4f5 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -16,6 +16,7 @@ struct kmem_cache_cpu {
 	struct page *page;
 	int node;
 	unsigned int offset;
+	unsigned int objsize;
 };
 
 struct kmem_cache_node {
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1576,7 +1576,7 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
 	local_irq_restore(flags);
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
-		memset(object, 0, s->objsize);
+		memset(object, 0, c->objsize);
 
 	return object;
 }
@@ -1858,8 +1858,9 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
 {
 	c->page = NULL;
 	c->freelist = NULL;
-	c->offset = s->offset / sizeof(void *);
 	c->node = 0;
+	c->offset = s->offset / sizeof(void *);
+	c->objsize = s->objsize;
 }
 
 static void init_kmem_cache_node(struct kmem_cache_node *n)
@@ -2852,12 +2853,21 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 	down_write(&slub_lock);
 	s = find_mergeable(size, align, flags, name, ctor);
 	if (s) {
+		int cpu;
+
 		s->refcount++;
 		/*
 		 * Adjust the object sizes so that we clear
 		 * the complete object on kzalloc.
 		 */
 		s->objsize = max(s->objsize, (int)size);
+
+		/*
+		 * And then we need to update the object size in the
+		 * per cpu structures
+		 */
+		for_each_online_cpu(cpu)
+			get_cpu_slab(s, cpu)->objsize = s->objsize;
 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
 		up_write(&slub_lock);
 		if (sysfs_slab_alias(s, name))