author     Pekka Enberg <penberg@cs.helsinki.fi>    2006-02-01 06:05:48 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-02-01 11:53:17 -0500
commit     5295a74cc0bcf1291686eb734ccb06baa3d55c1a
tree       e90b312f402729893a97f164ce7e03386fe9c5ce
parent     78d382d77c84229d031431931bf6490d5da6ab86
[PATCH] slab: reduce inlining
From: Manfred Spraul <manfred@colorfullife.com>
Reduce the number of inline functions in slab to those that are used in
the hot path:
- no inline for debug functions
- no __always_inline; inline is already __always_inline
- remove inline from a few NUMA support functions (a rough sketch of the
  effect on code size follows below).
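A standalone sketch of that effect, for illustration only: the code below is
not mm/slab.c, and the names (three_lists, lists_init, setup_*) are
hypothetical. It shows the general mechanism behind the numbers that follow:
a static inline helper (and with forced inlining, plain inline already means
__always_inline) gets its body copied into every caller, whereas a plain
static helper is emitted once and called.

#include <stddef.h>

/* Hypothetical stand-in for a small, cold-path structure. */
struct three_lists {
        void *full;
        void *partial;
        void *free;
};

/*
 * Was "static inline": every caller got a copy of this body.
 * As plain "static" the compiler emits it once and may still
 * choose to inline it where that is actually a win.
 */
static void lists_init(struct three_lists *l)
{
        l->full = NULL;
        l->partial = NULL;
        l->free = NULL;
}

/* Several init-time call sites: each is now a short call, not a
 * duplicated body, which is where the .text savings come from. */
void setup_a(struct three_lists *l) { lists_init(l); }
void setup_b(struct three_lists *l) { lists_init(l); }
void setup_c(struct three_lists *l) { lists_init(l); }

Whether that single copy still gets inlined at a genuinely hot call site is
left to the compiler, which is the point of dropping the keyword outside the
hot path.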
Before:
   text    data     bss     dec     hex filename
  13588     752      48   14388    3834 mm/slab.o (defconfig)
  16671    2492      48   19211    4b0b mm/slab.o (numa)

After:
   text    data     bss     dec     hex filename
  13366     752      48   14166    3756 mm/slab.o (defconfig)
  16230    2492      48   18770    4952 mm/slab.o (numa)
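These are the usual binutils size(1) columns for the object file. Since data
and bss are unchanged, the whole dec delta is text: 14388 - 14166 = 222 bytes
saved for the defconfig build and 19211 - 18770 = 441 bytes for the NUMA build.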
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 mm/slab.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
@@ -337,7 +337,7 @@ static __always_inline int index_of(const size_t size)
 #define INDEX_AC index_of(sizeof(struct arraycache_init))
 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
 
-static inline void kmem_list3_init(struct kmem_list3 *parent)
+static void kmem_list3_init(struct kmem_list3 *parent)
 {
         INIT_LIST_HEAD(&parent->slabs_full);
         INIT_LIST_HEAD(&parent->slabs_partial);
@@ -818,7 +818,7 @@ static struct array_cache *alloc_arraycache(int node, int entries,
 #ifdef CONFIG_NUMA
 static void *__cache_alloc_node(kmem_cache_t *, gfp_t, int);
 
-static inline struct array_cache **alloc_alien_cache(int node, int limit)
+static struct array_cache **alloc_alien_cache(int node, int limit)
 {
         struct array_cache **ac_ptr;
         int memsize = sizeof(void *) * MAX_NUMNODES;
@@ -845,7 +845,7 @@ static inline struct array_cache **alloc_alien_cache(int node, int limit)
         return ac_ptr;
 }
 
-static inline void free_alien_cache(struct array_cache **ac_ptr)
+static void free_alien_cache(struct array_cache **ac_ptr)
 {
         int i;
 
@@ -858,8 +858,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
         kfree(ac_ptr);
 }
 
-static inline void __drain_alien_cache(kmem_cache_t *cachep,
-                                struct array_cache *ac, int node)
+static void __drain_alien_cache(kmem_cache_t *cachep,
+                                struct array_cache *ac, int node)
 {
         struct kmem_list3 *rl3 = cachep->nodelists[node];
 
@@ -1534,7 +1534,7 @@ static void slab_destroy(kmem_cache_t *cachep, struct slab *slabp)
 
 /* For setting up all the kmem_list3s for cache whose buffer_size is same
    as size of kmem_list3. */
-static inline void set_up_list3s(kmem_cache_t *cachep, int index)
+static void set_up_list3s(kmem_cache_t *cachep, int index)
 {
         int node;
 
@@ -1937,7 +1937,7 @@ static void check_spinlock_acquired(kmem_cache_t *cachep)
 #endif
 }
 
-static inline void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
+static void check_spinlock_acquired_node(kmem_cache_t *cachep, int node)
 {
 #ifdef CONFIG_SMP
         check_irq_off();