 Documentation/kernel-parameters.txt | 14
 mm/slab.h                           | 15
 mm/slab_common.c                    | 91
 mm/slub.c                           | 91
 4 files changed, 117 insertions(+), 94 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d9a452e8fb9b..a126a31dde02 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3158,6 +3158,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	slram=		[HW,MTD]
 
+	slab_nomerge	[MM]
+			Disable merging of slabs with similar size. May be
+			necessary if there is some reason to distinguish
+			allocs to different slabs. Debug options disable
+			merging on their own.
+			For more information see Documentation/vm/slub.txt.
+
 	slab_max_order=	[MM, SLAB]
 			Determines the maximum allowed order for slabs.
 			A high setting may cause OOMs due to memory
@@ -3193,11 +3200,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			For more information see Documentation/vm/slub.txt.
 
 	slub_nomerge	[MM, SLUB]
-			Disable merging of slabs with similar size. May be
-			necessary if there is some reason to distinguish
-			allocs to different slabs. Debug options disable
-			merging on their own.
-			For more information see Documentation/vm/slub.txt.
+			Same as slab_nomerge. This is a legacy alias kept
+			for compatibility. See slab_nomerge for details.
 
 	smart2=		[HW]
 			Format: <io1>[,<io2>[,...,<io8>]]
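
slab_nomerge takes no value: its mere presence on the kernel command line disables cache merging for whichever allocator is built in. A hedged usage sketch (the bootloader entry, kernel image path, and root device below are illustrative, not part of this patch):

	linux /boot/vmlinuz root=/dev/sda1 ro slab_nomerge

After boot, whether the option was passed can be verified by reading /proc/cmdline.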
diff --git a/mm/slab.h b/mm/slab.h
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -88,15 +88,30 @@ extern void create_boot_cache(struct kmem_cache *, const char *name,
 		size_t size, unsigned long flags);
 
 struct mem_cgroup;
+
+int slab_unmergeable(struct kmem_cache *s);
+struct kmem_cache *find_mergeable(size_t size, size_t align,
+	unsigned long flags, const char *name, void (*ctor)(void *));
 #ifdef CONFIG_SLUB
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *));
+
+unsigned long kmem_cache_flags(unsigned long object_size,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *));
 #else
 static inline struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 	unsigned long flags, void (*ctor)(void *))
 { return NULL; }
+
+static inline unsigned long kmem_cache_flags(unsigned long object_size,
+	unsigned long flags, const char *name,
+	void (*ctor)(void *))
+{
+	return flags;
+}
 #endif
 
 
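
The !CONFIG_SLUB stub above is what lets the merge logic move into common code: find_mergeable() in mm/slab_common.c calls kmem_cache_flags() unconditionally, and when SLUB's slub_debug processing is not built in, the call collapses to the identity function that simply returns flags. The call site in the common code (shown in full in the next file) is just:

	flags = kmem_cache_flags(size, flags, name, NULL);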
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d7d8ffd0c306..f206cb10a544 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -31,6 +31,34 @@ DEFINE_MUTEX(slab_mutex);
 struct kmem_cache *kmem_cache;
 
 /*
+ * Set of flags that will prevent slab merging
+ */
+#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
+		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
+		SLAB_FAILSLAB)
+
+#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
+		SLAB_CACHE_DMA | SLAB_NOTRACK)
+
+/*
+ * Merge control. If this is set then no merging of slab caches will occur.
+ * (Could be removed. This was introduced to pacify the merge skeptics.)
+ */
+static int slab_nomerge;
+
+static int __init setup_slab_nomerge(char *str)
+{
+	slab_nomerge = 1;
+	return 1;
+}
+
+#ifdef CONFIG_SLUB
+__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
+#endif
+
+__setup("slab_nomerge", setup_slab_nomerge);
+
+/*
  * Determine the size of a slab object
  */
 unsigned int kmem_cache_size(struct kmem_cache *s)
@@ -116,6 +144,69 @@ out:
 #endif
 
 /*
+ * Find a mergeable slab cache
+ */
+int slab_unmergeable(struct kmem_cache *s)
+{
+	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
+		return 1;
+
+	if (!is_root_cache(s))
+		return 1;
+
+	if (s->ctor)
+		return 1;
+
+	/*
+	 * We may have set a slab to be unmergeable during bootstrap.
+	 */
+	if (s->refcount < 0)
+		return 1;
+
+	return 0;
+}
+
+struct kmem_cache *find_mergeable(size_t size, size_t align,
+		unsigned long flags, const char *name, void (*ctor)(void *))
+{
+	struct kmem_cache *s;
+
+	if (slab_nomerge || (flags & SLAB_NEVER_MERGE))
+		return NULL;
+
+	if (ctor)
+		return NULL;
+
+	size = ALIGN(size, sizeof(void *));
+	align = calculate_alignment(flags, align, size);
+	size = ALIGN(size, align);
+	flags = kmem_cache_flags(size, flags, name, NULL);
+
+	list_for_each_entry(s, &slab_caches, list) {
+		if (slab_unmergeable(s))
+			continue;
+
+		if (size > s->size)
+			continue;
+
+		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
+			continue;
+		/*
+		 * Check if alignment is compatible.
+		 * Courtesy of Adrian Drzewiecki
+		 */
+		if ((s->size & ~(align - 1)) != s->size)
+			continue;
+
+		if (s->size - size >= sizeof(void *))
+			continue;
+
+		return s;
+	}
+	return NULL;
+}
+
+/*
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
  */
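
A candidate cache merges only if the requested object fits, the cache's per-object stride is compatible with the requested alignment, and no more than sizeof(void *) bytes per object would be wasted. A minimal userspace sketch of just those three checks follows; the struct and the kmalloc-96 values are simplified stand-ins for the kernel's types, the flag and ctor checks are omitted, and align is assumed to be a power of two (which calculate_alignment() guarantees in the kernel):

	#include <stdio.h>
	#include <stddef.h>

	struct cache {			/* stand-in for struct kmem_cache */
		const char *name;
		size_t size;		/* per-object stride in the slab */
	};

	static int sizes_compatible(const struct cache *s, size_t size,
				    size_t align)
	{
		if (size > s->size)			/* object must fit */
			return 0;
		if ((s->size & ~(align - 1)) != s->size)/* stride keeps alignment */
			return 0;
		if (s->size - size >= sizeof(void *))	/* limit wasted bytes */
			return 0;
		return 1;
	}

	int main(void)
	{
		struct cache kmalloc_96 = { "kmalloc-96", 96 };

		/* 92-byte objects, 4-byte alignment: 96 is 4-aligned and only
		 * 4 bytes are wasted (< sizeof(void *) on 64-bit), so merge. */
		printf("92/4: %s\n",
		       sizes_compatible(&kmalloc_96, 92, 4) ? "merge" : "no merge");

		/* 80-byte objects would waste 16 bytes per object: rejected. */
		printf("80/4: %s\n",
		       sizes_compatible(&kmalloc_96, 80, 4) ? "merge" : "no merge");
		return 0;
	}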
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -169,16 +169,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
  */
 #define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
 
-/*
- * Set of flags that will prevent slab merging
- */
-#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
-		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
-		SLAB_FAILSLAB)
-
-#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
-		SLAB_CACHE_DMA | SLAB_NOTRACK)
-
 #define OO_SHIFT	16
 #define OO_MASK		((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */
@@ -1176,7 +1166,7 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
-static unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -1208,7 +1198,7 @@ static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
 					struct page *page) {}
-static inline unsigned long kmem_cache_flags(unsigned long object_size,
+unsigned long kmem_cache_flags(unsigned long object_size,
 	unsigned long flags, const char *name,
 	void (*ctor)(void *))
 {
@@ -2719,12 +2709,6 @@ static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
 static int slub_min_objects;
 
 /*
- * Merge control. If this is set then no merging of slab caches will occur.
- * (Could be removed. This was introduced to pacify the merge skeptics.)
- */
-static int slub_nomerge;
-
-/*
  * Calculate the order of allocation given an slab object size.
  *
  * The order of allocation has significant impact on performance and other
@@ -3252,14 +3236,6 @@ static int __init setup_slub_min_objects(char *str)
 
 __setup("slub_min_objects=", setup_slub_min_objects);
 
-static int __init setup_slub_nomerge(char *str)
-{
-	slub_nomerge = 1;
-	return 1;
-}
-
-__setup("slub_nomerge", setup_slub_nomerge);
-
 void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
@@ -3637,69 +3613,6 @@ void __init kmem_cache_init_late(void)
 {
 }
 
-/*
- * Find a mergeable slab cache
- */
-static int slab_unmergeable(struct kmem_cache *s)
-{
-	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
-		return 1;
-
-	if (!is_root_cache(s))
-		return 1;
-
-	if (s->ctor)
-		return 1;
-
-	/*
-	 * We may have set a slab to be unmergeable during bootstrap.
-	 */
-	if (s->refcount < 0)
-		return 1;
-
-	return 0;
-}
-
-static struct kmem_cache *find_mergeable(size_t size, size_t align,
-		unsigned long flags, const char *name, void (*ctor)(void *))
-{
-	struct kmem_cache *s;
-
-	if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
-		return NULL;
-
-	if (ctor)
-		return NULL;
-
-	size = ALIGN(size, sizeof(void *));
-	align = calculate_alignment(flags, align, size);
-	size = ALIGN(size, align);
-	flags = kmem_cache_flags(size, flags, name, NULL);
-
-	list_for_each_entry(s, &slab_caches, list) {
-		if (slab_unmergeable(s))
-			continue;
-
-		if (size > s->size)
-			continue;
-
-		if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
-			continue;
-		/*
-		 * Check if alignment is compatible.
-		 * Courtesy of Adrian Drzewiecki
-		 */
-		if ((s->size & ~(align - 1)) != s->size)
-			continue;
-
-		if (s->size - size >= sizeof(void *))
-			continue;
-
-		return s;
-	}
-	return NULL;
-}
-
 struct kmem_cache *
 __kmem_cache_alias(const char *name, size_t size, size_t align,
 		   unsigned long flags, void (*ctor)(void *))
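
The legacy slub_nomerge spelling keeps working because __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0) in mm/slab_common.c registers the old name against the new handler; __setup(str, fn) is itself defined as __setup_param(str, fn, fn, 0), with the second argument serving only as a unique identifier for the generated symbol. Roughly, each registration emits a record into the .init.setup table that early boot code scans while parsing the command line; a simplified sketch of the mechanism (adapted from include/linux/init.h, attribute and alignment details elided):

	struct obs_kernel_param {
		const char *str;		/* parameter name to match */
		int (*setup_func)(char *);	/* handler run on match */
		int early;
	};

	/* what __setup_param("slub_nomerge", ...) roughly expands to:
	 * both spellings end up invoking setup_slab_nomerge() */
	static struct obs_kernel_param __setup_slub_nomerge
		__used __section(.init.setup)
		= { "slub_nomerge", setup_slab_nomerge, 0 };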