diff options
Diffstat (limited to 'mm/slab_common.c')
-rw-r--r-- | mm/slab_common.c | 182 |
1 file changed, 181 insertions(+), 1 deletion(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c index 069a24e64403..a8e76d79ee65 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -13,6 +13,8 @@ | |||
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
16 | #include <linux/seq_file.h> | ||
17 | #include <linux/proc_fs.h> | ||
16 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
17 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
18 | #include <asm/page.h> | 20 | #include <asm/page.h> |
@@ -71,6 +73,34 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size) | |||
71 | #endif | 73 | #endif |
72 | 74 | ||
73 | /* | 75 | /* |
76 | * Figure out what the alignment of the objects will be given a set of | ||
77 | * flags, a user specified alignment and the size of the objects. | ||
78 | */ | ||
79 | unsigned long calculate_alignment(unsigned long flags, | ||
80 | unsigned long align, unsigned long size) | ||
81 | { | ||
82 | /* | ||
83 | * If the user wants hardware cache aligned objects then follow that | ||
84 | * suggestion if the object is sufficiently large. | ||
85 | * | ||
86 | * The hardware cache alignment cannot override the specified | ||
87 | * alignment though. If that is greater then use it. | ||
88 | */ | ||
89 | if (flags & SLAB_HWCACHE_ALIGN) { | ||
90 | unsigned long ralign = cache_line_size(); | ||
91 | while (size <= ralign / 2) | ||
92 | ralign /= 2; | ||
93 | align = max(align, ralign); | ||
94 | } | ||
95 | |||
96 | if (align < ARCH_SLAB_MINALIGN) | ||
97 | align = ARCH_SLAB_MINALIGN; | ||
98 | |||
99 | return ALIGN(align, sizeof(void *)); | ||
100 | } | ||
101 | |||
102 | |||
103 | /* | ||
74 | * kmem_cache_create - Create a cache. | 104 | * kmem_cache_create - Create a cache. |
75 | * @name: A string which is used in /proc/slabinfo to identify this cache. | 105 | * @name: A string which is used in /proc/slabinfo to identify this cache. |
76 | * @size: The size of objects to be created in this cache. | 106 | * @size: The size of objects to be created in this cache. |
@@ -107,6 +137,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align | |||
107 | if (!kmem_cache_sanity_check(name, size) == 0) | 137 | if (!kmem_cache_sanity_check(name, size) == 0) |
108 | goto out_locked; | 138 | goto out_locked; |
109 | 139 | ||
140 | /* | ||
141 | * Some allocators will constraint the set of valid flags to a subset | ||
142 | * of all flags. We expect them to define CACHE_CREATE_MASK in this | ||
143 | * case, and we'll just provide them with a sanitized version of the | ||
144 | * passed flags. | ||
145 | */ | ||
146 | flags &= CACHE_CREATE_MASK; | ||
110 | 147 | ||
111 | s = __kmem_cache_alias(name, size, align, flags, ctor); | 148 | s = __kmem_cache_alias(name, size, align, flags, ctor); |
112 | if (s) | 149 | if (s) |
@@ -115,7 +152,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align | |||
115 | s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); | 152 | s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); |
116 | if (s) { | 153 | if (s) { |
117 | s->object_size = s->size = size; | 154 | s->object_size = s->size = size; |
118 | s->align = align; | 155 | s->align = calculate_alignment(flags, align, size); |
119 | s->ctor = ctor; | 156 | s->ctor = ctor; |
120 | s->name = kstrdup(name, GFP_KERNEL); | 157 | s->name = kstrdup(name, GFP_KERNEL); |
121 | if (!s->name) { | 158 | if (!s->name) { |
@@ -192,3 +229,146 @@ int slab_is_available(void) | |||
192 | { | 229 | { |
193 | return slab_state >= UP; | 230 | return slab_state >= UP; |
194 | } | 231 | } |
232 | |||
233 | #ifndef CONFIG_SLOB | ||
234 | /* Create a cache during boot when no slab services are available yet */ | ||
235 | void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size, | ||
236 | unsigned long flags) | ||
237 | { | ||
238 | int err; | ||
239 | |||
240 | s->name = name; | ||
241 | s->size = s->object_size = size; | ||
242 | s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size); | ||
243 | err = __kmem_cache_create(s, flags); | ||
244 | |||
245 | if (err) | ||
246 | panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n", | ||
247 | name, size, err); | ||
248 | |||
249 | s->refcount = -1; /* Exempt from merging for now */ | ||
250 | } | ||
251 | |||
252 | struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, | ||
253 | unsigned long flags) | ||
254 | { | ||
255 | struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); | ||
256 | |||
257 | if (!s) | ||
258 | panic("Out of memory when creating slab %s\n", name); | ||
259 | |||
260 | create_boot_cache(s, name, size, flags); | ||
261 | list_add(&s->list, &slab_caches); | ||
262 | s->refcount = 1; | ||
263 | return s; | ||
264 | } | ||
265 | |||
266 | #endif /* !CONFIG_SLOB */ | ||
267 | |||
268 | |||
269 | #ifdef CONFIG_SLABINFO | ||
/* Emit the /proc/slabinfo banner and column legend. */
static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	/* Single call; the emitted bytes match the historical format exactly. */
	seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
		    "<objperslab> <pagesperslab>"
		    " : tunables <limit> <batchcount> <sharedfactor>"
		    " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}
292 | |||
293 | static void *s_start(struct seq_file *m, loff_t *pos) | ||
294 | { | ||
295 | loff_t n = *pos; | ||
296 | |||
297 | mutex_lock(&slab_mutex); | ||
298 | if (!n) | ||
299 | print_slabinfo_header(m); | ||
300 | |||
301 | return seq_list_start(&slab_caches, *pos); | ||
302 | } | ||
303 | |||
/* seq_file next callback: advance the cursor to the next cache on the list. */
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}
308 | |||
/* seq_file stop callback: drop the lock taken in s_start(). */
static void s_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}
313 | |||
314 | static int s_show(struct seq_file *m, void *p) | ||
315 | { | ||
316 | struct kmem_cache *s = list_entry(p, struct kmem_cache, list); | ||
317 | struct slabinfo sinfo; | ||
318 | |||
319 | memset(&sinfo, 0, sizeof(sinfo)); | ||
320 | get_slabinfo(s, &sinfo); | ||
321 | |||
322 | seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", | ||
323 | s->name, sinfo.active_objs, sinfo.num_objs, s->size, | ||
324 | sinfo.objects_per_slab, (1 << sinfo.cache_order)); | ||
325 | |||
326 | seq_printf(m, " : tunables %4u %4u %4u", | ||
327 | sinfo.limit, sinfo.batchcount, sinfo.shared); | ||
328 | seq_printf(m, " : slabdata %6lu %6lu %6lu", | ||
329 | sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); | ||
330 | slabinfo_show_stats(m, s); | ||
331 | seq_putc(m, '\n'); | ||
332 | return 0; | ||
333 | } | ||
334 | |||
/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = s_start,	/* takes slab_mutex; prints header at pos 0 */
	.next = s_next,		/* walks the global slab_caches list */
	.stop = s_stop,		/* releases slab_mutex */
	.show = s_show,		/* one formatted line per kmem_cache */
};
354 | |||
/* open() handler for /proc/slabinfo: attach the seq_file iterator. */
static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}
359 | |||
/* File operations backing /proc/slabinfo. */
static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	/* slabinfo_write is supplied by the allocator — presumably cache tuning; verify */
	.write = slabinfo_write,
	.llseek = seq_lseek,
	.release = seq_release,
};
367 | |||
/* Register /proc/slabinfo at boot (mode S_IRUSR: readable by owner/root only). */
static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IRUSR, NULL, &proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
374 | #endif /* CONFIG_SLABINFO */ | ||