Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 54 ++++++++++--------------------------------------------
 1 file changed, 10 insertions(+), 44 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 3230cd2c6b3b..50a73fca19c4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -145,30 +145,6 @@
 #define	BYTES_PER_WORD		sizeof(void *)
 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef ARCH_KMALLOC_MINALIGN
-/*
- * Enforce a minimum alignment for the kmalloc caches.
- * Usually, the kmalloc caches are cache_line_size() aligned, except when
- * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
- * Some archs want to perform DMA into kmalloc caches and need a guaranteed
- * alignment larger than the alignment of a 64-bit integer.
- * ARCH_KMALLOC_MINALIGN allows that.
- * Note that increasing this value may disable some debug features.
- */
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-/*
- * Enforce a minimum alignment for all caches.
- * Intended for archs that get misalignment faults even for BYTES_PER_WORD
- * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
- * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
- * some debug features.
- */
-#define ARCH_SLAB_MINALIGN 0
-#endif
-
 #ifndef ARCH_KMALLOC_FLAGS
 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
 #endif
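
The two fallback definitions deleted above are only removed from mm/slab.c; the hunk itself does not show where they went, but the pattern is that a shared header now provides them. A minimal sketch of how the fallback is consumed; the arch header path and the 128-byte value are illustrative assumptions, not part of this patch:

/* arch/foo/include/asm/cache.h -- hypothetical arch override */
#define ARCH_KMALLOC_MINALIGN	128	/* e.g. DMA engine needs 128-byte buffers */

/* shared slab header -- same fallbacks as the block removed above */
#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN	__alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN	0
#endif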
@@ -2313,8 +2289,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (ralign < align) {
 		ralign = align;
 	}
-	/* disable debug if necessary */
-	if (ralign > __alignof__(unsigned long long))
+	/* disable debug if not aligning with REDZONE_ALIGN */
+	if (ralign & (__alignof__(unsigned long long) - 1))
 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
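
The test change above is behavioral, not cosmetic. The old comparison disabled red-zoning and user tracking for any ralign larger than the 64-bit alignment, even when ralign was a multiple of it and the redzone words would still land on valid boundaries; the new mask only trips when ralign is incompatible. A standalone userspace sketch, assuming __alignof__(unsigned long long) == 8 as on common 64-bit targets:

#include <stdio.h>

int main(void)
{
	unsigned long aligns[] = { 4, 8, 16, 32, 64 };
	unsigned long ull_align = __alignof__(unsigned long long);

	for (unsigned i = 0; i < sizeof(aligns) / sizeof(aligns[0]); i++) {
		unsigned long ralign = aligns[i];
		int old_kills = ralign > ull_align;		  /* old test */
		int new_kills = (ralign & (ull_align - 1)) != 0;  /* new test */

		printf("ralign=%2lu: old %s, new %s\n", ralign,
		       old_kills ? "disables debug" : "keeps debug",
		       new_kills ? "disables debug" : "keeps debug");
	}
	return 0;
}

With 8-byte alignment of unsigned long long, only ralign=4 fails the new test, while the old test needlessly shut debugging off for 16, 32 and 64.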
@@ -2340,8 +2316,8 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	 */
 	if (flags & SLAB_RED_ZONE) {
 		/* add space for red zone words */
-		cachep->obj_offset += sizeof(unsigned long long);
-		size += 2 * sizeof(unsigned long long);
+		cachep->obj_offset += align;
+		size += align + sizeof(unsigned long long);
 	}
 	if (flags & SLAB_STORE_USER) {
 		/* user store requires one word storage behind the end of
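
Sizing the leading redzone by align (above) rather than by a fixed sizeof(unsigned long long) keeps the user-visible object aligned: with a 16-byte cache alignment, the old obj_offset of 8 would have left every object on an 8-byte boundary only. A userspace sketch of the new arithmetic; the local variables stand in for the cachep fields and are assumptions for illustration:

#include <stdio.h>

int main(void)
{
	unsigned long align = 16;	/* cache alignment (stand-in) */
	unsigned long size = 40;	/* object size before debug padding */
	unsigned long obj_offset = 0;

	/* mirrors the new hunk: the leading redzone occupies a whole
	 * alignment unit, the trailing redzone stays a single 64-bit word */
	obj_offset += align;
	size += align + sizeof(unsigned long long);

	printf("object at offset %lu (%saligned), padded size %lu\n",
	       obj_offset, obj_offset % align ? "mis" : "", size);
	return 0;
}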
@@ -3695,21 +3671,10 @@ EXPORT_SYMBOL(kmem_cache_alloc_notrace);
  */
 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
 {
-	unsigned long addr = (unsigned long)ptr;
-	unsigned long min_addr = PAGE_OFFSET;
-	unsigned long align_mask = BYTES_PER_WORD - 1;
 	unsigned long size = cachep->buffer_size;
 	struct page *page;
 
-	if (unlikely(addr < min_addr))
-		goto out;
-	if (unlikely(addr > (unsigned long)high_memory - size))
-		goto out;
-	if (unlikely(addr & align_mask))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr)))
-		goto out;
-	if (unlikely(!kern_addr_valid(addr + size - 1)))
-		goto out;
+	if (unlikely(!kern_ptr_validate(ptr, size)))
+		goto out;
 	page = virt_to_page(ptr);
 	if (unlikely(!PageSlab(page)))
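
The five removed checks are folded into a single kern_ptr_validate() call. Reconstructed from the deleted lines, the helper's body is essentially the following; the real definition lives elsewhere in the tree and this sketch reuses kernel-only symbols (PAGE_OFFSET, high_memory, kern_addr_valid), so treat it as a reading aid rather than this patch's literal implementation:

/* Reconstruction of the consolidated checks, from the lines removed above. */
int kern_ptr_validate(const void *ptr, unsigned long size)
{
	unsigned long addr = (unsigned long)ptr;
	unsigned long min_addr = PAGE_OFFSET;
	unsigned long align_mask = sizeof(void *) - 1;	/* BYTES_PER_WORD - 1 */

	if (unlikely(addr < min_addr))			/* below lowmem start */
		goto out;
	if (unlikely(addr > (unsigned long)high_memory - size))
		goto out;				/* runs past lowmem end */
	if (unlikely(addr & align_mask))		/* not word-aligned */
		goto out;
	if (unlikely(!kern_addr_valid(addr)))		/* first byte mapped? */
		goto out;
	if (unlikely(!kern_addr_valid(addr + size - 1)))/* last byte mapped? */
		goto out;
	return 1;
out:
	return 0;
}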
@@ -4320,10 +4285,11 @@ static int s_show(struct seq_file *m, void *p)
 		unsigned long node_frees = cachep->node_frees;
 		unsigned long overflows = cachep->node_overflow;
 
-		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
-				%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
-				reaped, errors, max_freeable, node_allocs,
-				node_frees, overflows);
+		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
+			   "%4lu %4lu %4lu %4lu %4lu",
+			   allocs, high, grown,
+			   reaped, errors, max_freeable, node_allocs,
+			   node_frees, overflows);
 	}
 	/* cpu stats */
 	{
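
The seq_printf() rewrite above fixes a subtle output bug: a backslash-newline inside a string literal splices the next source line into the string, leading indentation included, so the old format string carried a run of tabs between the fourth and fifth %4lu. Adjacent string literals, by contrast, concatenate with nothing in between. A minimal userspace demonstration:

#include <stdio.h>

int main(void)
{
	/* old style: the continuation drags the next line's tabs
	 * into the literal */
	printf("[%s]\n", "four \
			five");
	/* new style: adjacent literals concatenate cleanly */
	printf("[%s]\n", "four "
			 "five");
	return 0;
}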