about · summary · refs · log · tree · commit · diff · stats
path: root/mm/slab.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  13
1 files changed, 10 insertions, 3 deletions
diff --git a/mm/slab.c b/mm/slab.c
index c77ebe6cc87c..4fcc5dd8d5a6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2190,9 +2190,16 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 		size += BYTES_PER_WORD;
 	}
 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
-	if (size >= kmalloc_size(INDEX_NODE + 1)
-	    && cachep->object_size > cache_line_size()
-	    && ALIGN(size, cachep->align) < PAGE_SIZE) {
+	/*
+	 * To activate debug pagealloc, off-slab management is necessary
+	 * requirement. In early phase of initialization, small sized slab
+	 * doesn't get initialized so it would not be possible. So, we need
+	 * to check size >= 256. It guarantees that all necessary small
+	 * sized slab is initialized in current slab initialization sequence.
+	 */
+	if (!slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
+		size >= 256 && cachep->object_size > cache_line_size() &&
+		ALIGN(size, cachep->align) < PAGE_SIZE) {
 		cachep->obj_offset += PAGE_SIZE - ALIGN(size, cachep->align);
 		size = PAGE_SIZE;
 	}