Diffstat (limited to 'mm/slob.c')
 mm/slob.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 77786be032e0..c9401a7eaa5f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
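
For context, here is a minimal user-space sketch of the alignment selection the second hunk touches. The SLAB_HWCACHE_ALIGN bit value and SLOB_ALIGN constant below are stand-ins assumed for illustration, not the kernel's definitions; the point is only that hardware-cache alignment is applied when the flag is set, while a caller-supplied align still acts as a lower bound.

/*
 * Minimal sketch of the alignment choice in kmem_cache_create() above.
 * The flag bit and SLOB_ALIGN value are assumptions made for illustration
 * only; they are not the kernel's actual definitions.
 */
#include <stdio.h>
#include <stddef.h>

#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* assumed flag value for this sketch */
#define SLOB_ALIGN		8		/* assumed stand-in alignment */

static size_t slob_cache_align(unsigned long flags, size_t requested_align)
{
	/* apply hardware-cache alignment only when the flag asks for it */
	size_t a = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;

	/* never drop below the alignment the caller explicitly requested */
	if (a < requested_align)
		a = requested_align;
	return a;
}

int main(void)
{
	printf("%zu\n", slob_cache_align(0, 0));			/* 0 */
	printf("%zu\n", slob_cache_align(SLAB_HWCACHE_ALIGN, 0));	/* 8 */
	printf("%zu\n", slob_cache_align(0, 16));			/* 16 */
	return 0;
}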