author	Christoph Lameter <clameter@sgi.com>	2007-05-06 17:49:56 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:12:55 -0400
commit	5af60839909b8e3b28ca7cd7912fa0b23475617f (patch)
tree	774b068673ad7bb6fc67d29339c9a07bf12a7789 /mm/slob.c
parent	96018fdacbfcaf6a0694d066b525f67c24025688 (diff)
slab allocators: Remove obsolete SLAB_MUST_HWCACHE_ALIGN
This patch was recently posted to lkml and acked by Pekka.

The flag SLAB_MUST_HWCACHE_ALIGN is

1. Never checked by SLAB at all.
2. A duplicate of SLAB_HWCACHE_ALIGN for SLUB.
3. Fulfills the role of SLAB_HWCACHE_ALIGN for SLOB.

The only remaining use is in sparc64 and ppc64, and their use there reflects
some earlier role that the slab flag once may have had. If it is specified,
then SLAB_HWCACHE_ALIGN is also specified.

The flag is confusing, inconsistent and has no purpose. Remove it.

Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
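As context for the list above, here is a minimal userspace sketch of why the
flag was redundant for the remaining callers. This is not part of the patch;
the flag values are assumed to match include/linux/slab.h from this era, and
the caller pattern is only illustrative.

/* Illustrative model, not kernel code. Flag values are assumptions
 * based on include/linux/slab.h circa 2.6.21. */
#include <stdio.h>

#define SLAB_HWCACHE_ALIGN      0x00002000UL	/* align objects on cachelines */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL	/* the flag being removed */

int main(void)
{
	/* The sparc64/ppc64 callers passed both flags together, roughly: */
	unsigned long flags = SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN;

	/* SLAB never tested the MUST bit, SLUB treated it as a synonym of
	 * SLAB_HWCACHE_ALIGN, and SLOB (after this patch) keys off
	 * SLAB_HWCACHE_ALIGN too, so clearing the MUST bit is a no-op: */
	flags &= ~SLAB_MUST_HWCACHE_ALIGN;
	printf("hwcache alignment still requested: %s\n",
	       (flags & SLAB_HWCACHE_ALIGN) ? "yes" : "no");
	return 0;
}

Since every remaining user already set both bits, dropping the MUST bit
changes no caller's behavior under any of the three allocators.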
Diffstat (limited to 'mm/slob.c')
-rw-r--r--	mm/slob.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/slob.c b/mm/slob.c
index 77786be032e0..c9401a7eaa5f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
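The changed line picks the larger of the flag-driven alignment and the
caller-supplied one. A small self-contained sketch of that computation after
the patch follows; SLOB_ALIGN stands in for SLOB's cacheline-sized alignment
(L1_CACHE_BYTES in the real source), and the value 64 below is an assumption
for illustration.

/* Userspace model of the patched alignment choice in kmem_cache_create().
 * SLOB_ALIGN assumed to be the L1 cacheline size; 64 is illustrative. */
#include <stdio.h>

#define SLAB_HWCACHE_ALIGN 0x00002000UL
#define SLOB_ALIGN 64	/* assumed stand-in for L1_CACHE_BYTES */

static size_t cache_align(unsigned long flags, size_t align)
{
	/* mirrors: c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	 *          if (c->align < align) c->align = align; */
	size_t a = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
	return (a < align) ? align : a;
}

int main(void)
{
	printf("%zu\n", cache_align(SLAB_HWCACHE_ALIGN, 8));	/* prints 64 */
	printf("%zu\n", cache_align(0, 8));			/* prints 8 */
	return 0;
}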