author     Christoph Lameter <clameter@sgi.com>                   2007-05-06 17:49:56 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-07 15:12:55 -0400
commit     5af60839909b8e3b28ca7cd7912fa0b23475617f (patch)
tree       774b068673ad7bb6fc67d29339c9a07bf12a7789 /mm
parent     96018fdacbfcaf6a0694d066b525f67c24025688 (diff)
slab allocators: Remove obsolete SLAB_MUST_HWCACHE_ALIGN
This patch was recently posted to lkml and acked by Pekka.
The flag SLAB_MUST_HWCACHE_ALIGN is:

1. never checked by SLAB at all;
2. a duplicate of SLAB_HWCACHE_ALIGN for SLUB; and
3. in SLOB, merely a stand-in for SLAB_HWCACHE_ALIGN.
The only remaining users are sparc64 and ppc64, and their use there
reflects some earlier role that the flag may once have had. Wherever it
is specified, SLAB_HWCACHE_ALIGN is also specified.
The flag is confusing and inconsistent, and it serves no purpose.
Remove it.
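For illustration only (this caller is hypothetical, not part of the
patch): in the remaining sparc64/ppc64 users both flags always appear
together, so dropping the obsolete one leaves behavior unchanged. Using
the kmem_cache_create() signature of this era (ctor/dtor parameters):

    #include <linux/errno.h>
    #include <linux/init.h>
    #include <linux/slab.h>

    struct example { unsigned long data[4]; };
    static struct kmem_cache *example_cachep;

    static int __init example_init(void)
    {
            /*
             * Previously such a caller would have passed
             * SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN; after this
             * patch SLAB_HWCACHE_ALIGN alone requests cacheline
             * alignment, and the result is identical.
             */
            example_cachep = kmem_cache_create("example_cache",
                                               sizeof(struct example), 0,
                                               SLAB_HWCACHE_ALIGN,
                                               NULL, NULL); /* ctor, dtor */
            return example_cachep ? 0 : -ENOMEM;
    }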
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  4
-rw-r--r--  mm/slob.c  4
-rw-r--r--  mm/slub.c  5
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -175,12 +175,12 @@
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
-			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
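Why the mm/slab.c change matters: CREATE_MASK enumerates every creation
flag SLAB is willing to accept, and kmem_cache_create() treats anything
outside the mask as a caller bug. A minimal sketch of that validation
pattern, with an abridged mask (the exact check and the full mask live
in mm/slab.c):

    /* Abridged sketch of SLAB's flag validation; the real CREATE_MASK
     * is the larger #define shown in the hunk above. */
    #define CREATE_MASK	(SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
    			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC)

    static void validate_cache_flags(unsigned long flags)
    {
            /* A bit outside the mask means the caller passed a flag
             * this allocator does not support, e.g. the removed
             * SLAB_MUST_HWCACHE_ALIGN. */
            BUG_ON(flags & ~CREATE_MASK);
    }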
diff --git a/mm/slob.c b/mm/slob.c
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
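The second slob.c hunk is SLOB's entire per-cache alignment policy:
SLAB_HWCACHE_ALIGN (previously SLAB_MUST_HWCACHE_ALIGN) forces
SLOB_ALIGN, and an explicit align argument can only raise the result.
A standalone sketch of that computation (SLOB_ALIGN is defined in
mm/slob.c; without the flag, objects keep the default 8-byte
alignment):

    /* Sketch of the alignment decision made in the hunk above. */
    static unsigned long slob_cache_align(unsigned long flags,
                                          unsigned long requested)
    {
            /* ignore alignment unless it's forced */
            unsigned long align = (flags & SLAB_HWCACHE_ALIGN) ?
                                    SLOB_ALIGN : 0;

            if (align < requested)	/* caller may raise it */
                    align = requested;
            return align;
    }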
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * specified alignment though. If that is greater
 	 * then use it.
 	 */
-	if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+	if ((flags & SLAB_HWCACHE_ALIGN) &&
 		size > L1_CACHE_BYTES / 2)
 		return max_t(unsigned long, align, L1_CACHE_BYTES);
 
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags &
-			(SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);
 
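With the duplicate flag gone, SLAB_HWCACHE_ALIGN is the single bit SLUB
consults both when computing alignment and when reporting it through
the hwcache_align sysfs attribute. An abridged sketch of
calculate_alignment() as it reads after this patch (the full function
in mm/slub.c continues past the early return):

    /* Abridged sketch: objects bigger than half a cacheline are
     * rounded up to a full cacheline when SLAB_HWCACHE_ALIGN is set;
     * otherwise the caller's explicit alignment stands. */
    static unsigned long calculate_alignment(unsigned long flags,
                                             unsigned long align,
                                             unsigned long size)
    {
            if ((flags & SLAB_HWCACHE_ALIGN) &&
                            size > L1_CACHE_BYTES / 2)
                    return max_t(unsigned long, align, L1_CACHE_BYTES);

            /* abridged: the real function applies further rules here */
            return align;
    }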