 arch/powerpc/mm/hugetlbpage.c | 3 +--
 arch/powerpc/mm/init_64.c     | 3 +--
 arch/sparc64/mm/tsb.c         | 3 +--
 include/linux/slab.h          | 1 -
 mm/slab.c                     | 4 ++--
 mm/slob.c                     | 4 ++--
 mm/slub.c                     | 5 ++---
 7 files changed, 9 insertions(+), 14 deletions(-)
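
The change is mechanical: SLAB_MUST_HWCACHE_ALIGN is removed and its remaining meaning folds into SLAB_HWCACHE_ALIGN, so every caller that OR'd the two flags together now passes the single flag. A minimal sketch of the caller-visible difference, using the six-argument kmem_cache_create() of this era and a hypothetical cache name:

        /* Before: the hint and the "force" flag were combined.
         * ("example_cache" is illustrative, not from the patch.) */
        cache = kmem_cache_create("example_cache", size, size,
                                  SLAB_HWCACHE_ALIGN | SLAB_MUST_HWCACHE_ALIGN,
                                  NULL, NULL);

        /* After: a single flag requests cache-line alignment. */
        cache = kmem_cache_create("example_cache", size, size,
                                  SLAB_HWCACHE_ALIGN, NULL, NULL);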
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 8508f973d9cc..c8814177b716 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1057,8 +1057,7 @@ static int __init hugetlbpage_init(void)
 	huge_pgtable_cache = kmem_cache_create("hugepte_cache",
 					       HUGEPTE_TABLE_SIZE,
 					       HUGEPTE_TABLE_SIZE,
-					       SLAB_HWCACHE_ALIGN |
-					       SLAB_MUST_HWCACHE_ALIGN,
+					       SLAB_HWCACHE_ALIGN,
 					       zero_ctor, NULL);
 	if (! huge_pgtable_cache)
 		panic("hugetlbpage_init(): could not create hugepte cache\n");
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d12a87ec5ae9..5a7750147b7d 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -183,8 +183,7 @@ void pgtable_cache_init(void)
 			"for size: %08x...\n", name, i, size);
 		pgtable_cache[i] = kmem_cache_create(name,
 						     size, size,
-						     SLAB_HWCACHE_ALIGN |
-						     SLAB_MUST_HWCACHE_ALIGN,
+						     SLAB_HWCACHE_ALIGN,
 						     zero_ctor,
 						     NULL);
 		if (! pgtable_cache[i])
diff --git a/arch/sparc64/mm/tsb.c b/arch/sparc64/mm/tsb.c
index 57eb3025537a..4be378d9a382 100644
--- a/arch/sparc64/mm/tsb.c
+++ b/arch/sparc64/mm/tsb.c
@@ -262,8 +262,7 @@ void __init pgtable_cache_init(void)
 
 		tsb_caches[i] = kmem_cache_create(name,
 						  size, size,
-						  SLAB_HWCACHE_ALIGN |
-						  SLAB_MUST_HWCACHE_ALIGN,
+						  SLAB_HWCACHE_ALIGN,
 						  NULL, NULL);
 		if (!tsb_caches[i]) {
 			prom_printf("Could not create %s cache\n", name);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 67425c277e12..a9befa50d3e3 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -26,7 +26,6 @@ typedef struct kmem_cache kmem_cache_t __deprecated;
 #define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
 #define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
-#define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* Force alignment even if debuggin is active */
 #define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
 #define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
 #define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
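
The removed definition's own comment ("Force alignment even if debuggin is active", typo and all) spells out the distinction being dropped: with slab debugging enabled, plain SLAB_HWCACHE_ALIGN was only a hint, while the MUST variant still forced cache-line alignment. A paraphrase of the before/after semantics (not literal kernel text); bit 0x00008000UL is left unused:

        /* Pre-patch (paraphrase): debugging could override the hint.
         *   SLAB_HWCACHE_ALIGN      - align to cache lines, unless debugging
         *   SLAB_MUST_HWCACHE_ALIGN - align to cache lines, always
         * Post-patch: SLAB_HWCACHE_ALIGN alone carries the request, and
         * each allocator decides how strictly to honor it. */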
diff --git a/mm/slab.c b/mm/slab.c
index 997c3b2f50c9..583644f6ae11 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -175,12 +175,12 @@
 # define CREATE_MASK	(SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
 			 SLAB_CACHE_DMA | \
-			 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
+			 SLAB_STORE_USER | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #else
 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
-			 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
+			 SLAB_CACHE_DMA | \
 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
 #endif
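
CREATE_MASK is the whitelist of flags kmem_cache_create() will accept, which is why the removed bit has to disappear from both the debug and non-debug variants. A sketch of how the mask is enforced (the check in mm/slab.c of this vintage is this statement, or close to it):

        /* Reject any flag bit this allocator does not understand. */
        BUG_ON(flags & ~CREATE_MASK);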
diff --git a/mm/slob.c b/mm/slob.c
index 77786be032e0..c9401a7eaa5f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -21,7 +21,7 @@
  *
  * SLAB is emulated on top of SLOB by simply calling constructors and
  * destructors for every SLAB allocation. Objects are returned with
- * the 8-byte alignment unless the SLAB_MUST_HWCACHE_ALIGN flag is
+ * the 8-byte alignment unless the SLAB_HWCACHE_ALIGN flag is
  * set, in which case the low-level allocator will fragment blocks to
  * create the proper alignment. Again, objects of page-size or greater
  * are allocated by calling __get_free_pages. As SLAB objects know
@@ -295,7 +295,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 		c->ctor = ctor;
 		c->dtor = dtor;
 		/* ignore alignment unless it's forced */
-		c->align = (flags & SLAB_MUST_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
+		c->align = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
 		if (c->align < align)
 			c->align = align;
 	} else if (flags & SLAB_PANIC)
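
SLOB keeps its behavior but reads it from the surviving flag: alignment is ignored unless requested, then clamped up to the caller's explicit align argument. A compact model of the post-patch computation, assuming SLOB_ALIGN is the alignment constant defined in mm/slob.c:

        /* Effective alignment of a SLOB cache after this patch. */
        static int slob_effective_align(unsigned long flags, int align)
        {
        	int a = (flags & SLAB_HWCACHE_ALIGN) ? SLOB_ALIGN : 0;
        	return (a < align) ? align : a;	/* honor explicit align */
        }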
diff --git a/mm/slub.c b/mm/slub.c
index 3904002bdb35..79940e98e5e6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1496,7 +1496,7 @@ static unsigned long calculate_alignment(unsigned long flags,
 	 * specified alignment though. If that is greater
 	 * then use it.
 	 */
-	if ((flags & (SLAB_MUST_HWCACHE_ALIGN | SLAB_HWCACHE_ALIGN)) &&
+	if ((flags & SLAB_HWCACHE_ALIGN) &&
 			size > L1_CACHE_BYTES / 2)
 		return max_t(unsigned long, align, L1_CACHE_BYTES);
 
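SLUB's heuristic survives with the single flag: only objects bigger than half a cache line are pushed to full cache-line alignment, so several small objects can still share one line. A worked example under the assumption that L1_CACHE_BYTES == 64:

        /* size = 40, align = 8 : 40 > 32, so alignment becomes 64.
         * size = 24, align = 8 : 24 <= 32, hint ignored, alignment stays 8.
         * (Illustrative only; the real line size is per-architecture.) */
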
@@ -3142,8 +3142,7 @@ SLAB_ATTR(reclaim_account);
 
 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
 {
-	return sprintf(buf, "%d\n", !!(s->flags &
-			(SLAB_HWCACHE_ALIGN|SLAB_MUST_HWCACHE_ALIGN)));
+	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
 }
 SLAB_ATTR_RO(hwcache_align);
 
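
Usage note: hwcache_align is a read-only attribute in SLUB's sysfs tree (/sys/slab/<cache>/hwcache_align on kernels of this vintage; the tree later moved under /sys/kernel/slab), and after this patch it reports 1 exactly when SLAB_HWCACHE_ALIGN was set at cache creation.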